Skip to content

Commit 4afd3c9

Browse files
authored
Hide ov::Model from wrappers (#309)
* Hide ov::Model in classification * Cover segmentation * Cover MRCNN * Cover detection * Cover anomaly * Cleanup
1 parent 606fa98 commit 4afd3c9

File tree

9 files changed

+111
-85
lines changed

9 files changed

+111
-85
lines changed

src/cpp/include/adapters/inference_adapter.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,11 +31,11 @@ class InferenceAdapter {
3131
virtual void awaitAll() = 0;
3232
virtual void awaitAny() = 0;
3333
virtual size_t getNumAsyncExecutors() const = 0;
34-
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
35-
ov::Core& core,
34+
virtual void loadModel(const std::string& modelPath,
3635
const std::string& device = "",
37-
const ov::AnyMap& compilationConfig = {},
38-
size_t max_num_requests = 0) = 0;
36+
const ov::AnyMap& adapterConfig = {},
37+
bool preCompile = true) = 0;
38+
virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) = 0;
3939
virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
4040
virtual ov::PartialShape getOutputShape(const std::string& inputName) const = 0;
4141
virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const = 0;

src/cpp/include/adapters/openvino_adapter.h

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,11 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
2525
virtual bool isReady();
2626
virtual void awaitAll();
2727
virtual void awaitAny();
28-
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
29-
ov::Core& core,
28+
virtual void loadModel(const std::string& modelPath,
3029
const std::string& device = "",
31-
const ov::AnyMap& compilationConfig = {},
32-
size_t max_num_requests = 1) override;
30+
const ov::AnyMap& adapterConfig = {},
31+
bool preCompile = true) override;
32+
virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) override;
3333
virtual size_t getNumAsyncExecutors() const;
3434
virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
3535
virtual ov::PartialShape getOutputShape(const std::string& outputName) const override;
@@ -39,6 +39,8 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
3939
virtual std::vector<std::string> getOutputNames() const override;
4040
virtual const ov::AnyMap& getModelConfig() const override;
4141

42+
void applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t);
43+
4244
protected:
4345
void initInputsOutputs();
4446

@@ -48,7 +50,6 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
4850
std::vector<std::string> outputNames;
4951
std::unique_ptr<AsyncInferQueue> asyncQueue;
5052
ov::AnyMap modelConfig; // the content of model_info section of rt_info
51-
52-
public:
53+
std::shared_ptr<ov::Model> model;
5354
ov::CompiledModel compiledModel;
5455
};

src/cpp/src/adapters/openvino_adapter.cpp

Lines changed: 31 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,16 @@
99
#include <stdexcept>
1010
#include <vector>
1111

12-
void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>& model,
13-
ov::Core& core,
14-
const std::string& device,
15-
const ov::AnyMap& compilationConfig,
16-
size_t max_num_requests) {
17-
ov::AnyMap customCompilationConfig(compilationConfig);
12+
#include "utils/config.h"
13+
14+
void OpenVINOInferenceAdapter::compileModel(const std::string& device, const ov::AnyMap& adapterConfig) {
15+
if (!model) {
16+
throw std::runtime_error("Model is not loaded");
17+
}
18+
size_t max_num_requests = 1;
19+
max_num_requests = utils::get_from_any_maps("max_num_requests", adapterConfig, {}, max_num_requests);
20+
21+
ov::AnyMap customCompilationConfig(adapterConfig);
1822
if (max_num_requests != 1) {
1923
if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
2024
customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::THROUGHPUT;
@@ -30,10 +34,31 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>&
3034
}
3135
}
3236

37+
ov::Core core;
3338
compiledModel = core.compile_model(model, device, customCompilationConfig);
3439
asyncQueue = std::make_unique<AsyncInferQueue>(compiledModel, max_num_requests);
3540
initInputsOutputs();
41+
}
3642

43+
void OpenVINOInferenceAdapter::loadModel(const std::string& modelPath,
44+
const std::string& device,
45+
const ov::AnyMap& adapterConfig,
46+
bool preCompile) {
47+
ov::Core core;
48+
model = core.read_model(modelPath);
49+
if (model->has_rt_info({"model_info"})) {
50+
modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
51+
}
52+
if (preCompile) {
53+
compileModel(device, adapterConfig);
54+
}
55+
}
56+
57+
void OpenVINOInferenceAdapter::applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t) {
58+
if (!model) {
59+
throw std::runtime_error("Model is not loaded");
60+
}
61+
t(model);
3762
if (model->has_rt_info({"model_info"})) {
3863
modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
3964
}

src/cpp/src/tasks/anomaly.cpp

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,11 @@
55
#include "utils/tensor.h"
66

77
void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
8+
if (utils::model_has_embedded_processing(ov_model)) {
9+
std::cout << "model already was serialized" << std::endl;
10+
return;
11+
}
12+
813
auto input = ov_model->inputs().front();
914

1015
auto layout = ov::layout::get_layout(input);
@@ -47,23 +52,21 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
4752
}
4853

4954
Anomaly Anomaly::load(const std::string& model_path) {
50-
auto core = ov::Core();
51-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
55+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
56+
adapter->loadModel(model_path, "", {}, false);
57+
58+
std::string model_type;
59+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
5260

53-
if (model->has_rt_info("model_info", "model_type")) {
54-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
55-
<< std::endl;
61+
if (!model_type.empty()) {
62+
std::cout << "has model type in info: " << model_type << std::endl;
5663
} else {
5764
throw std::runtime_error("Incorrect or unsupported model_type");
5865
}
5966

60-
if (utils::model_has_embedded_processing(model)) {
61-
std::cout << "model already was serialized" << std::endl;
62-
} else {
63-
serialize(model);
64-
}
65-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
66-
adapter->loadModel(model, core, "AUTO");
67+
adapter->applyModelTransform(Anomaly::serialize);
68+
adapter->compileModel("AUTO", {});
69+
6770
return Anomaly(adapter);
6871
}
6972

src/cpp/src/tasks/classification.cpp

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,10 @@ std::vector<size_t> get_non_xai_output_indices(const std::vector<ov::Output<ov::
8383
} // namespace
8484

8585
void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
86+
if (utils::model_has_embedded_processing(ov_model)) {
87+
std::cout << "model already was serialized" << std::endl;
88+
return;
89+
}
8690
// --------------------------- Configure input & output -------------------------------------------------
8791
// --------------------------- Prepare input ------------------------------------------------------
8892
auto config = ov_model->has_rt_info("model_info") ? ov_model->get_rt_info<ov::AnyMap>("model_info") : ov::AnyMap{};
@@ -177,23 +181,18 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
177181
}
178182

179183
Classification Classification::load(const std::string& model_path) {
180-
auto core = ov::Core();
181-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
184+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
185+
adapter->loadModel(model_path, "", {}, false);
182186

183-
if (model->has_rt_info("model_info", "model_type")) {
184-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
185-
<< std::endl;
186-
} else {
187-
throw std::runtime_error("Incorrect or unsupported model_type");
188-
}
187+
std::string model_type;
188+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
189189

190-
if (utils::model_has_embedded_processing(model)) {
191-
std::cout << "model already was serialized" << std::endl;
192-
} else {
193-
Classification::serialize(model);
190+
if (model_type.empty() || model_type != "Classification") {
191+
throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification");
194192
}
195-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
196-
adapter->loadModel(model, core, "AUTO");
193+
adapter->applyModelTransform(Classification::serialize);
194+
adapter->compileModel("AUTO", {});
195+
197196
return Classification(adapter);
198197
}
199198

src/cpp/src/tasks/detection.cpp

Lines changed: 10 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -12,23 +12,19 @@
1212
#include "utils/tensor.h"
1313

1414
DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) {
15-
auto core = ov::Core();
16-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
15+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
16+
adapter->loadModel(model_path, "", {}, false);
1717

18-
if (model->has_rt_info("model_info", "model_type")) {
19-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
20-
<< std::endl;
21-
} else {
22-
throw std::runtime_error("Incorrect or unsupported model_type");
23-
}
18+
std::string model_type;
19+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
20+
transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower);
2421

25-
if (utils::model_has_embedded_processing(model)) {
26-
std::cout << "model already was serialized" << std::endl;
27-
} else {
28-
SSD::serialize(model);
22+
if (model_type.empty() || model_type != "ssd") {
23+
throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd");
2924
}
30-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
31-
adapter->loadModel(model, core, "AUTO");
25+
adapter->applyModelTransform(SSD::serialize);
26+
adapter->compileModel("AUTO", {});
27+
3228
return DetectionModel(std::make_unique<SSD>(adapter), configuration);
3329
}
3430

src/cpp/src/tasks/detection/ssd.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,10 @@ std::map<std::string, ov::Tensor> SSD::preprocess(cv::Mat image) {
6868
}
6969

7070
void SSD::serialize(std::shared_ptr<ov::Model> ov_model) {
71+
if (utils::model_has_embedded_processing(ov_model)) {
72+
std::cout << "model already was serialized" << std::endl;
73+
return;
74+
}
7175
auto output_mode = ov_model->outputs().size() > 1 ? SSDOutputMode::multi : SSDOutputMode::single;
7276

7377
auto input_tensor = ov_model->inputs()[0];

src/cpp/src/tasks/instance_segmentation.cpp

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,10 @@ cv::Mat segm_postprocess(const SegmentedObject& box, const cv::Mat& unpadded, in
122122
}
123123

124124
void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
125+
if (utils::model_has_embedded_processing(ov_model)) {
126+
std::cout << "model already was serialized" << std::endl;
127+
return;
128+
}
125129
if (ov_model->inputs().size() != 1) {
126130
throw std::logic_error("MaskRCNNModel model wrapper supports topologies with only 1 input");
127131
}
@@ -187,23 +191,18 @@ void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
187191
}
188192

189193
InstanceSegmentation InstanceSegmentation::load(const std::string& model_path) {
190-
auto core = ov::Core();
191-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
192-
193-
if (model->has_rt_info("model_info", "model_type")) {
194-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
195-
<< std::endl;
196-
} else {
197-
throw std::runtime_error("Incorrect or unsupported model_type");
198-
}
194+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
195+
adapter->loadModel(model_path, "", {}, false);
199196

200-
if (utils::model_has_embedded_processing(model)) {
201-
std::cout << "model already was serialized" << std::endl;
202-
} else {
203-
serialize(model);
197+
std::string model_type;
198+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
199+
200+
if (model_type.empty() || model_type != "MaskRCNN") {
201+
throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN");
204202
}
205-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
206-
adapter->loadModel(model, core, "AUTO");
203+
adapter->applyModelTransform(InstanceSegmentation::serialize);
204+
adapter->compileModel("AUTO", {});
205+
207206
return InstanceSegmentation(adapter);
208207
}
209208

src/cpp/src/tasks/semantic_segmentation.cpp

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -21,27 +21,26 @@ cv::Mat get_activation_map(const cv::Mat& features) {
2121
}
2222

2323
SemanticSegmentation SemanticSegmentation::load(const std::string& model_path) {
24-
auto core = ov::Core();
25-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
24+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
25+
adapter->loadModel(model_path, "", {}, false);
2626

27-
if (model->has_rt_info("model_info", "model_type")) {
28-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
29-
<< std::endl;
30-
} else {
31-
throw std::runtime_error("Incorrect or unsupported model_type");
32-
}
27+
std::string model_type;
28+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
3329

34-
if (utils::model_has_embedded_processing(model)) {
35-
std::cout << "model already was serialized" << std::endl;
36-
} else {
37-
SemanticSegmentation::serialize(model);
30+
if (model_type.empty() || model_type != "Segmentation") {
31+
throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation");
3832
}
39-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
40-
adapter->loadModel(model, core, "AUTO");
33+
adapter->applyModelTransform(SemanticSegmentation::serialize);
34+
adapter->compileModel("AUTO", {});
35+
4136
return SemanticSegmentation(adapter);
4237
}
4338

4439
void SemanticSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
40+
if (utils::model_has_embedded_processing(ov_model)) {
41+
std::cout << "model already was serialized" << std::endl;
42+
return;
43+
}
4544
if (ov_model->inputs().size() != 1) {
4645
throw std::logic_error("Segmentation model wrapper supports topologies with only 1 input");
4746
}

0 commit comments

Comments (0)