Skip to content

Commit fbc9c75

Browse files
committed
Merge branch 'feature/cpp_refactoring' into rhecker/semantic_segmentation_tiling
2 parents f40a10d + 4afd3c9 commit fbc9c75

File tree

9 files changed

+111
-88
lines changed

9 files changed

+111
-88
lines changed

src/cpp/include/adapters/inference_adapter.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,11 +31,11 @@ class InferenceAdapter {
3131
virtual void awaitAll() = 0;
3232
virtual void awaitAny() = 0;
3333
virtual size_t getNumAsyncExecutors() const = 0;
34-
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
35-
ov::Core& core,
34+
virtual void loadModel(const std::string& modelPath,
3635
const std::string& device = "",
37-
const ov::AnyMap& compilationConfig = {},
38-
size_t max_num_requests = 0) = 0;
36+
const ov::AnyMap& adapterConfig = {},
37+
bool preCompile = true) = 0;
38+
virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) = 0;
3939
virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
4040
virtual ov::PartialShape getOutputShape(const std::string& inputName) const = 0;
4141
virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const = 0;

src/cpp/include/adapters/openvino_adapter.h

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,11 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
2525
virtual bool isReady();
2626
virtual void awaitAll();
2727
virtual void awaitAny();
28-
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
29-
ov::Core& core,
28+
virtual void loadModel(const std::string& modelPath,
3029
const std::string& device = "",
31-
const ov::AnyMap& compilationConfig = {},
32-
size_t max_num_requests = 1) override;
30+
const ov::AnyMap& adapterConfig = {},
31+
bool preCompile = true) override;
32+
virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) override;
3333
virtual size_t getNumAsyncExecutors() const;
3434
virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
3535
virtual ov::PartialShape getOutputShape(const std::string& outputName) const override;
@@ -39,6 +39,8 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
3939
virtual std::vector<std::string> getOutputNames() const override;
4040
virtual const ov::AnyMap& getModelConfig() const override;
4141

42+
void applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t);
43+
4244
protected:
4345
void initInputsOutputs();
4446

@@ -48,7 +50,6 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
4850
std::vector<std::string> outputNames;
4951
std::unique_ptr<AsyncInferQueue> asyncQueue;
5052
ov::AnyMap modelConfig; // the content of model_info section of rt_info
51-
52-
public:
53+
std::shared_ptr<ov::Model> model;
5354
ov::CompiledModel compiledModel;
5455
};

src/cpp/src/adapters/openvino_adapter.cpp

Lines changed: 31 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,16 @@
99
#include <stdexcept>
1010
#include <vector>
1111

12-
void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>& model,
13-
ov::Core& core,
14-
const std::string& device,
15-
const ov::AnyMap& compilationConfig,
16-
size_t max_num_requests) {
17-
ov::AnyMap customCompilationConfig(compilationConfig);
12+
#include "utils/config.h"
13+
14+
void OpenVINOInferenceAdapter::compileModel(const std::string& device, const ov::AnyMap& adapterConfig) {
15+
if (!model) {
16+
throw std::runtime_error("Model is not loaded");
17+
}
18+
size_t max_num_requests = 1;
19+
max_num_requests = utils::get_from_any_maps("max_num_requests", adapterConfig, {}, max_num_requests);
20+
21+
ov::AnyMap customCompilationConfig(adapterConfig);
1822
if (max_num_requests != 1) {
1923
if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
2024
customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::THROUGHPUT;
@@ -30,10 +34,31 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>&
3034
}
3135
}
3236

37+
ov::Core core;
3338
compiledModel = core.compile_model(model, device, customCompilationConfig);
3439
asyncQueue = std::make_unique<AsyncInferQueue>(compiledModel, max_num_requests);
3540
initInputsOutputs();
41+
}
3642

43+
void OpenVINOInferenceAdapter::loadModel(const std::string& modelPath,
44+
const std::string& device,
45+
const ov::AnyMap& adapterConfig,
46+
bool preCompile) {
47+
ov::Core core;
48+
model = core.read_model(modelPath);
49+
if (model->has_rt_info({"model_info"})) {
50+
modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
51+
}
52+
if (preCompile) {
53+
compileModel(device, adapterConfig);
54+
}
55+
}
56+
57+
void OpenVINOInferenceAdapter::applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t) {
58+
if (!model) {
59+
throw std::runtime_error("Model is not loaded");
60+
}
61+
t(model);
3762
if (model->has_rt_info({"model_info"})) {
3863
modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
3964
}

src/cpp/src/tasks/anomaly.cpp

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,11 @@
55
#include "utils/tensor.h"
66

77
void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
8+
if (utils::model_has_embedded_processing(ov_model)) {
9+
std::cout << "model already was serialized" << std::endl;
10+
return;
11+
}
12+
813
auto input = ov_model->inputs().front();
914

1015
auto layout = ov::layout::get_layout(input);
@@ -47,23 +52,21 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
4752
}
4853

4954
Anomaly Anomaly::load(const std::string& model_path) {
50-
auto core = ov::Core();
51-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
55+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
56+
adapter->loadModel(model_path, "", {}, false);
57+
58+
std::string model_type;
59+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
5260

53-
if (model->has_rt_info("model_info", "model_type")) {
54-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
55-
<< std::endl;
61+
if (!model_type.empty()) {
62+
std::cout << "has model type in info: " << model_type << std::endl;
5663
} else {
5764
throw std::runtime_error("Incorrect or unsupported model_type");
5865
}
5966

60-
if (utils::model_has_embedded_processing(model)) {
61-
std::cout << "model already was serialized" << std::endl;
62-
} else {
63-
serialize(model);
64-
}
65-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
66-
adapter->loadModel(model, core, "AUTO");
67+
adapter->applyModelTransform(Anomaly::serialize);
68+
adapter->compileModel("AUTO", {});
69+
6770
return Anomaly(adapter);
6871
}
6972

src/cpp/src/tasks/classification.cpp

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,10 @@ std::vector<size_t> get_non_xai_output_indices(const std::vector<ov::Output<ov::
8383
} // namespace
8484

8585
void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
86+
if (utils::model_has_embedded_processing(ov_model)) {
87+
std::cout << "model already was serialized" << std::endl;
88+
return;
89+
}
8690
// --------------------------- Configure input & output -------------------------------------------------
8791
// --------------------------- Prepare input ------------------------------------------------------
8892
auto config = ov_model->has_rt_info("model_info") ? ov_model->get_rt_info<ov::AnyMap>("model_info") : ov::AnyMap{};
@@ -177,23 +181,18 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
177181
}
178182

179183
Classification Classification::load(const std::string& model_path) {
180-
auto core = ov::Core();
181-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
184+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
185+
adapter->loadModel(model_path, "", {}, false);
182186

183-
if (model->has_rt_info("model_info", "model_type")) {
184-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
185-
<< std::endl;
186-
} else {
187-
throw std::runtime_error("Incorrect or unsupported model_type");
188-
}
187+
std::string model_type;
188+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
189189

190-
if (utils::model_has_embedded_processing(model)) {
191-
std::cout << "model already was serialized" << std::endl;
192-
} else {
193-
Classification::serialize(model);
190+
if (model_type.empty() || model_type != "Classification") {
191+
throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification");
194192
}
195-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
196-
adapter->loadModel(model, core, "AUTO");
193+
adapter->applyModelTransform(Classification::serialize);
194+
adapter->compileModel("AUTO", {});
195+
197196
return Classification(adapter);
198197
}
199198

src/cpp/src/tasks/detection.cpp

Lines changed: 10 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -12,23 +12,19 @@
1212
#include "utils/tensor.h"
1313

1414
DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) {
15-
auto core = ov::Core();
16-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
15+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
16+
adapter->loadModel(model_path, "", {}, false);
1717

18-
if (model->has_rt_info("model_info", "model_type")) {
19-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
20-
<< std::endl;
21-
} else {
22-
throw std::runtime_error("Incorrect or unsupported model_type");
23-
}
18+
std::string model_type;
19+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
20+
transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower);
2421

25-
if (utils::model_has_embedded_processing(model)) {
26-
std::cout << "model already was serialized" << std::endl;
27-
} else {
28-
SSD::serialize(model);
22+
if (model_type.empty() || model_type != "ssd") {
23+
throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd");
2924
}
30-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
31-
adapter->loadModel(model, core, "AUTO");
25+
adapter->applyModelTransform(SSD::serialize);
26+
adapter->compileModel("AUTO", {});
27+
3228
return DetectionModel(std::make_unique<SSD>(adapter), configuration);
3329
}
3430

src/cpp/src/tasks/detection/ssd.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,10 @@ std::map<std::string, ov::Tensor> SSD::preprocess(cv::Mat image) {
6868
}
6969

7070
void SSD::serialize(std::shared_ptr<ov::Model> ov_model) {
71+
if (utils::model_has_embedded_processing(ov_model)) {
72+
std::cout << "model already was serialized" << std::endl;
73+
return;
74+
}
7175
auto output_mode = ov_model->outputs().size() > 1 ? SSDOutputMode::multi : SSDOutputMode::single;
7276

7377
auto input_tensor = ov_model->inputs()[0];

src/cpp/src/tasks/instance_segmentation.cpp

Lines changed: 14 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,10 @@ cv::Mat segm_postprocess(const SegmentedObject& box, const cv::Mat& unpadded, in
122122
}
123123

124124
void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
125+
if (utils::model_has_embedded_processing(ov_model)) {
126+
std::cout << "model already was serialized" << std::endl;
127+
return;
128+
}
125129
if (ov_model->inputs().size() != 1) {
126130
throw std::logic_error("MaskRCNNModel model wrapper supports topologies with only 1 input");
127131
}
@@ -188,23 +192,18 @@ void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
188192
}
189193

190194
InstanceSegmentation InstanceSegmentation::load(const std::string& model_path, const ov::AnyMap& configuration) {
191-
auto core = ov::Core();
192-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
193-
194-
if (model->has_rt_info("model_info", "model_type")) {
195-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
196-
<< std::endl;
197-
} else {
198-
throw std::runtime_error("Incorrect or unsupported model_type");
199-
}
195+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
196+
adapter->loadModel(model_path, "", {}, false);
200197

201-
if (utils::model_has_embedded_processing(model)) {
202-
std::cout << "model already was serialized" << std::endl;
203-
} else {
204-
serialize(model);
198+
std::string model_type;
199+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
200+
201+
if (model_type.empty() || model_type != "MaskRCNN") {
202+
throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN");
205203
}
206-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
207-
adapter->loadModel(model, core, "AUTO");
204+
adapter->applyModelTransform(InstanceSegmentation::serialize);
205+
adapter->compileModel("AUTO", {});
206+
208207
return InstanceSegmentation(adapter, configuration);
209208
}
210209

@@ -228,11 +227,8 @@ InstanceSegmentationResult InstanceSegmentation::postprocess(InferenceResult& in
228227
float invertedScaleX = floatInputImgWidth / input_shape.width,
229228
invertedScaleY = floatInputImgHeight / input_shape.height;
230229

231-
std::cout << "got an inf result with image: " << infResult.inputImageSize << std::endl;
232-
std::cout << "resize mode: " << resize_mode << std::endl;
233230
int padLeft = 0, padTop = 0;
234231
if (utils::RESIZE_KEEP_ASPECT == resize_mode || utils::RESIZE_KEEP_ASPECT_LETTERBOX == resize_mode) {
235-
std::cout << "using some other resize mode..." << std::endl;
236232
invertedScaleX = invertedScaleY = std::max(invertedScaleX, invertedScaleY);
237233
if (utils::RESIZE_KEEP_ASPECT_LETTERBOX == resize_mode) {
238234
padLeft = (input_shape.width - int(std::round(floatInputImgWidth / invertedScaleX))) / 2;

src/cpp/src/tasks/semantic_segmentation.cpp

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -42,27 +42,26 @@ void normalize_soft_prediction(cv::Mat& soft_prediction, const cv::Mat& normaliz
4242
} // namespace
4343

4444
SemanticSegmentation SemanticSegmentation::load(const std::string& model_path, const ov::AnyMap& configuration) {
45-
auto core = ov::Core();
46-
std::shared_ptr<ov::Model> model = core.read_model(model_path);
45+
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
46+
adapter->loadModel(model_path, "", {}, false);
4747

48-
if (model->has_rt_info("model_info", "model_type")) {
49-
std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
50-
<< std::endl;
51-
} else {
52-
throw std::runtime_error("Incorrect or unsupported model_type");
53-
}
48+
std::string model_type;
49+
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
5450

55-
if (utils::model_has_embedded_processing(model)) {
56-
std::cout << "model already was serialized" << std::endl;
57-
} else {
58-
SemanticSegmentation::serialize(model);
51+
if (model_type.empty() || model_type != "Segmentation") {
52+
throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation");
5953
}
60-
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
61-
adapter->loadModel(model, core, "AUTO");
54+
adapter->applyModelTransform(SemanticSegmentation::serialize);
55+
adapter->compileModel("AUTO", {});
56+
6257
return SemanticSegmentation(adapter, configuration);
6358
}
6459

6560
void SemanticSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
61+
if (utils::model_has_embedded_processing(ov_model)) {
62+
std::cout << "model already was serialized" << std::endl;
63+
return;
64+
}
6665
if (ov_model->inputs().size() != 1) {
6766
throw std::logic_error("Segmentation model wrapper supports topologies with only 1 input");
6867
}

0 commit comments

Comments (0)