From 2c8140b734b3024e2b04523560d1c91b3663d5c2 Mon Sep 17 00:00:00 2001
From: Vladisalv Sovrasov
Date: Wed, 18 Jun 2025 00:29:23 +0900
Subject: [PATCH 1/6] Hide ov::Model in classification

---
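The adapter now owns the ov::Model instead of receiving a compiled one from the
task: a task loads the raw model without compiling, patches it in place, and
only then compiles. A minimal sketch of the new call sequence (the model path
and device strings below are placeholders, not part of this patch):

    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
    adapter->loadModelFile("model.xml", "", {}, /*preCompile=*/false);
    adapter->applyModelTransform(Classification::serialize);  // embed pre/postprocessing
    adapter->compileModel("AUTO", {});

With preCompile = true (the default), loadModelFile() compiles right after
read_model(), which suits models that already carry embedded processing.
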
 src/cpp/include/adapters/inference_adapter.h |  5 ++
 src/cpp/include/adapters/openvino_adapter.h  | 10 +++-
 src/cpp/src/adapters/openvino_adapter.cpp    | 52 ++++++++++++++++++++
 src/cpp/src/tasks/classification.cpp         | 27 +++++-----
 4 files changed, 78 insertions(+), 16 deletions(-)

diff --git a/src/cpp/include/adapters/inference_adapter.h b/src/cpp/include/adapters/inference_adapter.h
index 9911f655..1cda5525 100644
--- a/src/cpp/include/adapters/inference_adapter.h
+++ b/src/cpp/include/adapters/inference_adapter.h
@@ -36,6 +36,11 @@ class InferenceAdapter {
                            const std::string& device = "",
                            const ov::AnyMap& compilationConfig = {},
                            size_t max_num_requests = 0) = 0;
+    virtual void loadModelFile(const std::string& modelPath,
+                               const std::string& device = "",
+                               const ov::AnyMap& adapterConfig = {},
+                               bool preCompile = true) = 0;
+    virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) = 0;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
     virtual ov::PartialShape getOutputShape(const std::string& inputName) const = 0;
     virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const = 0;
diff --git a/src/cpp/include/adapters/openvino_adapter.h b/src/cpp/include/adapters/openvino_adapter.h
index 7713ae93..7c078936 100644
--- a/src/cpp/include/adapters/openvino_adapter.h
+++ b/src/cpp/include/adapters/openvino_adapter.h
@@ -30,6 +30,11 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
                            const std::string& device = "",
                            const ov::AnyMap& compilationConfig = {},
                            size_t max_num_requests = 1) override;
+    virtual void loadModelFile(const std::string& modelPath,
+                               const std::string& device = "",
+                               const ov::AnyMap& adapterConfig = {},
+                               bool preCompile = true) override;
+    virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) override;
     virtual size_t getNumAsyncExecutors() const;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
     virtual ov::PartialShape getOutputShape(const std::string& outputName) const override;
@@ -39,6 +44,8 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     virtual std::vector<std::string> getOutputNames() const override;
     virtual const ov::AnyMap& getModelConfig() const override;
 
+    void applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> func);
+
 protected:
     void initInputsOutputs();
@@ -48,7 +55,6 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     std::vector<std::string> outputNames;
     std::unique_ptr<AsyncInferQueue> asyncQueue;
     ov::AnyMap modelConfig;  // the content of model_info section of rt_info
-
-public:
+    std::shared_ptr<ov::Model> model;
     ov::CompiledModel compiledModel;
 };
diff --git a/src/cpp/src/adapters/openvino_adapter.cpp b/src/cpp/src/adapters/openvino_adapter.cpp
index 920e5378..9a7035e7 100644
--- a/src/cpp/src/adapters/openvino_adapter.cpp
+++ b/src/cpp/src/adapters/openvino_adapter.cpp
@@ -9,6 +9,8 @@
 #include
 #include
 
+#include "utils/config.h"
+
 void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<ov::Model>& model,
                                          ov::Core& core,
                                          const std::string& device,
                                          const ov::AnyMap& compilationConfig,
                                          size_t max_num_requests) {
@@ -39,6 +41,56 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<ov::Model>& model,
     }
 }
 
+void OpenVINOInferenceAdapter::compileModel(const std::string& device, const ov::AnyMap& adapterConfig) {
+    if (!model) {
+        throw std::runtime_error("Model is not loaded");
+    }
+    size_t max_num_requests = 1;
+    max_num_requests = utils::get_from_any_maps("max_num_requests", adapterConfig, {}, max_num_requests);
+
+    ov::AnyMap customCompilationConfig(adapterConfig);
+    if (max_num_requests != 1) {
+        if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
+            customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::THROUGHPUT;
+        }
+        if (max_num_requests > 0) {
+            if (customCompilationConfig.find("PERFORMANCE_HINT_NUM_REQUESTS") == customCompilationConfig.end()) {
+                customCompilationConfig["PERFORMANCE_HINT_NUM_REQUESTS"] = ov::hint::num_requests(max_num_requests);
+            }
+        }
+    } else {
+        if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
+            customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::LATENCY;
+        }
+    }
+
+    ov::Core core;
+    compiledModel = core.compile_model(model, device, customCompilationConfig);
+    asyncQueue = std::make_unique<AsyncInferQueue>(compiledModel, max_num_requests);
+    initInputsOutputs();
+}
+
+void OpenVINOInferenceAdapter::loadModelFile(const std::string& modelPath,
+                                             const std::string& device,
+                                             const ov::AnyMap& adapterConfig,
+                                             bool preCompile) {
+    ov::Core core;
+    model = core.read_model(modelPath);
+    if (model->has_rt_info({"model_info"})) {
+        modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
+    }
+    if (preCompile) {
+        compileModel(device, adapterConfig);
+    }
+}
+
+void OpenVINOInferenceAdapter::applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> func) {
+    if (!model) {
+        throw std::runtime_error("Model is not loaded");
+    }
+    func(model);
+}
+
 void OpenVINOInferenceAdapter::infer(const InferenceInput& input, InferenceOutput& output) {
     auto request = asyncQueue->operator[](asyncQueue->get_idle_request_id());
     for (const auto& [name, tensor] : input) {
diff --git a/src/cpp/src/tasks/classification.cpp b/src/cpp/src/tasks/classification.cpp
index 0f93912f..4bede9cb 100644
--- a/src/cpp/src/tasks/classification.cpp
+++ b/src/cpp/src/tasks/classification.cpp
@@ -83,6 +83,10 @@ std::vector<size_t> get_non_xai_output_indices(const std::vector<ov::Output<con
 }
 
 void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     // --------------------------- Configure input & output -------------------------------------------------
     // --------------------------- Prepare input ------------------------------------------------------
     auto config = ov_model->has_rt_info("model_info") ? ov_model->get_rt_info<ov::AnyMap>("model_info") : ov::AnyMap{};
@@ -177,23 +181,18 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
 }
 
 Classification Classification::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModelFile(model_path, "", {}, false);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        Classification::serialize(model);
+    if (model_type.empty() || model_type != "Classification") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(Classification::serialize);
+    adapter->compileModel("AUTO", {});
+
     return Classification(adapter);
 }

From ef82334ece855d59061f1414783ff9e35c99a877 Mon Sep 17 00:00:00 2001
From: Vladisalv Sovrasov
Date: Wed, 18 Jun 2025 00:46:04 +0900
Subject: [PATCH 2/6] Cover segmentation

---
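Segmentation gets the same treatment as classification: the model_type value
read from the model_info section of rt_info is now the only gate before
serialization. Roughly, assuming a model whose rt_info carries
model_type == "Segmentation" (the file name is a placeholder):

    auto seg = SemanticSegmentation::load("segmentation.xml");  // throws for any other model_type

serialize() is also made idempotent: it returns early once
utils::model_has_embedded_processing() reports that preprocessing is already
embedded, so applying the transform twice is harmless.
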
 src/cpp/src/tasks/semantic_segmentation.cpp | 27 ++++++++++-----------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/src/cpp/src/tasks/semantic_segmentation.cpp b/src/cpp/src/tasks/semantic_segmentation.cpp
index 39de5309..90cb4ea9 100644
--- a/src/cpp/src/tasks/semantic_segmentation.cpp
+++ b/src/cpp/src/tasks/semantic_segmentation.cpp
@@ -21,27 +21,26 @@ cv::Mat get_activation_map(const cv::Mat& features) {
 }
 
 SemanticSegmentation SemanticSegmentation::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModelFile(model_path, "", {}, false);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        SemanticSegmentation::serialize(model);
+    if (model_type.empty() || model_type != "Segmentation") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(SemanticSegmentation::serialize);
+    adapter->compileModel("AUTO", {});
+
     return SemanticSegmentation(adapter);
 }
 
 void SemanticSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     if (ov_model->inputs().size() != 1) {
         throw std::logic_error("Segmentation model wrapper supports topologies with only 1 input");
     }

From b39541e9988cdcccc0a5311b03b2625e6dfc1fa4 Mon Sep 17 00:00:00 2001
From: Vladisalv Sovrasov
Date: Wed, 18 Jun 2025 00:53:24 +0900
Subject: [PATCH 3/6] Cover MRCNN

---
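The MaskRCNN wrapper follows the pattern established in the previous two
patches: read model_type from the adapter's model config, require it to be
"MaskRCNN", then run serialize() through applyModelTransform() before
compiling on AUTO.
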
 src/cpp/src/tasks/instance_segmentation.cpp | 29 ++++++++++-----------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/src/cpp/src/tasks/instance_segmentation.cpp b/src/cpp/src/tasks/instance_segmentation.cpp
index c58290c2..6e51057f 100644
--- a/src/cpp/src/tasks/instance_segmentation.cpp
+++ b/src/cpp/src/tasks/instance_segmentation.cpp
@@ -122,6 +122,10 @@ cv::Mat segm_postprocess(const SegmentedObject& box, const cv::Mat& unpadded, in
 }
 
 void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     if (ov_model->inputs().size() != 1) {
         throw std::logic_error("MaskRCNNModel model wrapper supports topologies with only 1 input");
     }
@@ -187,23 +191,18 @@ void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
 }
 
 InstanceSegmentation InstanceSegmentation::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
-
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModelFile(model_path, "", {}, false);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        serialize(model);
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+
+    if (model_type.empty() || model_type != "MaskRCNN") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(InstanceSegmentation::serialize);
+    adapter->compileModel("AUTO", {});
+
     return InstanceSegmentation(adapter);
 }

From 43152fcf333e951825ea29a84a1b63a263c9d69c Mon Sep 17 00:00:00 2001
From: Vladisalv Sovrasov
Date: Wed, 18 Jun 2025 01:08:01 +0900
Subject: [PATCH 4/6] Cover detection

---
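Detection is the one task that normalizes case before matching: model_type is
lowercased and compared against "ssd", so both "SSD" and "ssd" exports pass.
The patch calls transform() unqualified; the standalone sketch below spells it
out as std::transform (illustration only, not code from this patch):

    #include <algorithm>
    #include <cassert>
    #include <string>

    int main() {
        std::string model_type = "SSD";
        std::transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower);
        assert(model_type == "ssd");
    }

The serialized SSD implementation is then handed to DetectionModel as a
unique_ptr, which leaves room for other detector heads later.
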
unsupported model_type, expected: ssd"); } - auto adapter = std::make_shared(); - adapter->loadModel(model, core, "AUTO"); + adapter->applyModelTransform(SSD::serialize); + adapter->compileModel("AUTO", {}); + return DetectionModel(std::make_unique(adapter), configuration); } diff --git a/src/cpp/src/tasks/detection/ssd.cpp b/src/cpp/src/tasks/detection/ssd.cpp index 2d656cad..3fb551dc 100644 --- a/src/cpp/src/tasks/detection/ssd.cpp +++ b/src/cpp/src/tasks/detection/ssd.cpp @@ -68,6 +68,10 @@ std::map SSD::preprocess(cv::Mat image) { } void SSD::serialize(std::shared_ptr ov_model) { + if (utils::model_has_embedded_processing(ov_model)) { + std::cout << "model already was serialized" << std::endl; + return; + } auto output_mode = ov_model->outputs().size() > 1 ? SSDOutputMode::multi : SSDOutputMode::single; auto input_tensor = ov_model->inputs()[0]; From f9ac4f457bd6ee3746563b7ce9fe59093ced9344 Mon Sep 17 00:00:00 2001 From: Vladisalv Sovrasov Date: Wed, 18 Jun 2025 01:16:55 +0900 Subject: [PATCH 5/6] Cover anomaly --- src/cpp/src/tasks/anomaly.cpp | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp index 4a0aa993..550ea407 100644 --- a/src/cpp/src/tasks/anomaly.cpp +++ b/src/cpp/src/tasks/anomaly.cpp @@ -5,6 +5,11 @@ #include "utils/tensor.h" void Anomaly::serialize(std::shared_ptr& ov_model) { + if (utils::model_has_embedded_processing(ov_model)) { + std::cout << "model already was serialized" << std::endl; + return; + } + auto input = ov_model->inputs().front(); auto layout = ov::layout::get_layout(input); @@ -47,23 +52,21 @@ void Anomaly::serialize(std::shared_ptr& ov_model) { } Anomaly Anomaly::load(const std::string& model_path) { - auto core = ov::Core(); - std::shared_ptr model = core.read_model(model_path); + auto adapter = std::make_shared(); + adapter->loadModelFile(model_path, "", {}, false); + + std::string model_type; + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); - if (model->has_rt_info("model_info", "model_type")) { - std::cout << "has model type in info: " << model->get_rt_info("model_info", "model_type") - << std::endl; + if (!model_type.empty()) { + std::cout << "has model type in info: " << model_type << std::endl; } else { throw std::runtime_error("Incorrect or unsupported model_type"); } - if (utils::model_has_embedded_processing(model)) { - std::cout << "model already was serialized" << std::endl; - } else { - serialize(model); - } - auto adapter = std::make_shared(); - adapter->loadModel(model, core, "AUTO"); + adapter->applyModelTransform(Anomaly::serialize); + adapter->compileModel("AUTO", {}); + return Anomaly(adapter); } From 8f76b40a91ea0e0fbce53f4b406aacbfdd936585 Mon Sep 17 00:00:00 2001 From: Vladisalv Sovrasov Date: Wed, 18 Jun 2025 02:22:52 +0900 Subject: [PATCH 6/6] Cleanup --- src/cpp/include/adapters/inference_adapter.h | 11 ++--- src/cpp/include/adapters/openvino_adapter.h | 13 ++---- src/cpp/src/adapters/openvino_adapter.cpp | 45 ++++---------------- src/cpp/src/tasks/anomaly.cpp | 2 +- src/cpp/src/tasks/classification.cpp | 2 +- src/cpp/src/tasks/detection.cpp | 2 +- src/cpp/src/tasks/instance_segmentation.cpp | 2 +- src/cpp/src/tasks/semantic_segmentation.cpp | 2 +- 8 files changed, 21 insertions(+), 58 deletions(-) diff --git a/src/cpp/include/adapters/inference_adapter.h b/src/cpp/include/adapters/inference_adapter.h index 1cda5525..e38a7780 100644 --- 
 src/cpp/src/tasks/anomaly.cpp | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp
index 4a0aa993..550ea407 100644
--- a/src/cpp/src/tasks/anomaly.cpp
+++ b/src/cpp/src/tasks/anomaly.cpp
@@ -5,6 +5,11 @@
 
 #include "utils/tensor.h"
 
 void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
+
     auto input = ov_model->inputs().front();
     auto layout = ov::layout::get_layout(input);
 
@@ -47,23 +52,21 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
 }
 
 Anomaly Anomaly::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModelFile(model_path, "", {}, false);
+
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
+    if (!model_type.empty()) {
+        std::cout << "has model type in info: " << model_type << std::endl;
     } else {
         throw std::runtime_error("Incorrect or unsupported model_type");
     }
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        serialize(model);
-    }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(Anomaly::serialize);
+    adapter->compileModel("AUTO", {});
+
     return Anomaly(adapter);
 }

From 8f76b40a91ea0e0fbce53f4b406aacbfdd936585 Mon Sep 17 00:00:00 2001
From: Vladisalv Sovrasov
Date: Wed, 18 Jun 2025 02:22:52 +0900
Subject: [PATCH 6/6] Cleanup

---
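With every task migrated, the transitional API folds together: the old
in-memory loadModel(model, core, ...) overload is removed and loadModelFile()
takes over its name, leaving a single path-based loadModel(). In addition,
applyModelTransform() now refreshes the cached model_info after running the
transform, so rt_info written by a serialize() call becomes visible through
getModelConfig(). The resulting surface, as a sketch (path and device are
placeholders):

    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
    adapter->loadModel("model.xml", "", {}, /*preCompile=*/false);
    adapter->applyModelTransform(Classification::serialize);  // modelConfig refreshed here
    adapter->compileModel("AUTO", {});
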
 src/cpp/include/adapters/inference_adapter.h | 11 ++---
 src/cpp/include/adapters/openvino_adapter.h  | 13 ++----
 src/cpp/src/adapters/openvino_adapter.cpp    | 45 ++++----------------
 src/cpp/src/tasks/anomaly.cpp                |  2 +-
 src/cpp/src/tasks/classification.cpp         |  2 +-
 src/cpp/src/tasks/detection.cpp              |  2 +-
 src/cpp/src/tasks/instance_segmentation.cpp  |  2 +-
 src/cpp/src/tasks/semantic_segmentation.cpp  |  2 +-
 8 files changed, 21 insertions(+), 58 deletions(-)

diff --git a/src/cpp/include/adapters/inference_adapter.h b/src/cpp/include/adapters/inference_adapter.h
index 1cda5525..e38a7780 100644
--- a/src/cpp/include/adapters/inference_adapter.h
+++ b/src/cpp/include/adapters/inference_adapter.h
@@ -31,15 +31,10 @@ class InferenceAdapter {
     virtual void awaitAll() = 0;
     virtual void awaitAny() = 0;
     virtual size_t getNumAsyncExecutors() const = 0;
-    virtual void loadModel(const std::shared_ptr<ov::Model>& model,
-                           ov::Core& core,
+    virtual void loadModel(const std::string& modelPath,
                            const std::string& device = "",
-                           const ov::AnyMap& compilationConfig = {},
-                           size_t max_num_requests = 0) = 0;
-    virtual void loadModelFile(const std::string& modelPath,
-                               const std::string& device = "",
-                               const ov::AnyMap& adapterConfig = {},
-                               bool preCompile = true) = 0;
+                           const ov::AnyMap& adapterConfig = {},
+                           bool preCompile = true) = 0;
     virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) = 0;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
     virtual ov::PartialShape getOutputShape(const std::string& inputName) const = 0;
diff --git a/src/cpp/include/adapters/openvino_adapter.h b/src/cpp/include/adapters/openvino_adapter.h
index 7c078936..31697cb1 100644
--- a/src/cpp/include/adapters/openvino_adapter.h
+++ b/src/cpp/include/adapters/openvino_adapter.h
@@ -25,15 +25,10 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     virtual bool isReady();
     virtual void awaitAll();
     virtual void awaitAny();
-    virtual void loadModel(const std::shared_ptr<ov::Model>& model,
-                           ov::Core& core,
+    virtual void loadModel(const std::string& modelPath,
                            const std::string& device = "",
-                           const ov::AnyMap& compilationConfig = {},
-                           size_t max_num_requests = 1) override;
-    virtual void loadModelFile(const std::string& modelPath,
-                               const std::string& device = "",
-                               const ov::AnyMap& adapterConfig = {},
-                               bool preCompile = true) override;
+                           const ov::AnyMap& adapterConfig = {},
+                           bool preCompile = true) override;
     virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) override;
     virtual size_t getNumAsyncExecutors() const;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
@@ -44,7 +39,7 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     virtual std::vector<std::string> getOutputNames() const override;
     virtual const ov::AnyMap& getModelConfig() const override;
 
-    void applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> func);
+    void applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t);
 
 protected:
     void initInputsOutputs();
diff --git a/src/cpp/src/adapters/openvino_adapter.cpp b/src/cpp/src/adapters/openvino_adapter.cpp
index 9a7035e7..beabbb68 100644
--- a/src/cpp/src/adapters/openvino_adapter.cpp
+++ b/src/cpp/src/adapters/openvino_adapter.cpp
@@ -11,36 +11,6 @@
 
 #include "utils/config.h"
 
-void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<ov::Model>& model,
-                                         ov::Core& core,
-                                         const std::string& device,
-                                         const ov::AnyMap& compilationConfig,
-                                         size_t max_num_requests) {
-    ov::AnyMap customCompilationConfig(compilationConfig);
-    if (max_num_requests != 1) {
-        if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
-            customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::THROUGHPUT;
-        }
-        if (max_num_requests > 0) {
-            if (customCompilationConfig.find("PERFORMANCE_HINT_NUM_REQUESTS") == customCompilationConfig.end()) {
-                customCompilationConfig["PERFORMANCE_HINT_NUM_REQUESTS"] = ov::hint::num_requests(max_num_requests);
-            }
-        }
-    } else {
-        if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
-            customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::LATENCY;
-        }
-    }
-
-    compiledModel = core.compile_model(model, device, customCompilationConfig);
-    asyncQueue = std::make_unique<AsyncInferQueue>(compiledModel, max_num_requests);
-    initInputsOutputs();
-
-    if (model->has_rt_info({"model_info"})) {
-        modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
-    }
-}
-
 void OpenVINOInferenceAdapter::compileModel(const std::string& device, const ov::AnyMap& adapterConfig) {
     if (!model) {
         throw std::runtime_error("Model is not loaded");
@@ -70,10 +40,10 @@ void OpenVINOInferenceAdapter::compileModel(const std::string& device, const ov:
     initInputsOutputs();
 }
 
-void OpenVINOInferenceAdapter::loadModelFile(const std::string& modelPath,
-                                             const std::string& device,
-                                             const ov::AnyMap& adapterConfig,
-                                             bool preCompile) {
+void OpenVINOInferenceAdapter::loadModel(const std::string& modelPath,
+                                         const std::string& device,
+                                         const ov::AnyMap& adapterConfig,
+                                         bool preCompile) {
     ov::Core core;
     model = core.read_model(modelPath);
     if (model->has_rt_info({"model_info"})) {
@@ -84,11 +54,14 @@ void OpenVINOInferenceAdapter::loadModelFile(const std::string& modelPath,
     }
 }
 
-void OpenVINOInferenceAdapter::applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> func) {
+void OpenVINOInferenceAdapter::applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t) {
     if (!model) {
         throw std::runtime_error("Model is not loaded");
     }
-    func(model);
+    t(model);
+    if (model->has_rt_info({"model_info"})) {
+        modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
+    }
 }
 
 void OpenVINOInferenceAdapter::infer(const InferenceInput& input, InferenceOutput& output) {
diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp
index 550ea407..29f42d46 100644
--- a/src/cpp/src/tasks/anomaly.cpp
+++ b/src/cpp/src/tasks/anomaly.cpp
@@ -53,7 +53,7 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
 
 Anomaly Anomaly::load(const std::string& model_path) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModelFile(model_path, "", {}, false);
+    adapter->loadModel(model_path, "", {}, false);
 
     std::string model_type;
     model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
diff --git a/src/cpp/src/tasks/classification.cpp b/src/cpp/src/tasks/classification.cpp
index 4bede9cb..3a74d341 100644
--- a/src/cpp/src/tasks/classification.cpp
+++ b/src/cpp/src/tasks/classification.cpp
@@ -182,7 +182,7 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
 
 Classification Classification::load(const std::string& model_path) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModelFile(model_path, "", {}, false);
+    adapter->loadModel(model_path, "", {}, false);
 
     std::string model_type;
     model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
diff --git a/src/cpp/src/tasks/detection.cpp b/src/cpp/src/tasks/detection.cpp
index 48272dbc..d05433fe 100644
--- a/src/cpp/src/tasks/detection.cpp
+++ b/src/cpp/src/tasks/detection.cpp
@@ -13,7 +13,7 @@
 
 DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModelFile(model_path, "", {}, false);
+    adapter->loadModel(model_path, "", {}, false);
 
     std::string model_type;
     model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
diff --git a/src/cpp/src/tasks/instance_segmentation.cpp b/src/cpp/src/tasks/instance_segmentation.cpp
index 6e51057f..a98cbc32 100644
--- a/src/cpp/src/tasks/instance_segmentation.cpp
+++ b/src/cpp/src/tasks/instance_segmentation.cpp
@@ -192,7 +192,7 @@ void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
 
 InstanceSegmentation InstanceSegmentation::load(const std::string& model_path) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModelFile(model_path, "", {}, false);
+    adapter->loadModel(model_path, "", {}, false);
 
     std::string model_type;
     model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
diff --git a/src/cpp/src/tasks/semantic_segmentation.cpp b/src/cpp/src/tasks/semantic_segmentation.cpp
index 90cb4ea9..588045ad 100644
--- a/src/cpp/src/tasks/semantic_segmentation.cpp
+++ b/src/cpp/src/tasks/semantic_segmentation.cpp
@@ -22,7 +22,7 @@ cv::Mat get_activation_map(const cv::Mat& features) {
 
 SemanticSegmentation SemanticSegmentation::load(const std::string& model_path) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModelFile(model_path, "", {}, false);
+    adapter->loadModel(model_path, "", {}, false);
 
     std::string model_type;
     model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);