8 changes: 4 additions & 4 deletions src/cpp/include/adapters/inference_adapter.h
@@ -31,11 +31,11 @@ class InferenceAdapter {
     virtual void awaitAll() = 0;
     virtual void awaitAny() = 0;
     virtual size_t getNumAsyncExecutors() const = 0;
-    virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
-                           ov::Core& core,
+    virtual void loadModel(const std::string& modelPath,
                            const std::string& device = "",
-                           const ov::AnyMap& compilationConfig = {},
-                           size_t max_num_requests = 0) = 0;
+                           const ov::AnyMap& adapterConfig = {},
+                           bool preCompile = true) = 0;
+    virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) = 0;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
     virtual ov::PartialShape getOutputShape(const std::string& inputName) const = 0;
     virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const = 0;
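Note on the new API: loading is now decoupled from compilation. A minimal sketch of the two-phase flow against OpenVINOInferenceAdapter (the include path, model path, and device are illustrative assumptions, not taken from this PR):

    #include <memory>

    #include "adapters/openvino_adapter.h"  // assumed include path

    int main() {
        auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
        // preCompile = false: read the model but defer compilation.
        adapter->loadModel("model.xml", "", {}, false);
        // ... the uncompiled model could be inspected or transformed here ...
        adapter->compileModel("CPU", {});  // compile explicitly once ready
        return 0;
    }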
13 changes: 7 additions & 6 deletions src/cpp/include/adapters/openvino_adapter.h
@@ -25,11 +25,11 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     virtual bool isReady();
     virtual void awaitAll();
     virtual void awaitAny();
-    virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
-                           ov::Core& core,
+    virtual void loadModel(const std::string& modelPath,
                            const std::string& device = "",
-                           const ov::AnyMap& compilationConfig = {},
-                           size_t max_num_requests = 1) override;
+                           const ov::AnyMap& adapterConfig = {},
+                           bool preCompile = true) override;
+    virtual void compileModel(const std::string& device = "", const ov::AnyMap& adapterConfig = {}) override;
     virtual size_t getNumAsyncExecutors() const;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
     virtual ov::PartialShape getOutputShape(const std::string& outputName) const override;
@@ -39,6 +39,8 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     virtual std::vector<std::string> getOutputNames() const override;
     virtual const ov::AnyMap& getModelConfig() const override;
 
+    void applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t);
+
 protected:
     void initInputsOutputs();
 
@@ -48,7 +50,6 @@ class OpenVINOInferenceAdapter : public InferenceAdapter {
     std::vector<std::string> outputNames;
    std::unique_ptr<AsyncInferQueue> asyncQueue;
     ov::AnyMap modelConfig;  // the content of model_info section of rt_info
-
 public:
     std::shared_ptr<ov::Model> model;
     ov::CompiledModel compiledModel;
 };
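The new applyModelTransform hook lets callers mutate the still-uncompiled ov::Model between loadModel(..., false) and compileModel(); the implementation re-reads the model_info rt_info after the transform runs. A small sketch, assuming a single-input model and an illustrative static shape:

    // 'adapter' as in the previous sketch, loaded with preCompile = false.
    adapter->applyModelTransform([](std::shared_ptr<ov::Model>& m) {
        m->reshape(ov::PartialShape{1, 3, 224, 224});  // illustrative NCHW shape
    });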
37 changes: 31 additions & 6 deletions src/cpp/src/adapters/openvino_adapter.cpp
@@ -9,12 +9,16 @@
 #include <stdexcept>
 #include <vector>
 
-void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>& model,
-                                         ov::Core& core,
-                                         const std::string& device,
-                                         const ov::AnyMap& compilationConfig,
-                                         size_t max_num_requests) {
-    ov::AnyMap customCompilationConfig(compilationConfig);
+#include "utils/config.h"
+
+void OpenVINOInferenceAdapter::compileModel(const std::string& device, const ov::AnyMap& adapterConfig) {
+    if (!model) {
+        throw std::runtime_error("Model is not loaded");
+    }
+    size_t max_num_requests = 1;
+    max_num_requests = utils::get_from_any_maps("max_num_requests", adapterConfig, {}, max_num_requests);
+
+    ov::AnyMap customCompilationConfig(adapterConfig);
     if (max_num_requests != 1) {
         if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
             customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::THROUGHPUT;
@@ -30,10 +34,31 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>&
         }
     }
 
+    ov::Core core;
     compiledModel = core.compile_model(model, device, customCompilationConfig);
     asyncQueue = std::make_unique<AsyncInferQueue>(compiledModel, max_num_requests);
     initInputsOutputs();
+}
+
+void OpenVINOInferenceAdapter::loadModel(const std::string& modelPath,
+                                         const std::string& device,
+                                         const ov::AnyMap& adapterConfig,
+                                         bool preCompile) {
+    ov::Core core;
+    model = core.read_model(modelPath);
+    if (model->has_rt_info({"model_info"})) {
+        modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
+    }
+    if (preCompile) {
+        compileModel(device, adapterConfig);
+    }
+}
+
+void OpenVINOInferenceAdapter::applyModelTransform(std::function<void(std::shared_ptr<ov::Model>&)> t) {
+    if (!model) {
+        throw std::runtime_error("Model is not loaded");
+    }
+    t(model);
     if (model->has_rt_info({"model_info"})) {
         modelConfig = model->get_rt_info<ov::AnyMap>("model_info");
     }
 }
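compileModel now reads max_num_requests from the adapter config rather than taking it as a parameter: any value other than 1 switches the compile config to the THROUGHPUT performance hint unless the caller set PERFORMANCE_HINT explicitly. A sketch of how a caller could opt in, assuming utils::get_from_any_maps extracts the key as shown above and the extra entry is tolerated downstream:

    // Illustrative throughput-oriented setup with four infer requests.
    ov::AnyMap config{{"max_num_requests", static_cast<size_t>(4)}};
    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
    adapter->loadModel("model.xml", "", config, false);  // placeholder path
    adapter->compileModel("CPU", config);  // PERFORMANCE_HINT defaults to THROUGHPUT here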
27 changes: 15 additions & 12 deletions src/cpp/src/tasks/anomaly.cpp
@@ -5,6 +5,11 @@
 #include "utils/tensor.h"
 
 void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
+
     auto input = ov_model->inputs().front();
 
     auto layout = ov::layout::get_layout(input);
@@ -47,23 +52,21 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
 }
 
 Anomaly Anomaly::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModel(model_path, "", {}, false);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+
+    if (!model_type.empty()) {
+        std::cout << "has model type in info: " << model_type << std::endl;
     } else {
         throw std::runtime_error("Incorrect or unsupported model_type");
     }
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        serialize(model);
-    }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(Anomaly::serialize);
+    adapter->compileModel("AUTO", {});
 
     return Anomaly(adapter);
 }
27 changes: 13 additions & 14 deletions src/cpp/src/tasks/classification.cpp
@@ -83,6 +83,10 @@ std::vector<size_t> get_non_xai_output_indices(const std::vector<ov::Output<ov::
 } // namespace
 
 void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     // --------------------------- Configure input & output -------------------------------------------------
     // --------------------------- Prepare input ------------------------------------------------------
     auto config = ov_model->has_rt_info("model_info") ? ov_model->get_rt_info<ov::AnyMap>("model_info") : ov::AnyMap{};
@@ -177,23 +181,18 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
 }
 
 Classification Classification::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModel(model_path, "", {}, false);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        Classification::serialize(model);
+    if (model_type.empty() || model_type != "Classification") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(Classification::serialize);
+    adapter->compileModel("AUTO", {});
 
     return Classification(adapter);
 }
24 changes: 10 additions & 14 deletions src/cpp/src/tasks/detection.cpp
@@ -12,23 +12,19 @@
 #include "utils/tensor.h"
 
 DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModel(model_path, "", {}, false);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+    transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        SSD::serialize(model);
+    if (model_type.empty() || model_type != "ssd") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(SSD::serialize);
+    adapter->compileModel("AUTO", {});
 
     return DetectionModel(std::make_unique<SSD>(adapter), configuration);
 }
4 changes: 4 additions & 0 deletions src/cpp/src/tasks/detection/ssd.cpp
@@ -68,6 +68,10 @@ std::map<std::string, ov::Tensor> SSD::preprocess(cv::Mat image) {
 }
 
 void SSD::serialize(std::shared_ptr<ov::Model> ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     auto output_mode = ov_model->outputs().size() > 1 ? SSDOutputMode::multi : SSDOutputMode::single;
 
     auto input_tensor = ov_model->inputs()[0];
29 changes: 14 additions & 15 deletions src/cpp/src/tasks/instance_segmentation.cpp
@@ -122,6 +122,10 @@ cv::Mat segm_postprocess(const SegmentedObject& box, const cv::Mat& unpadded, in
 }
 
 void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     if (ov_model->inputs().size() != 1) {
         throw std::logic_error("MaskRCNNModel model wrapper supports topologies with only 1 input");
     }
@@ -187,23 +191,18 @@ void InstanceSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
 }
 
 InstanceSegmentation InstanceSegmentation::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
-
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModel(model_path, "", {}, false);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        serialize(model);
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+
+    if (model_type.empty() || model_type != "MaskRCNN") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(InstanceSegmentation::serialize);
+    adapter->compileModel("AUTO", {});
 
     return InstanceSegmentation(adapter);
 }
27 changes: 13 additions & 14 deletions src/cpp/src/tasks/semantic_segmentation.cpp
@@ -21,27 +21,26 @@ cv::Mat get_activation_map(const cv::Mat& features) {
 }
 
 SemanticSegmentation SemanticSegmentation::load(const std::string& model_path) {
-    auto core = ov::Core();
-    std::shared_ptr<ov::Model> model = core.read_model(model_path);
+    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
+    adapter->loadModel(model_path, "", {}, false);
 
-    if (model->has_rt_info("model_info", "model_type")) {
-        std::cout << "has model type in info: " << model->get_rt_info<std::string>("model_info", "model_type")
-                  << std::endl;
-    } else {
-        throw std::runtime_error("Incorrect or unsupported model_type");
-    }
+    std::string model_type;
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
 
-    if (utils::model_has_embedded_processing(model)) {
-        std::cout << "model already was serialized" << std::endl;
-    } else {
-        SemanticSegmentation::serialize(model);
+    if (model_type.empty() || model_type != "Segmentation") {
+        throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation");
     }
-    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model, core, "AUTO");
+    adapter->applyModelTransform(SemanticSegmentation::serialize);
+    adapter->compileModel("AUTO", {});
 
     return SemanticSegmentation(adapter);
 }
 
 void SemanticSegmentation::serialize(std::shared_ptr<ov::Model>& ov_model) {
+    if (utils::model_has_embedded_processing(ov_model)) {
+        std::cout << "model already was serialized" << std::endl;
+        return;
+    }
     if (ov_model->inputs().size() != 1) {
         throw std::logic_error("Segmentation model wrapper supports topologies with only 1 input");
     }
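Taken together, every task loader in this PR follows the same four steps: read without compiling, validate model_type against the adapter's model_info, apply the task's serialize transform, then compile on AUTO. A condensed sketch of that shared sequence; the helper is hypothetical (not part of this PR) and assumes each task exposes a static serialize compatible with applyModelTransform, as the diffs above do:

    #include <memory>
    #include <stdexcept>
    #include <string>

    #include "adapters/openvino_adapter.h"  // assumed include paths
    #include "utils/config.h"

    // Hypothetical helper capturing the load sequence repeated across tasks.
    template <typename Task>
    std::shared_ptr<OpenVINOInferenceAdapter> load_task_adapter(const std::string& model_path,
                                                                const std::string& expected_type) {
        auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
        adapter->loadModel(model_path, "", {}, false);  // read only, defer compilation

        std::string model_type;
        model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
        if (model_type != expected_type) {
            throw std::runtime_error("Incorrect or unsupported model_type, expected: " + expected_type);
        }

        adapter->applyModelTransform(Task::serialize);  // embeds processing if missing
        adapter->compileModel("AUTO", {});
        return adapter;
    }

With such a helper, Classification::load would reduce to return Classification(load_task_adapter<Classification>(model_path, "Classification"));.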