Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 37 additions & 5 deletions src/cpp/models/include/models/image_model.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,12 @@
#include <memory>
#include <string>

#include "adapters/inference_adapter.h"
#include "models/input_data.h"
#include "models/model_base.h"
#include "models/results.h"
#include "utils/image_utils.h"
#include "utils/ocv_common.hpp"
#include "utils/args_helper.hpp"

namespace ov {
class InferRequest;
Expand All @@ -20,7 +23,7 @@ struct InputData;
struct InternalModelData;

// ImageModel implements preprocess(); ImageModel's direct or indirect children are expected to implement postprocess()
class ImageModel : public ModelBase {
class ImageModel {
public:
/// Constructor
/// @param modelFile name of model to load
Expand All @@ -33,9 +36,24 @@ class ImageModel : public ModelBase {

ImageModel(std::shared_ptr<ov::Model>& model, const ov::AnyMap& configuration);
ImageModel(std::shared_ptr<InferenceAdapter>& adapter, const ov::AnyMap& configuration = {});
using ModelBase::ModelBase;

std::shared_ptr<InternalModelData> preprocess(const InputData& inputData, InferenceInput& input) override;
virtual std::shared_ptr<InternalModelData> preprocess(const InputData& inputData, InferenceInput& input);
virtual std::unique_ptr<ResultBase> postprocess(InferenceResult& infResult) = 0;

void load(ov::Core& core, const std::string& device, size_t num_infer_requests = 1);

std::shared_ptr<ov::Model> prepare();

virtual size_t getNumAsyncExecutors() const;
virtual bool isReady();
virtual void awaitAll();
virtual void awaitAny();
virtual void setCallback(
std::function<void(std::unique_ptr<ResultBase>, const ov::AnyMap& callback_args)> callback);

std::shared_ptr<ov::Model> getModel();
std::shared_ptr<InferenceAdapter> getInferenceAdapter();

static std::vector<std::string> loadLabels(const std::string& labelFilename);
std::shared_ptr<ov::Model> embedProcessing(std::shared_ptr<ov::Model>& model,
const std::string& inputName,
Expand All @@ -54,7 +72,7 @@ class ImageModel : public ModelBase {

protected:
RESIZE_MODE selectResizeMode(const std::string& resize_type);
void updateModelInfo() override;
virtual void updateModelInfo();
void init_from_config(const ov::AnyMap& top_priority, const ov::AnyMap& mid_priority);

std::string getLabelName(size_t labelID) {
Expand All @@ -73,4 +91,18 @@ class ImageModel : public ModelBase {
bool reverse_input_channels = false;
std::vector<float> scale_values;
std::vector<float> mean_values;

protected:
virtual void prepareInputsOutputs(std::shared_ptr<ov::Model>& model) = 0;

InputTransform inputTransform = InputTransform();

std::shared_ptr<ov::Model> model;
std::vector<std::string> inputNames;
std::vector<std::string> outputNames;
std::string modelFile;
std::shared_ptr<InferenceAdapter> inferenceAdapter;
std::map<std::string, ov::Layout> inputsLayouts;
ov::Layout getInputLayout(const ov::Output<ov::Node>& input);
std::function<void(std::unique_ptr<ResultBase>, const ov::AnyMap&)> lastCallback;
};
73 changes: 0 additions & 73 deletions src/cpp/models/include/models/model_base.h

This file was deleted.

196 changes: 184 additions & 12 deletions src/cpp/models/src/image_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,137 @@
#include <utils/ocv_common.hpp>
#include <vector>

#include "adapters/openvino_adapter.h"
#include "models/input_data.h"
#include "models/internal_model_data.h"
#include "models/results.h"
#include "utils/common.hpp"

namespace {
// RAII helper: installs a temporary result callback on an ImageModel for the
// lifetime of this object, then restores the previously registered callback
// (or an inert no-op when there was none) on destruction.
class TmpCallbackSetter {
public:
    ImageModel* model;
    std::function<void(std::unique_ptr<ResultBase>, const ov::AnyMap&)> last_callback;

    TmpCallbackSetter(ImageModel* owner,
                      std::function<void(std::unique_ptr<ResultBase>, const ov::AnyMap&)> scoped_callback,
                      std::function<void(std::unique_ptr<ResultBase>, const ov::AnyMap&)> previous_callback)
        : model(owner),
          last_callback(std::move(previous_callback)) {
        model->setCallback(std::move(scoped_callback));
    }

    ~TmpCallbackSetter() {
        if (!last_callback) {
            // Nothing to restore — leave a harmless empty callback installed.
            model->setCallback([](std::unique_ptr<ResultBase>, const ov::AnyMap&) {});
        } else {
            model->setCallback(last_callback);
        }
    }
};
}  // namespace

ImageModel::ImageModel(const std::string& modelFile,
const std::string& resize_type,
bool useAutoResize,
const std::string& layout)
: ModelBase(modelFile, layout),
useAutoResize(useAutoResize),
resizeMode(selectResizeMode(resize_type)) {}
: useAutoResize(useAutoResize),
resizeMode(selectResizeMode(resize_type)),
modelFile(modelFile),
inputsLayouts(parseLayoutString(layout)) {
auto core = ov::Core();
model = core.read_model(modelFile);
}


// Compiles the held ov::Model for the given device through the inference adapter.
// Creates a default OpenVINOInferenceAdapter when none was injected earlier
// (e.g. via the adapter-based constructor).
// @param core                OpenVINO Core used for compilation
// @param device              target device name (e.g. "CPU")
// @param num_infer_requests  number of infer requests to create for async use
void ImageModel::load(ov::Core& core, const std::string& device, size_t num_infer_requests) {
    if (!inferenceAdapter) {
        inferenceAdapter = std::make_shared<OpenVINOInferenceAdapter>();
    }

    // Update model_info erased by pre/postprocessing
    updateModelInfo();

    inferenceAdapter->loadModel(model, core, device, {}, num_infer_requests);
}

// Runs model-specific input/output preprocessing (prepareInputsOutputs is
// implemented by derived classes), logs basic model info, and forces the
// in-memory model to batch size 1.
// @return the prepared ov::Model, ready to be passed to load()
std::shared_ptr<ov::Model> ImageModel::prepare() {
    prepareInputsOutputs(model);
    logBasicModelInfo(model);
    ov::set_batch(model, 1);

    return model;
}

// Resolves the layout for a model input. Precedence: explicit layout stored in
// the input's rt_info wins; otherwise fall back to the layouts parsed from the
// user-supplied "layout" string; when none was given, auto-detect from the
// input's partial shape (with a warning).
ov::Layout ImageModel::getInputLayout(const ov::Output<ov::Node>& input) {
    ov::Layout layout = ov::layout::get_layout(input);
    if (layout.empty()) {
        if (inputsLayouts.empty()) {
            layout = getLayoutFromShape(input.get_partial_shape());
            slog::warn << "Automatically detected layout '" << layout.to_string() << "' for input '"
                       << input.get_any_name() << "' will be used." << slog::endl;
        } else if (inputsLayouts.size() == 1) {
            // A single user-provided layout applies to every input.
            layout = inputsLayouts.begin()->second;
        } else {
            // NOTE(review): map operator[] default-inserts an empty layout when
            // the input name is missing from the parsed map — confirm intended.
            layout = inputsLayouts[input.get_any_name()];
        }
    }

    return layout;
}

// Number of infer requests available for async execution, as reported by the adapter.
size_t ImageModel::getNumAsyncExecutors() const {
    return inferenceAdapter->getNumAsyncExecutors();
}

// True when the adapter has a free infer request to accept a new async submission.
bool ImageModel::isReady() {
    return inferenceAdapter->isReady();
}
// Blocks until every in-flight async inference has completed.
void ImageModel::awaitAll() {
    inferenceAdapter->awaitAll();
}
// Blocks until at least one in-flight async inference completes.
void ImageModel::awaitAny() {
    inferenceAdapter->awaitAny();
}

// Registers a user callback invoked with the postprocessed result of each
// async inference. The callback is remembered in lastCallback so it can be
// restored after temporary overrides (see TmpCallbackSetter usage).
// @param callback receives the owned result plus the caller-supplied args map
void ImageModel::setCallback(
    std::function<void(std::unique_ptr<ResultBase>, const ov::AnyMap& callback_args)> callback) {
    lastCallback = callback;
    inferenceAdapter->setCallback([this, callback](ov::InferRequest request, CallbackData args) {
        InferenceResult result;

        // Collect every output tensor of the finished request, keyed by name.
        InferenceOutput output;
        for (const auto& item : this->getInferenceAdapter()->getOutputNames()) {
            output.emplace(item, request.get_tensor(item));
        }

        result.outputsData = std::move(output);
        // `args` may be null — the final call below already guards for it, so
        // guard the dereference here too instead of calling find() blindly.
        if (args) {
            auto model_data_iter = args->find("internalModelData");
            if (model_data_iter != args->end()) {
                result.internalModelData = std::move(model_data_iter->second.as<std::shared_ptr<InternalModelData>>());
            }
        }
        auto retVal = this->postprocess(result);
        // Copy the shared ResultBase part (e.g. frame metadata) into the derived result.
        *retVal = static_cast<ResultBase&>(result);
        callback(std::move(retVal), args ? *args : ov::AnyMap());
    });
}

// Returns the in-memory ov::Model, refreshing its "model_info" rt_info first
// so serialized metadata stays in sync with current settings.
// @throws std::runtime_error when no ov::Model is held (e.g. the instance was
//         built from an adapter that does not expose one)
std::shared_ptr<ov::Model> ImageModel::getModel() {
    if (!model) {
        throw std::runtime_error(std::string("ov::Model is not accessible for the current model adapter: ") +
                                 typeid(inferenceAdapter).name());
    }

    updateModelInfo();
    return model;
}

// Accessor for the adapter that owns the compiled model.
// @throws std::runtime_error if the model has not been loaded yet
std::shared_ptr<InferenceAdapter> ImageModel::getInferenceAdapter() {
    if (inferenceAdapter == nullptr) {
        throw std::runtime_error("Model wasn't loaded");
    }
    return inferenceAdapter;
}


RESIZE_MODE ImageModel::selectResizeMode(const std::string& resize_type) {
RESIZE_MODE resize = RESIZE_FILL;
Expand Down Expand Up @@ -68,36 +188,88 @@ void ImageModel::init_from_config(const ov::AnyMap& top_priority, const ov::AnyM
}

ImageModel::ImageModel(std::shared_ptr<ov::Model>& model, const ov::AnyMap& configuration)
: ModelBase(model, configuration) {
: model(model) {
auto layout_iter = configuration.find("layout");
std::string layout = "";

if (layout_iter != configuration.end()) {
layout = layout_iter->second.as<std::string>();
} else {
if (model->has_rt_info("model_info", "layout")) {
layout = model->get_rt_info<std::string>("model_info", "layout");
}
}
inputsLayouts = parseLayoutString(layout);
init_from_config(configuration,
model->has_rt_info("model_info") ? model->get_rt_info<ov::AnyMap>("model_info") : ov::AnyMap{});
}

ImageModel::ImageModel(std::shared_ptr<InferenceAdapter>& adapter, const ov::AnyMap& configuration)
: ModelBase(adapter, configuration) {
: inferenceAdapter(adapter) {
const ov::AnyMap& adapter_configuration = adapter->getModelConfig();

std::string layout = "";
layout = get_from_any_maps("layout", configuration, adapter_configuration, layout);
inputsLayouts = parseLayoutString(layout);

inputNames = adapter->getInputNames();
outputNames = adapter->getOutputNames();

init_from_config(configuration, adapter->getModelConfig());
}

std::unique_ptr<ResultBase> ImageModel::inferImage(const ImageInputData& inputData) {
return ModelBase::infer(static_cast<const InputData&>(inputData));
;
InferenceInput inputs;
InferenceResult result;
auto internalModelData = this->preprocess(inputData, inputs);

result.outputsData = inferenceAdapter->infer(inputs);
result.internalModelData = std::move(internalModelData);

auto retVal = this->postprocess(result);
*retVal = static_cast<ResultBase&>(result);
return retVal;
}

std::vector<std::unique_ptr<ResultBase>> ImageModel::inferBatchImage(const std::vector<ImageInputData>& inputImgs) {
std::vector<std::reference_wrapper<const InputData>> inputData;
std::vector<std::reference_wrapper<const ImageInputData>> inputData;
inputData.reserve(inputImgs.size());
for (const auto& img : inputImgs) {
inputData.push_back(static_cast<const InputData&>(img));
inputData.push_back(img);
}
auto results = std::vector<std::unique_ptr<ResultBase>>(inputData.size());
auto setter = TmpCallbackSetter(
this,
[&](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
size_t id = callback_args.find("id")->second.as<size_t>();
results[id] = std::move(result);
},
lastCallback);
size_t req_id = 0;
for (const auto& data : inputData) {
inferAsync(data, {{"id", req_id++}});
}
return ModelBase::inferBatch(inputData);
awaitAll();
return results;
}

void ImageModel::inferAsync(const ImageInputData& inputData, const ov::AnyMap& callback_args) {
ModelBase::inferAsync(static_cast<const InputData&>(inputData), callback_args);
InferenceInput inputs;
auto internalModelData = this->preprocess(inputData, inputs);
auto callback_args_ptr = std::make_shared<ov::AnyMap>(callback_args);
(*callback_args_ptr)["internalModelData"] = std::move(internalModelData);
inferenceAdapter->inferAsync(inputs, callback_args_ptr);
}

void ImageModel::updateModelInfo() {
ModelBase::updateModelInfo();
if (!model) {
throw std::runtime_error("The ov::Model object is not accessible");
}

if (!inputsLayouts.empty()) {
auto layouts = formatLayouts(inputsLayouts);
model->set_rt_info(layouts, "model_info", "layout");
}

model->set_rt_info(useAutoResize, "model_info", "auto_resize");
model->set_rt_info(formatResizeMode(resizeMode), "model_info", "resize_type");
Expand Down
Loading
Loading