Skip to content

Commit b3bef98

Browse files
committed
Move in code from POC
1 parent b5d22be commit b3bef98

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

43 files changed

+3297
-1671
lines changed

examples/cpp/asynchronous_api/main.cpp

Lines changed: 16 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55
#include <adapters/openvino_adapter.h>
6-
#include <models/detection_model.h>
7-
#include <models/results.h>
6+
#include <tasks/detection.h>
7+
#include <tasks/results.h>
88
#include <stddef.h>
99

1010
#include <cstdint>
@@ -30,55 +30,31 @@ int main(int argc, char* argv[]) try {
3030
}
3131

3232
// Instantiate Object Detection model
33-
auto model = DetectionModel::create_model(argv[1],
34-
{},
35-
"",
36-
false); // works with SSD models. Download it using Python Model API
37-
// Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to
38-
// obtain optimal performance.
39-
size_t num_requests = 0;
40-
static ov::Core core;
41-
model->load(core, "CPU", num_requests);
33+
auto model = DetectionModel::load(argv[1], {}); // works with SSD models. Download it using Python Model API
34+
//// Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to
35+
//// obtain optimal performance.
36+
//size_t num_requests = 0;
37+
//static ov::Core core;
38+
//model->load(core, "CPU", num_requests);
4239

43-
std::cout << "Async inference will be carried out by " << model->getNumAsyncExecutors() << " parallel executors\n";
44-
// Prepare batch data
45-
std::vector<ImageInputData> data;
46-
for (size_t i = 0; i < 3; i++) {
47-
data.push_back(ImageInputData(image));
48-
}
40+
//std::cout << "Async inference will be carried out by " << model->getNumAsyncExecutors() << " parallel executors\n";
41+
//// Prepare batch data
42+
std::vector<cv::Mat> data = {image};
43+
//for (size_t i = 0; i < 3; i++) {
44+
// data.push_back(ImageInputData(image));
45+
//}
4946

5047
// Batch inference is done by processing batch with num_requests parallel infer requests
5148
std::cout << "Starting batch inference\n";
52-
auto results = model->inferBatch(data);
49+
auto results = model.inferBatch(data);
5350

5451
std::cout << "Batch mode inference results:\n";
5552
for (const auto& result : results) {
56-
for (auto& obj : result->objects) {
53+
for (auto& obj : result.objects) {
5754
std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
5855
}
5956
std::cout << std::string(10, '-') << "\n";
6057
}
61-
std::cout << "Batch mode inference done\n";
62-
std::cout << "Async mode inference results:\n";
63-
64-
// Set callback to grab results once the inference is done
65-
model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
66-
auto det_result = std::unique_ptr<DetectionResult>(static_cast<DetectionResult*>(result.release()));
67-
68-
// callback_args can contain arbitrary data
69-
size_t id = callback_args.find("id")->second.as<size_t>();
70-
71-
std::cout << "Request with id " << id << " is finished\n";
72-
for (auto& obj : det_result->objects) {
73-
std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
74-
}
75-
std::cout << std::string(10, '-') << "\n";
76-
});
77-
78-
for (size_t i = 0; i < 3; i++) {
79-
model->inferAsync(image, {{"id", i}});
80-
}
81-
model->awaitAll();
8258
} catch (const std::exception& error) {
8359
std::cerr << error.what() << '\n';
8460
return 1;

examples/cpp/synchronous_api/main.cpp

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6-
#include <models/detection_model.h>
7-
#include <models/input_data.h>
8-
#include <models/results.h>
6+
#include <tasks/detection.h>
7+
#include <tasks/results.h>
98
#include <stddef.h>
109

1110
#include <cstdint>
@@ -31,13 +30,13 @@ int main(int argc, char* argv[]) try {
3130
}
3231

3332
// Instantiate Object Detection model
34-
auto model = DetectionModel::create_model(argv[1]); // works with SSD models. Download it using Python Model API
33+
auto model = DetectionModel::load(argv[1], {}); // works with SSD models. Download it using Python Model API
3534

3635
// Run the inference
37-
auto result = model->infer(image);
36+
auto result = model.infer(image);
3837

3938
// Process detections
40-
for (auto& obj : result->objects) {
39+
for (auto& obj : result.objects) {
4140
std::cout << " " << std::left << std::setw(9) << obj.label << " | " << std::setw(10) << obj.confidence << " | "
4241
<< std::setw(4) << int(obj.x) << " | " << std::setw(4) << int(obj.y) << " | " << std::setw(4)
4342
<< int(obj.x + obj.width) << " | " << std::setw(4) << int(obj.y + obj.height) << "\n";

src/cpp/CMakeLists.txt

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
cmake_minimum_required(VERSION 3.26)

set(vision_api_VERSION 0.0.0)

project(vision_api
        VERSION ${vision_api_VERSION}
        DESCRIPTION "OpenVINO Vision API"
        HOMEPAGE_URL "https://github.com/openvinotoolkit/model_api/"
        LANGUAGES CXX C)

find_package(OpenCV REQUIRED COMPONENTS core imgproc)

find_package(OpenVINO REQUIRED
             COMPONENTS Runtime Threading)

# CONFIGURE_DEPENDS re-checks the globs at build time so newly added sources
# are picked up without a manual re-run of CMake (supported since CMake 3.12).
file(GLOB TASK_SOURCES CONFIGURE_DEPENDS src/tasks/**/*.cpp)
file(GLOB TASKS_SOURCES CONFIGURE_DEPENDS src/tasks/*.cpp)
file(GLOB UTILS_SOURCES CONFIGURE_DEPENDS src/utils/*.cpp)
file(GLOB ADAPTERS_SOURCES CONFIGURE_DEPENDS src/adapters/*.cpp)
# BUG FIX: TILERS_SOURCES was referenced in add_library() below but never
# defined, so tiler sources were silently omitted from the build.
file(GLOB TILERS_SOURCES CONFIGURE_DEPENDS src/tilers/*.cpp)

add_library(model_api STATIC ${TASK_SOURCES} ${TASKS_SOURCES} ${UTILS_SOURCES} ${ADAPTERS_SOURCES} ${TILERS_SOURCES})

target_link_libraries(model_api PUBLIC openvino::runtime opencv_core opencv_imgproc)
target_include_directories(model_api PUBLIC ${PROJECT_SOURCE_DIR}/include)
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
#pragma once
#include <functional>
#include <map>
#include <memory>
#include <openvino/openvino.hpp>
#include <string>
#include <vector>

struct InputData;
struct InferenceResult;

using InferenceOutput = std::map<std::string, ov::Tensor>;
using InferenceInput = std::map<std::string, ov::Tensor>;
using CallbackData = std::shared_ptr<ov::AnyMap>;

/// Abstract interface over an inference backend.
/// Implementations own the compiled model and its infer requests; this
/// interface only declares the contract (no implementation is provided here).
class InferenceAdapter {
public:
    virtual ~InferenceAdapter() = default;

    /// Runs one synchronous inference and returns the named output tensors.
    virtual InferenceOutput infer(const InferenceInput& input) = 0;
    /// Synchronous inference writing into a caller-provided output map.
    virtual void infer(const InferenceInput& input, InferenceOutput& output) = 0;
    /// Registers the callback invoked when an async request completes.
    virtual void setCallback(std::function<void(ov::InferRequest, CallbackData)> callback) = 0;
    /// Submits one asynchronous inference; callback_args is handed back to the callback.
    virtual void inferAsync(const InferenceInput& input, CallbackData callback_args) = 0;
    /// True when a free infer request is available for inferAsync().
    virtual bool isReady() = 0;
    /// Blocks until every in-flight async request has completed.
    virtual void awaitAll() = 0;
    /// Blocks until at least one in-flight async request has completed.
    virtual void awaitAny() = 0;
    /// Number of parallel infer requests backing inferAsync().
    virtual size_t getNumAsyncExecutors() const = 0;
    /// Compiles the model on `device`; max_num_requests = 0 lets the
    /// implementation pick the request count automatically.
    virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
                           ov::Core& core,
                           const std::string& device = "",
                           const ov::AnyMap& compilationConfig = {},
                           size_t max_num_requests = 0) = 0;
    virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
    // BUG FIX (naming): parameter was previously called `inputName` even though
    // it selects an *output*; renamed for consistency with getOutputDatatype().
    virtual ov::PartialShape getOutputShape(const std::string& outputName) const = 0;
    virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const = 0;
    virtual ov::element::Type_t getOutputDatatype(const std::string& outputName) const = 0;
    virtual std::vector<std::string> getInputNames() const = 0;
    virtual std::vector<std::string> getOutputNames() const = 0;
    /// Contents of the model_info section of rt_info, as parsed at load time.
    virtual const ov::AnyMap& getModelConfig() const = 0;
};
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
#pragma once
#include <functional>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <vector>

#include "adapters/inference_adapter.h"
#include "utils/async_infer_queue.h"

/// InferenceAdapter implementation backed by OpenVINO, using an
/// AsyncInferQueue of parallel infer requests for the async API.
class OpenVINOInferenceAdapter : public InferenceAdapter {
public:
    OpenVINOInferenceAdapter() = default;

    // FIX: `override` was missing on several of these overriding functions
    // (setCallback, isReady, awaitAll, awaitAny, getNumAsyncExecutors), which
    // hides signature-mismatch errors; it is now applied uniformly. The
    // redundant `virtual` keyword is dropped in favour of `override`.
    InferenceOutput infer(const InferenceInput& input) override;
    void infer(const InferenceInput& input, InferenceOutput& output) override;
    void inferAsync(const InferenceInput& input, CallbackData callback_args) override;
    void setCallback(std::function<void(ov::InferRequest, CallbackData)> callback) override;
    bool isReady() override;
    void awaitAll() override;
    void awaitAny() override;
    // NOTE(review): the base interface defaults max_num_requests to 0 (auto),
    // while this override defaults it to 1 — the default actually used depends
    // on the static type the call is made through. Confirm which is intended.
    void loadModel(const std::shared_ptr<const ov::Model>& model,
                   ov::Core& core,
                   const std::string& device = "",
                   const ov::AnyMap& compilationConfig = {},
                   size_t max_num_requests = 1) override;
    size_t getNumAsyncExecutors() const override;
    ov::PartialShape getInputShape(const std::string& inputName) const override;
    ov::PartialShape getOutputShape(const std::string& outputName) const override;
    ov::element::Type_t getInputDatatype(const std::string& inputName) const override;
    ov::element::Type_t getOutputDatatype(const std::string& outputName) const override;
    std::vector<std::string> getInputNames() const override;
    std::vector<std::string> getOutputNames() const override;
    const ov::AnyMap& getModelConfig() const override;

protected:
    /// Caches input/output names from the compiled model.
    void initInputsOutputs();

    // Shared model state (duplicate `protected:` label from the original merged).
    std::vector<std::string> inputNames;
    std::vector<std::string> outputNames;
    std::unique_ptr<AsyncInferQueue> asyncQueue;
    ov::AnyMap modelConfig;  // the content of model_info section of rt_info

public:
    ov::CompiledModel compiledModel;
};

src/cpp/include/tasks/detection.h

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
#pragma once

#include <string>
#include <opencv2/opencv.hpp>
#include <openvino/openvino.hpp>
#include "adapters/inference_adapter.h"
#include "tasks/detection/ssd.h"
#include "tasks/results.h"
#include "utils/vision_pipeline.h"
#include "utils/tiling.h"

/// Object-detection task facade: wraps an SSD algorithm in either a plain
/// VisionPipeline or, when "tiling" is enabled in `configuration` and the
/// model config carries tiling properties, a TilingPipeline.
class DetectionModel {
public:
    std::unique_ptr<Pipeline<DetectionResult>> pipeline;

    // NOTE(review): the lambdas below capture `this` via [&] and are stored in
    // the pipeline; if a DetectionModel is copied or moved (load() returns by
    // value), the stored callbacks still point at the old object. Confirm that
    // copy/move are effectively disabled or that copy elision is guaranteed here.
    DetectionModel(std::unique_ptr<SSD> algorithm, const ov::AnyMap& configuration): algorithm(std::move(algorithm)) {
        auto config = this->algorithm->adapter->getModelConfig();
        if (configuration.count("tiling") && configuration.at("tiling").as<bool>()) {
            if (!utils::config_contains_tiling_info(config)) {
                throw std::runtime_error("Model config does not contain tiling properties.");
            }
            pipeline = std::make_unique<TilingPipeline<DetectionResult>>(this->algorithm->adapter,
                utils::get_tiling_info_from_config(config),
                [&](cv::Mat image) { return preprocess(image);},
                [&](InferenceResult result) { return postprocess(result);},
                [&](DetectionResult& result, const cv::Rect& coord) { return postprocess_tile(result, coord);},
                [&](const std::vector<DetectionResult>& tiles_results, const cv::Size& image_size, const std::vector<cv::Rect>& tile_coords, const utils::TilingInfo& tiling_info) { return merge_tiling_results(tiles_results, image_size, tile_coords, tiling_info);}
            );
        } else {
            pipeline = std::make_unique<VisionPipeline<DetectionResult>>(this->algorithm->adapter,
                [&](cv::Mat image) { return preprocess(image);},
                [&](InferenceResult result) { return postprocess(result);}
            );
        }
    }

    /// Converts an image into named input tensors for the adapter.
    InferenceInput preprocess(cv::Mat);
    /// Converts raw inference output into a DetectionResult.
    DetectionResult postprocess(InferenceResult);
    /// Adjusts a per-tile result into full-image coordinates.
    DetectionResult postprocess_tile(DetectionResult& result, const cv::Rect& coord);
    /// Merges per-tile results into one full-image result.
    DetectionResult merge_tiling_results(const std::vector<DetectionResult>& tiles_results, const cv::Size& image_size, const std::vector<cv::Rect>& tile_coords, const utils::TilingInfo& tiling_info);
    ov::Tensor merge_saliency_maps(const std::vector<DetectionResult>& tiles_results, const cv::Size& image_size, const std::vector<cv::Rect>& tile_coords, const utils::TilingInfo& tiling_info);

    /// Factory: reads the model from `model_path` and builds the pipeline.
    static DetectionModel load(const std::string& model_path, const ov::AnyMap& configuration = {});

    DetectionResult infer(cv::Mat image);
    void inferAsync(cv::Mat image, ov::AnyMap user_data);
    void setCallback(std::function<void(DetectionResult, ov::AnyMap)>);

    // FIX (naming): parameter was previously called `image` (singular) despite
    // being a batch of images.
    std::vector<DetectionResult> inferBatch(std::vector<cv::Mat> images);

private:
    std::unique_ptr<SSD> algorithm;
};
Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
#pragma once
2+
3+
#include <openvino/openvino.hpp>
4+
#include "adapters/inference_adapter.h"
5+
#include "utils/preprocessing.h"
6+
#include "tasks/results.h"
7+
8+
enum SSDOutputMode {
9+
single,
10+
multi
11+
};
12+
13+
class NumAndStep {
14+
public:
15+
size_t detectionsNum, objectSize;
16+
17+
static inline NumAndStep fromSingleOutput(const ov::Shape& shape);
18+
static inline NumAndStep fromMultipleOutputs(const ov::Shape& boxesShape);
19+
};
20+
21+
constexpr float box_area_threshold = 1.0f;
22+
23+
class SSD {
24+
public:
25+
std::shared_ptr<InferenceAdapter> adapter;
26+
27+
SSD(std::shared_ptr<InferenceAdapter> adapter, cv::Size input_shape): adapter(adapter), input_shape(input_shape) {
28+
auto config = adapter->getModelConfig();
29+
{
30+
auto iter = config.find("labels");
31+
if (iter != config.end()) {
32+
labels = iter->second.as<std::vector<std::string>>();
33+
} else {
34+
std::cout << "could not find labels from model config" << std::endl;
35+
}
36+
}
37+
{
38+
auto iter = config.find("confidence_threshold");
39+
if (iter != config.end()) {
40+
confidence_threshold = iter->second.as<float>();
41+
}
42+
}
43+
}
44+
std::map<std::string, ov::Tensor> preprocess(cv::Mat);
45+
DetectionResult postprocess(InferenceResult& infResult);
46+
47+
static cv::Size serialize(std::shared_ptr<ov::Model> ov_model);
48+
49+
SSDOutputMode output_mode;
50+
private:
51+
static void prepareSingleOutput(std::shared_ptr<ov::Model> ov_model);
52+
static void prepareMultipleOutputs(std::shared_ptr<ov::Model> ov_model);
53+
54+
DetectionResult postprocessSingleOutput(InferenceResult& infResult);
55+
DetectionResult postprocessMultipleOutputs(InferenceResult& infResult);
56+
57+
float confidence_threshold = 0.5f;
58+
59+
std::vector<std::string> labels;
60+
std::vector<std::string> filterOutXai(const std::vector<std::string>&);
61+
62+
std::vector<std::string> output_names = {};
63+
utils::RESIZE_MODE resize_mode = utils::RESIZE_FILL;
64+
ov::Layout layout;
65+
cv::InterpolationFlags interpolation_mode;
66+
cv::Size input_shape;
67+
};

0 commit comments

Comments
 (0)