Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 33 additions & 0 deletions .github/workflows/test_precommit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,39 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN}}
# missingInclude: cppcheck can't find stl, openvino, opencv
other_options: --suppress=missingInclude -Isrc/cpp/models/include -Isrc/cpp/utils/include -Isrc/cpp/pipelines/include --check-config
CPP-Precommit:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.10"
cache: pip
- name: Create and start a virtual environment
run: |
python -m venv venv
source venv/bin/activate
- name: Install dependencies
run: |
source venv/bin/activate
python -m pip install --upgrade pip
pip install src/python/[tests,build] --extra-index-url https://download.pytorch.org/whl/cpu

sudo bash src/cpp/install_dependencies.sh
- name: Prepare test data
run: |
source venv/bin/activate
python tests/cpp/precommit/prepare_data.py -d data -p tests/cpp/precommit/public_scope.json
- name: Build
  run: |
    # Activate the venv created in "Create and start a virtual environment";
    # without this, nanobind/typing_extensions are installed into the system
    # interpreter while the test dependencies live in the venv.
    source venv/bin/activate
    # Install Python build deps before entering the build directory.
    pip install nanobind==2.4.0
    pip install typing_extensions==4.12.2
    mkdir build && cd build
    cmake ../tests/cpp/precommit/
    cmake --build . -j $((`nproc`*2+2))
- name: Run test
  run: |
    build/test_sanity -d data -p tests/cpp/precommit/public_scope.json && build/test_model_config -d data
serving_api:
strategy:
fail-fast: false
Expand Down
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ Training Extensions embed all the metadata required for inference into model fil
```

- Build library:

- Create `build` folder and navigate into it:
<!-- prettier-ignore-start -->

Expand All @@ -61,6 +62,7 @@ Training Extensions embed all the metadata required for inference into model fil
```

<!-- prettier-ignore-end -->

- Run cmake:

```bash
Expand Down
23 changes: 9 additions & 14 deletions examples/cpp/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,29 +16,24 @@ This example demonstrates how to use a C++ API of OpenVINO Model API for synchro
```

- Build example:

- Create `build` folder and navigate into it:
<!-- prettier-ignore-start -->

```bash
mkdir build && cd build
```

<!-- prettier-ignore-end -->

- Run cmake:
<!-- prettier-ignore-start -->

```bash
cmake ../
```
- Run cmake:

<!-- prettier-ignore-end -->
```bash
cmake ../
```

- Build:
- Build:

```bash
make -j
```
```bash
make -j
```

- Download a model by running a Python code with Model API, see Python [example](../../python/synchronous_api/README.md):

Expand Down
3 changes: 2 additions & 1 deletion examples/cpp/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@ int main(int argc, char* argv[]) try {
}

// Instantiate Object Detection model
auto model = DetectionModel::load(argv[1], {}); // works with SSD models. Download it using Python Model API
auto model =
DetectionModel::create_model(argv[1], {}); // works with SSD models. Download it using Python Model API

// Run the inference
auto result = model.infer(image);
Expand Down
24 changes: 14 additions & 10 deletions src/cpp/include/tasks/anomaly.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class Anomaly {
std::shared_ptr<InferenceAdapter> adapter;
VisionPipeline<AnomalyResult> pipeline;

Anomaly(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
Anomaly(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
pipeline = VisionPipeline<AnomalyResult>(
adapter,
[&](cv::Mat image) {
Expand All @@ -28,18 +28,22 @@ class Anomaly {
return postprocess(result);
});

// Read tunables, letting explicit user overrides win over values embedded in
// the model's rt_info config.
auto model_config = adapter->getModelConfig();
image_threshold = utils::get_from_any_maps("image_threshold", user_config, model_config, image_threshold);
pixel_threshold = utils::get_from_any_maps("pixel_threshold", user_config, model_config, pixel_threshold);
normalization_scale =
    utils::get_from_any_maps("normalization_scale", user_config, model_config, normalization_scale);
// BUG FIX: the key was "pixel_threshold" (copy-paste), which silently left
// `task` at its default regardless of the model's metadata.
task = utils::get_from_any_maps("task", user_config, model_config, task);
labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width);
input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.height);
}

static void serialize(std::shared_ptr<ov::Model>& ov_model);
static Anomaly load(const std::string& model_path);
static Anomaly create_model(const std::string& model_path,
const ov::AnyMap& user_config = {},
bool preload = true,
const std::string& device = "AUTO");

AnomalyResult infer(cv::Mat image);
std::vector<AnomalyResult> inferBatch(std::vector<cv::Mat> image);
Expand Down
30 changes: 18 additions & 12 deletions src/cpp/include/tasks/classification.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class Classification {
std::shared_ptr<InferenceAdapter> adapter;
VisionPipeline<ClassificationResult> pipeline;

Classification(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
Classification(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
pipeline = VisionPipeline<ClassificationResult>(
adapter,
[&](cv::Mat image) {
Expand All @@ -29,16 +29,19 @@ class Classification {
return postprocess(result);
});

auto config = adapter->getModelConfig();
labels = utils::get_from_any_maps("labels", config, {}, labels);

topk = utils::get_from_any_maps("topk", config, {}, topk);
multilabel = utils::get_from_any_maps("multilabel", config, {}, multilabel);
output_raw_scores = utils::get_from_any_maps("output_raw_scores", config, {}, output_raw_scores);
confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold);
hierarchical = utils::get_from_any_maps("hierarchical", config, {}, hierarchical);
hierarchical_config = utils::get_from_any_maps("hierarchical_config", config, {}, hierarchical_config);
hierarchical_postproc = utils::get_from_any_maps("hierarchical_postproc", config, {}, hierarchical_postproc);
auto model_config = adapter->getModelConfig();
labels = utils::get_from_any_maps("labels", user_config, model_config, labels);

topk = utils::get_from_any_maps("topk", user_config, model_config, topk);
multilabel = utils::get_from_any_maps("multilabel", user_config, model_config, multilabel);
output_raw_scores = utils::get_from_any_maps("output_raw_scores", user_config, model_config, output_raw_scores);
confidence_threshold =
utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold);
hierarchical = utils::get_from_any_maps("hierarchical", user_config, model_config, hierarchical);
hierarchical_config =
utils::get_from_any_maps("hierarchical_config", user_config, model_config, hierarchical_config);
hierarchical_postproc =
utils::get_from_any_maps("hierarchical_postproc", user_config, model_config, hierarchical_postproc);
if (hierarchical) {
if (hierarchical_config.empty()) {
throw std::runtime_error("Error: empty hierarchical classification config");
Expand All @@ -55,7 +58,10 @@ class Classification {
}

static void serialize(std::shared_ptr<ov::Model>& ov_model);
static Classification load(const std::string& model_path);
static Classification create_model(const std::string& model_path,
const ov::AnyMap& user_config = {},
bool preload = true,
const std::string& device = "AUTO");

ClassificationResult infer(cv::Mat image);
std::vector<ClassificationResult> inferBatch(std::vector<cv::Mat> image);
Expand Down
13 changes: 7 additions & 6 deletions src/cpp/include/tasks/detection.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,11 @@
class DetectionModel {
public:
std::unique_ptr<Pipeline<DetectionResult>> pipeline;
std::unique_ptr<SSD> algorithm;

DetectionModel(std::unique_ptr<SSD> algorithm, const ov::AnyMap& configuration) : algorithm(std::move(algorithm)) {
DetectionModel(std::unique_ptr<SSD> algorithm, const ov::AnyMap& user_config) : algorithm(std::move(algorithm)) {
auto config = this->algorithm->adapter->getModelConfig();
if (configuration.count("tiling") && configuration.at("tiling").as<bool>()) {
if (user_config.count("tiling") && user_config.at("tiling").as<bool>()) {
if (!utils::config_contains_tiling_info(config)) {
throw std::runtime_error("Model config does not contain tiling properties.");
}
Expand Down Expand Up @@ -67,11 +68,11 @@ class DetectionModel {
const std::vector<cv::Rect>& tile_coords,
const utils::TilingInfo& tiling_info);

static DetectionModel load(const std::string& model_path, const ov::AnyMap& configuration = {});
static DetectionModel create_model(const std::string& model_path,
const ov::AnyMap& user_config = {},
bool preload = true,
const std::string& device = "AUTO");

DetectionResult infer(cv::Mat image);
std::vector<DetectionResult> inferBatch(std::vector<cv::Mat> image);

private:
std::unique_ptr<SSD> algorithm;
};
18 changes: 11 additions & 7 deletions src/cpp/include/tasks/instance_segmentation.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class InstanceSegmentation {
std::shared_ptr<InferenceAdapter> adapter;
VisionPipeline<InstanceSegmentationResult> pipeline;

InstanceSegmentation(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
InstanceSegmentation(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
pipeline = VisionPipeline<InstanceSegmentationResult>(
adapter,
[&](cv::Mat image) {
Expand All @@ -28,15 +28,19 @@ class InstanceSegmentation {
return postprocess(result);
});

// Read tunables, letting explicit user overrides win over values embedded in
// the model's rt_info config.
auto model_config = adapter->getModelConfig();
labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
confidence_threshold =
    utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold);
input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width);
// BUG FIX: the fallback default was input_shape.width (copy-paste), so a model
// without "orig_height" metadata got its height forced to the width value.
input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.height);
}

static void serialize(std::shared_ptr<ov::Model>& ov_model);
static InstanceSegmentation load(const std::string& model_path);
static InstanceSegmentation create_model(const std::string& model_path,
const ov::AnyMap& user_config = {},
bool preload = true,
const std::string& device = "AUTO");

InstanceSegmentationResult infer(cv::Mat image);
std::vector<InstanceSegmentationResult> inferBatch(std::vector<cv::Mat> image);
Expand Down
15 changes: 9 additions & 6 deletions src/cpp/include/tasks/semantic_segmentation.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ class SemanticSegmentation {
public:
VisionPipeline<SemanticSegmentationResult> pipeline;
std::shared_ptr<InferenceAdapter> adapter;
SemanticSegmentation(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
SemanticSegmentation(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
pipeline = VisionPipeline<SemanticSegmentationResult>(
adapter,
[&](cv::Mat image) {
Expand All @@ -27,14 +27,17 @@ class SemanticSegmentation {
return postprocess(result);
});

auto config = adapter->getModelConfig();
labels = utils::get_from_any_maps("labels", config, {}, labels);
soft_threshold = utils::get_from_any_maps("soft_threshold", config, {}, soft_threshold);
blur_strength = utils::get_from_any_maps("blur_strength", config, {}, blur_strength);
auto model_config = adapter->getModelConfig();
labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
soft_threshold = utils::get_from_any_maps("soft_threshold", user_config, model_config, soft_threshold);
blur_strength = utils::get_from_any_maps("blur_strength", user_config, model_config, blur_strength);
}

static void serialize(std::shared_ptr<ov::Model>& ov_model);
static SemanticSegmentation load(const std::string& model_path);
static SemanticSegmentation create_model(const std::string& model_path,
const ov::AnyMap& user_config = {},
bool preload = true,
const std::string& device = "AUTO");

std::map<std::string, ov::Tensor> preprocess(cv::Mat);
SemanticSegmentationResult postprocess(InferenceResult& infResult);
Expand Down
16 changes: 11 additions & 5 deletions src/cpp/src/tasks/anomaly.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,16 +47,20 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
mean_values,
scale_values);

ov_model->set_rt_info(true, "model_info", "embedded_processing");
ov_model->set_rt_info(input_shape[0], "model_info", "orig_width");
ov_model->set_rt_info(input_shape[1], "model_info", "orig_height");
}

Anomaly Anomaly::load(const std::string& model_path) {
Anomaly Anomaly::create_model(const std::string& model_path,
const ov::AnyMap& user_config,
bool preload,
const std::string& device) {
auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
adapter->loadModel(model_path, "", {}, false);
adapter->loadModel(model_path, device, user_config, false);

std::string model_type;
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type);

if (!model_type.empty()) {
std::cout << "has model type in info: " << model_type << std::endl;
Expand All @@ -65,9 +69,11 @@ Anomaly Anomaly::load(const std::string& model_path) {
}

adapter->applyModelTransform(Anomaly::serialize);
adapter->compileModel("AUTO", {});
if (preload) {
adapter->compileModel(device, user_config);
}

return Anomaly(adapter);
return Anomaly(adapter, user_config);
}

AnomalyResult Anomaly::infer(cv::Mat image) {
Expand Down
16 changes: 11 additions & 5 deletions src/cpp/src/tasks/classification.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -176,24 +176,30 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
addOrFindSoftmaxAndTopkOutputs(ov_model, topk, output_raw_scores);
}

ov_model->set_rt_info(true, "model_info", "embedded_processing");
ov_model->set_rt_info(input_shape[0], "model_info", "orig_width");
ov_model->set_rt_info(input_shape[1], "model_info", "orig_height");
}

/// Factory: loads a Classification model from @p model_path.
/// @param user_config  overrides for values embedded in the model config.
/// @param preload      when true, the model is compiled immediately; otherwise
///                     compilation is deferred to the caller.
/// @param device       OpenVINO device name (e.g. "CPU", "GPU", "AUTO").
/// @throws std::runtime_error if the model's model_type is not "Classification".
Classification Classification::create_model(const std::string& model_path,
                                            const ov::AnyMap& user_config,
                                            bool preload,
                                            const std::string& device) {
    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
    adapter->loadModel(model_path, device, user_config, false);

    std::string model_type;
    // NOTE(review): argument order here puts the model config before user_config,
    // which is the reverse of the constructors — confirm which map is meant to
    // take precedence for "model_type".
    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type);

    // An empty model_type also fails the comparison, so a single check suffices.
    if (model_type != "Classification") {
        throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification");
    }
    adapter->applyModelTransform(Classification::serialize);
    if (preload) {
        adapter->compileModel(device, user_config);
    }

    return Classification(adapter, user_config);
}

ClassificationResult Classification::infer(cv::Mat image) {
Expand Down
15 changes: 10 additions & 5 deletions src/cpp/src/tasks/detection.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,21 +13,26 @@
#include "utils/nms.h"
#include "utils/tensor.h"

/// Factory: loads a detection (SSD) model from @p model_path.
/// @param user_config  overrides for values embedded in the model config
///                     (also consulted for tiling by the DetectionModel ctor).
/// @param preload      when true, the model is compiled immediately.
/// @param device       OpenVINO device name (e.g. "CPU", "GPU", "AUTO").
/// @throws std::runtime_error if the model's model_type is not "ssd".
DetectionModel DetectionModel::create_model(const std::string& model_path,
                                            const ov::AnyMap& user_config,
                                            bool preload,
                                            const std::string& device) {
    auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
    adapter->loadModel(model_path, device, user_config, false);

    std::string model_type;
    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type);
    // Model metadata may use any capitalization (e.g. "SSD"); compare lowercase.
    std::transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower);

    // An empty model_type also fails the comparison, so a single check suffices.
    if (model_type != "ssd") {
        throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd");
    }
    adapter->applyModelTransform(SSD::serialize);
    if (preload) {
        adapter->compileModel(device, user_config);
    }

    return DetectionModel(std::make_unique<SSD>(adapter), user_config);
}

InferenceInput DetectionModel::preprocess(cv::Mat image) {
Expand Down
Loading
Loading