
Commit ddda762

CPP Refactor - Instantiate all tasks with user config and replicate old api (#319)
* Instantiate all tasks with user config and replicate the old API. Use the old create_model so that the API doesn't change.
* Add preload tests back in. Also fix the way the model loads: preload is essentially calling compileModel, so don't pass it in initially unless we're sure the model doesn't need transforming.
* Add test_model_config tests partially back. Serialization is still missing; need to allow for that again.
* Enable precommit tests in GitHub Actions again.
* Disable Windows for now.
* Fix examples README for prettier.
* Clean up comment and default arg in example.
* Undo changes to README.
* Remove unnecessary pip installs.
1 parent b5aa3b5 commit ddda762

File tree

18 files changed: +663 -85 lines changed


.github/workflows/test_precommit.yml

Lines changed: 31 additions & 0 deletions
@@ -51,6 +51,37 @@ jobs:
         github_token: ${{ secrets.GITHUB_TOKEN}}
         # missingInclude: cppcheck can't find stl, openvino, opencv
         other_options: --suppress=missingInclude -Isrc/cpp/models/include -Isrc/cpp/utils/include -Isrc/cpp/pipelines/include --check-config
+  CPP-Precommit:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+          cache: pip
+      - name: Create and start a virtual environment
+        run: |
+          python -m venv venv
+          source venv/bin/activate
+      - name: Install dependencies
+        run: |
+          source venv/bin/activate
+          python -m pip install --upgrade pip
+          pip install src/python/[tests,build] --extra-index-url https://download.pytorch.org/whl/cpu
+
+          sudo bash src/cpp/install_dependencies.sh
+      - name: Prepare test data
+        run: |
+          source venv/bin/activate
+          python tests/cpp/precommit/prepare_data.py -d data -p tests/cpp/precommit/public_scope.json
+      - name: Build
+        run: |
+          mkdir build && cd build
+          cmake ../tests/cpp/precommit/
+          cmake --build . -j $((`nproc`*2+2))
+      - name: Run test
+        run: |
+          build/test_sanity -d data -p tests/cpp/precommit/public_scope.json && build/test_model_config -d data
   serving_api:
     strategy:
       fail-fast: false

examples/cpp/main.cpp

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ int main(int argc, char* argv[]) try {
     }

     // Instantiate Object Detection model
-    auto model = DetectionModel::load(argv[1], {});  // works with SSD models. Download it using Python Model API
+    auto model = DetectionModel::create_model(argv[1]);

     // Run the inference
     auto result = model.infer(image);
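For context, a minimal usage sketch of the replicated API (not part of the diff): it assumes the headers below are reachable as "tasks/detection.h" and that argv[2] points at a test image; the explicit call merely spells out the defaults from the new create_model signature.

#include <opencv2/imgcodecs.hpp>
#include "tasks/detection.h"  // assumed include path for DetectionModel

int main(int argc, char* argv[]) {
    cv::Mat image = cv::imread(argv[2]);  // illustrative image load

    // Defaults mirror the old DetectionModel::load(argv[1], {}) behaviour.
    auto model = DetectionModel::create_model(argv[1]);

    // Equivalent explicit form: empty user config, compile immediately, AUTO device.
    auto explicit_model = DetectionModel::create_model(argv[1], {}, true, "AUTO");

    auto result = model.infer(image);
    return 0;
}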

src/cpp/include/tasks/anomaly.h

Lines changed: 14 additions & 10 deletions
@@ -18,7 +18,7 @@ class Anomaly {
     std::shared_ptr<InferenceAdapter> adapter;
     VisionPipeline<AnomalyResult> pipeline;

-    Anomaly(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
+    Anomaly(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
         pipeline = VisionPipeline<AnomalyResult>(
             adapter,
             [&](cv::Mat image) {
@@ -28,18 +28,22 @@ class Anomaly {
             return postprocess(result);
         });

-        auto config = adapter->getModelConfig();
-        image_threshold = utils::get_from_any_maps("image_threshold", config, {}, image_threshold);
-        pixel_threshold = utils::get_from_any_maps("pixel_threshold", config, {}, pixel_threshold);
-        normalization_scale = utils::get_from_any_maps("normalization_scale", config, {}, normalization_scale);
-        task = utils::get_from_any_maps("pixel_threshold", config, {}, task);
-        labels = utils::get_from_any_maps("labels", config, {}, labels);
-        input_shape.width = utils::get_from_any_maps("orig_width", config, {}, input_shape.width);
-        input_shape.height = utils::get_from_any_maps("orig_height", config, {}, input_shape.height);
+        auto model_config = adapter->getModelConfig();
+        image_threshold = utils::get_from_any_maps("image_threshold", user_config, model_config, image_threshold);
+        pixel_threshold = utils::get_from_any_maps("pixel_threshold", user_config, model_config, pixel_threshold);
+        normalization_scale =
+            utils::get_from_any_maps("normalization_scale", user_config, model_config, normalization_scale);
+        task = utils::get_from_any_maps("pixel_threshold", user_config, model_config, task);
+        labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
+        input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width);
+        input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.height);
     }

     static void serialize(std::shared_ptr<ov::Model>& ov_model);
-    static Anomaly load(const std::string& model_path);
+    static Anomaly create_model(const std::string& model_path,
+                                const ov::AnyMap& user_config = {},
+                                bool preload = true,
+                                const std::string& device = "AUTO");

     AnomalyResult infer(cv::Mat image);
     std::vector<AnomalyResult> inferBatch(std::vector<cv::Mat> image);

src/cpp/include/tasks/classification.h

Lines changed: 18 additions & 12 deletions
@@ -19,7 +19,7 @@ class Classification {
     std::shared_ptr<InferenceAdapter> adapter;
     VisionPipeline<ClassificationResult> pipeline;

-    Classification(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
+    Classification(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
         pipeline = VisionPipeline<ClassificationResult>(
             adapter,
             [&](cv::Mat image) {
@@ -29,16 +29,19 @@ class Classification {
             return postprocess(result);
         });

-        auto config = adapter->getModelConfig();
-        labels = utils::get_from_any_maps("labels", config, {}, labels);
-
-        topk = utils::get_from_any_maps("topk", config, {}, topk);
-        multilabel = utils::get_from_any_maps("multilabel", config, {}, multilabel);
-        output_raw_scores = utils::get_from_any_maps("output_raw_scores", config, {}, output_raw_scores);
-        confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold);
-        hierarchical = utils::get_from_any_maps("hierarchical", config, {}, hierarchical);
-        hierarchical_config = utils::get_from_any_maps("hierarchical_config", config, {}, hierarchical_config);
-        hierarchical_postproc = utils::get_from_any_maps("hierarchical_postproc", config, {}, hierarchical_postproc);
+        auto model_config = adapter->getModelConfig();
+        labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
+
+        topk = utils::get_from_any_maps("topk", user_config, model_config, topk);
+        multilabel = utils::get_from_any_maps("multilabel", user_config, model_config, multilabel);
+        output_raw_scores = utils::get_from_any_maps("output_raw_scores", user_config, model_config, output_raw_scores);
+        confidence_threshold =
+            utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold);
+        hierarchical = utils::get_from_any_maps("hierarchical", user_config, model_config, hierarchical);
+        hierarchical_config =
+            utils::get_from_any_maps("hierarchical_config", user_config, model_config, hierarchical_config);
+        hierarchical_postproc =
+            utils::get_from_any_maps("hierarchical_postproc", user_config, model_config, hierarchical_postproc);
         if (hierarchical) {
             if (hierarchical_config.empty()) {
                 throw std::runtime_error("Error: empty hierarchical classification config");
@@ -55,7 +58,10 @@ class Classification {
     }

     static void serialize(std::shared_ptr<ov::Model>& ov_model);
-    static Classification load(const std::string& model_path);
+    static Classification create_model(const std::string& model_path,
+                                       const ov::AnyMap& user_config = {},
+                                       bool preload = true,
+                                       const std::string& device = "AUTO");

     ClassificationResult infer(cv::Mat image);
     std::vector<ClassificationResult> inferBatch(std::vector<cv::Mat> image);
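A hedged sketch of what the user_config plumbing above appears to enable: overriding values that would otherwise come from the model's embedded model_info. The precedence (user_config before model_config) is inferred from the argument order of utils::get_from_any_maps, and the model path is hypothetical.

// Sketch only: override selected classification settings at load time.
ov::AnyMap user_config = {
    {"topk", 3},
    {"confidence_threshold", 0.6f},
};
auto classifier = Classification::create_model("classifier.xml", user_config);
ClassificationResult result = classifier.infer(image);  // image: a cv::Mat prepared by the caller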

src/cpp/include/tasks/detection.h

Lines changed: 7 additions & 6 deletions
@@ -18,10 +18,11 @@
 class DetectionModel {
 public:
     std::unique_ptr<Pipeline<DetectionResult>> pipeline;
+    std::unique_ptr<SSD> algorithm;

-    DetectionModel(std::unique_ptr<SSD> algorithm, const ov::AnyMap& configuration) : algorithm(std::move(algorithm)) {
+    DetectionModel(std::unique_ptr<SSD> algorithm, const ov::AnyMap& user_config) : algorithm(std::move(algorithm)) {
         auto config = this->algorithm->adapter->getModelConfig();
-        if (configuration.count("tiling") && configuration.at("tiling").as<bool>()) {
+        if (user_config.count("tiling") && user_config.at("tiling").as<bool>()) {
             if (!utils::config_contains_tiling_info(config)) {
                 throw std::runtime_error("Model config does not contain tiling properties.");
             }
@@ -67,11 +68,11 @@ class DetectionModel {
                         const std::vector<cv::Rect>& tile_coords,
                         const utils::TilingInfo& tiling_info);

-    static DetectionModel load(const std::string& model_path, const ov::AnyMap& configuration = {});
+    static DetectionModel create_model(const std::string& model_path,
+                                       const ov::AnyMap& user_config = {},
+                                       bool preload = true,
+                                       const std::string& device = "AUTO");

     DetectionResult infer(cv::Mat image);
     std::vector<DetectionResult> inferBatch(std::vector<cv::Mat> image);
-
-private:
-    std::unique_ptr<SSD> algorithm;
 };
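The constructor above only switches to tiled inference when user_config carries "tiling": true and the model's own config includes tiling properties; otherwise it throws. A sketch of the opt-in call, with a hypothetical model path:

// Sketch only: request tiled inference through user_config.
ov::AnyMap user_config = {{"tiling", true}};
// Throws during create_model if the model config lacks tiling properties.
auto detector = DetectionModel::create_model("ssd_with_tiling.xml", user_config);
DetectionResult result = detector.infer(image);  // image: a cv::Mat prepared by the caller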

src/cpp/include/tasks/instance_segmentation.h

Lines changed: 11 additions & 7 deletions
@@ -18,7 +18,7 @@ class InstanceSegmentation {
     std::shared_ptr<InferenceAdapter> adapter;
     VisionPipeline<InstanceSegmentationResult> pipeline;

-    InstanceSegmentation(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
+    InstanceSegmentation(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
         pipeline = VisionPipeline<InstanceSegmentationResult>(
             adapter,
             [&](cv::Mat image) {
@@ -28,15 +28,19 @@ class InstanceSegmentation {
             return postprocess(result);
         });

-        auto config = adapter->getModelConfig();
-        labels = utils::get_from_any_maps("labels", config, {}, labels);
-        confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold);
-        input_shape.width = utils::get_from_any_maps("orig_width", config, {}, input_shape.width);
-        input_shape.height = utils::get_from_any_maps("orig_height", config, {}, input_shape.width);
+        auto model_config = adapter->getModelConfig();
+        labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
+        confidence_threshold =
+            utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold);
+        input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width);
+        input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.width);
     }

     static void serialize(std::shared_ptr<ov::Model>& ov_model);
-    static InstanceSegmentation load(const std::string& model_path);
+    static InstanceSegmentation create_model(const std::string& model_path,
+                                             const ov::AnyMap& user_config = {},
+                                             bool preload = true,
+                                             const std::string& device = "AUTO");

     InstanceSegmentationResult infer(cv::Mat image);
     std::vector<InstanceSegmentationResult> inferBatch(std::vector<cv::Mat> image);

src/cpp/include/tasks/semantic_segmentation.h

Lines changed: 9 additions & 6 deletions
@@ -17,7 +17,7 @@ class SemanticSegmentation {
 public:
     VisionPipeline<SemanticSegmentationResult> pipeline;
     std::shared_ptr<InferenceAdapter> adapter;
-    SemanticSegmentation(std::shared_ptr<InferenceAdapter> adapter) : adapter(adapter) {
+    SemanticSegmentation(std::shared_ptr<InferenceAdapter> adapter, const ov::AnyMap& user_config) : adapter(adapter) {
         pipeline = VisionPipeline<SemanticSegmentationResult>(
             adapter,
             [&](cv::Mat image) {
@@ -27,14 +27,17 @@ class SemanticSegmentation {
             return postprocess(result);
         });

-        auto config = adapter->getModelConfig();
-        labels = utils::get_from_any_maps("labels", config, {}, labels);
-        soft_threshold = utils::get_from_any_maps("soft_threshold", config, {}, soft_threshold);
-        blur_strength = utils::get_from_any_maps("blur_strength", config, {}, blur_strength);
+        auto model_config = adapter->getModelConfig();
+        labels = utils::get_from_any_maps("labels", user_config, model_config, labels);
+        soft_threshold = utils::get_from_any_maps("soft_threshold", user_config, model_config, soft_threshold);
+        blur_strength = utils::get_from_any_maps("blur_strength", user_config, model_config, blur_strength);
     }

     static void serialize(std::shared_ptr<ov::Model>& ov_model);
-    static SemanticSegmentation load(const std::string& model_path);
+    static SemanticSegmentation create_model(const std::string& model_path,
+                                             const ov::AnyMap& user_config = {},
+                                             bool preload = true,
+                                             const std::string& device = "AUTO");

     std::map<std::string, ov::Tensor> preprocess(cv::Mat);
     SemanticSegmentationResult postprocess(InferenceResult& infResult);

src/cpp/src/tasks/anomaly.cpp

Lines changed: 11 additions & 5 deletions
@@ -47,16 +47,20 @@ void Anomaly::serialize(std::shared_ptr<ov::Model>& ov_model) {
                          mean_values,
                          scale_values);

+    ov_model->set_rt_info(true, "model_info", "embedded_processing");
     ov_model->set_rt_info(input_shape[0], "model_info", "orig_width");
     ov_model->set_rt_info(input_shape[1], "model_info", "orig_height");
 }

-Anomaly Anomaly::load(const std::string& model_path) {
+Anomaly Anomaly::create_model(const std::string& model_path,
+                              const ov::AnyMap& user_config,
+                              bool preload,
+                              const std::string& device) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model_path, "", {}, false);
+    adapter->loadModel(model_path, device, user_config, false);

     std::string model_type;
-    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type);

     if (!model_type.empty()) {
         std::cout << "has model type in info: " << model_type << std::endl;
@@ -65,9 +69,11 @@ Anomaly Anomaly::load(const std::string& model_path) {
     }

     adapter->applyModelTransform(Anomaly::serialize);
-    adapter->compileModel("AUTO", {});
+    if (preload) {
+        adapter->compileModel(device, user_config);
+    }

-    return Anomaly(adapter);
+    return Anomaly(adapter, user_config);
 }

 AnomalyResult Anomaly::infer(cv::Mat image) {
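The preload flag gates the compileModel call, so passing false leaves the model loaded but uncompiled. One plausible follow-up uses the public adapter member from the header; treating this as the intended deferred-compilation flow is an assumption, not a documented pattern.

// Sketch only: load now, compile later on an explicitly chosen device.
auto model = Anomaly::create_model("anomaly.xml", {}, /*preload=*/false, "CPU");
model.adapter->compileModel("CPU", {});  // the call create_model would have made with preload = true
AnomalyResult result = model.infer(image);  // image: a cv::Mat prepared by the caller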

src/cpp/src/tasks/classification.cpp

Lines changed: 11 additions & 5 deletions
@@ -176,24 +176,30 @@ void Classification::serialize(std::shared_ptr<ov::Model>& ov_model) {
         addOrFindSoftmaxAndTopkOutputs(ov_model, topk, output_raw_scores);
     }

+    ov_model->set_rt_info(true, "model_info", "embedded_processing");
     ov_model->set_rt_info(input_shape[0], "model_info", "orig_width");
     ov_model->set_rt_info(input_shape[1], "model_info", "orig_height");
 }

-Classification Classification::load(const std::string& model_path) {
+Classification Classification::create_model(const std::string& model_path,
+                                            const ov::AnyMap& user_config,
+                                            bool preload,
+                                            const std::string& device) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model_path, "", {}, false);
+    adapter->loadModel(model_path, device, user_config, false);

     std::string model_type;
-    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type);

     if (model_type.empty() || model_type != "Classification") {
         throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification");
     }
     adapter->applyModelTransform(Classification::serialize);
-    adapter->compileModel("AUTO", {});
+    if (preload) {
+        adapter->compileModel(device, user_config);
+    }

-    return Classification(adapter);
+    return Classification(adapter, user_config);
 }

 ClassificationResult Classification::infer(cv::Mat image) {

src/cpp/src/tasks/detection.cpp

Lines changed: 10 additions & 5 deletions
@@ -13,21 +13,26 @@
 #include "utils/nms.h"
 #include "utils/tensor.h"

-DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) {
+DetectionModel DetectionModel::create_model(const std::string& model_path,
+                                            const ov::AnyMap& user_config,
+                                            bool preload,
+                                            const std::string& device) {
     auto adapter = std::make_shared<OpenVINOInferenceAdapter>();
-    adapter->loadModel(model_path, "", {}, false);
+    adapter->loadModel(model_path, device, user_config, false);

     std::string model_type;
-    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type);
+    model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type);
     std::transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower);

     if (model_type.empty() || model_type != "ssd") {
         throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd");
     }
     adapter->applyModelTransform(SSD::serialize);
-    adapter->compileModel("AUTO", {});
+    if (preload) {
+        adapter->compileModel(device, user_config);
+    }

-    return DetectionModel(std::make_unique<SSD>(adapter), configuration);
+    return DetectionModel(std::make_unique<SSD>(adapter), user_config);
 }

 InferenceInput DetectionModel::preprocess(cv::Mat image) {
