From abf9dcaa5a0cb9a8d01aa3bc23b05f113b0ee352 Mon Sep 17 00:00:00 2001 From: Ronald Hecker Date: Mon, 30 Jun 2025 06:54:43 +0200 Subject: [PATCH 01/10] Instantiate all tasks with user config and replicate old API. Use the old create_model so that the API doesn't change. --- README.md | 2 ++ examples/cpp/README.md | 1 + examples/cpp/main.cpp | 3 +- src/cpp/include/tasks/anomaly.h | 21 ++++++------- src/cpp/include/tasks/classification.h | 27 +++++++++-------- src/cpp/include/tasks/detection.h | 6 ++-- src/cpp/include/tasks/instance_segmentation.h | 15 +++++----- src/cpp/include/tasks/semantic_segmentation.h | 12 ++++---- src/cpp/src/tasks/anomaly.cpp | 10 +++---- src/cpp/src/tasks/classification.cpp | 10 +++---- src/cpp/src/tasks/detection.cpp | 10 +++---- src/cpp/src/tasks/instance_segmentation.cpp | 10 +++---- src/cpp/src/tasks/semantic_segmentation.cpp | 10 +++---- tests/cpp/test_accuracy.cpp | 30 +++++++++---------- 14 files changed, 88 insertions(+), 79 deletions(-) diff --git a/README.md b/README.md index 3d33d80f..3e9ebc41 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,7 @@ Training Extensions embed all the metadata required for inference into model fil ``` - Build library: + - Create `build` folder and navigate into it: @@ -61,6 +62,7 @@ Training Extensions embed all the metadata required for inference into model fil ``` + - Run cmake: ```bash diff --git a/examples/cpp/README.md b/examples/cpp/README.md index 8b5b196a..a4cc5cfb 100644 --- a/examples/cpp/README.md +++ b/examples/cpp/README.md @@ -16,6 +16,7 @@ This example demonstrates how to use a C++ API of OpenVINO Model API for synchro ``` - Build example: + - Create `build` folder and navigate into it: diff --git a/examples/cpp/main.cpp b/examples/cpp/main.cpp index 47d2eaf9..5aba471c 100644 --- a/examples/cpp/main.cpp +++ b/examples/cpp/main.cpp @@ -30,7 +30,8 @@ int main(int argc, char* argv[]) try { } // Instantiate Object Detection model - auto model = DetectionModel::load(argv[1], {}); // works with SSD models. Download it using Python Model API + auto model = + DetectionModel::create_model(argv[1], {}); // works with SSD models.
Download it using Python Model API // Run the inference auto result = model.infer(image); diff --git a/src/cpp/include/tasks/anomaly.h b/src/cpp/include/tasks/anomaly.h index 7e9a0d7c..38532a05 100644 --- a/src/cpp/include/tasks/anomaly.h +++ b/src/cpp/include/tasks/anomaly.h @@ -18,7 +18,7 @@ class Anomaly { std::shared_ptr adapter; VisionPipeline pipeline; - Anomaly(std::shared_ptr adapter) : adapter(adapter) { + Anomaly(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -28,18 +28,19 @@ class Anomaly { return postprocess(result); }); - auto config = adapter->getModelConfig(); - image_threshold = utils::get_from_any_maps("image_threshold", config, {}, image_threshold); - pixel_threshold = utils::get_from_any_maps("pixel_threshold", config, {}, pixel_threshold); - normalization_scale = utils::get_from_any_maps("normalization_scale", config, {}, normalization_scale); - task = utils::get_from_any_maps("pixel_threshold", config, {}, task); - labels = utils::get_from_any_maps("labels", config, {}, labels); - input_shape.width = utils::get_from_any_maps("orig_width", config, {}, input_shape.width); - input_shape.height = utils::get_from_any_maps("orig_height", config, {}, input_shape.height); + auto model_config = adapter->getModelConfig(); + image_threshold = utils::get_from_any_maps("image_threshold", user_config, model_config, image_threshold); + pixel_threshold = utils::get_from_any_maps("pixel_threshold", user_config, model_config, pixel_threshold); + normalization_scale = + utils::get_from_any_maps("normalization_scale", user_config, model_config, normalization_scale); + task = utils::get_from_any_maps("pixel_threshold", user_config, model_config, task); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width); + input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.height); } static void serialize(std::shared_ptr& ov_model); - static Anomaly load(const std::string& model_path); + static Anomaly create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); AnomalyResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/classification.h b/src/cpp/include/tasks/classification.h index 528de18e..ff923b13 100644 --- a/src/cpp/include/tasks/classification.h +++ b/src/cpp/include/tasks/classification.h @@ -19,7 +19,7 @@ class Classification { std::shared_ptr adapter; VisionPipeline pipeline; - Classification(std::shared_ptr adapter) : adapter(adapter) { + Classification(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -29,16 +29,19 @@ class Classification { return postprocess(result); }); - auto config = adapter->getModelConfig(); - labels = utils::get_from_any_maps("labels", config, {}, labels); - - topk = utils::get_from_any_maps("topk", config, {}, topk); - multilabel = utils::get_from_any_maps("multilabel", config, {}, multilabel); - output_raw_scores = utils::get_from_any_maps("output_raw_scores", config, {}, output_raw_scores); - confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold); - hierarchical = utils::get_from_any_maps("hierarchical", config, {}, hierarchical); - hierarchical_config = 
utils::get_from_any_maps("hierarchical_config", config, {}, hierarchical_config); - hierarchical_postproc = utils::get_from_any_maps("hierarchical_postproc", config, {}, hierarchical_postproc); + auto model_config = adapter->getModelConfig(); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + + topk = utils::get_from_any_maps("topk", user_config, model_config, topk); + multilabel = utils::get_from_any_maps("multilabel", user_config, model_config, multilabel); + output_raw_scores = utils::get_from_any_maps("output_raw_scores", user_config, model_config, output_raw_scores); + confidence_threshold = + utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold); + hierarchical = utils::get_from_any_maps("hierarchical", user_config, model_config, hierarchical); + hierarchical_config = + utils::get_from_any_maps("hierarchical_config", user_config, model_config, hierarchical_config); + hierarchical_postproc = + utils::get_from_any_maps("hierarchical_postproc", user_config, model_config, hierarchical_postproc); if (hierarchical) { if (hierarchical_config.empty()) { throw std::runtime_error("Error: empty hierarchical classification config"); @@ -55,7 +58,7 @@ class Classification { } static void serialize(std::shared_ptr& ov_model); - static Classification load(const std::string& model_path); + static Classification create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); ClassificationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/detection.h b/src/cpp/include/tasks/detection.h index e5405133..3d25bd29 100644 --- a/src/cpp/include/tasks/detection.h +++ b/src/cpp/include/tasks/detection.h @@ -19,9 +19,9 @@ class DetectionModel { public: std::unique_ptr> pipeline; - DetectionModel(std::unique_ptr algorithm, const ov::AnyMap& configuration) : algorithm(std::move(algorithm)) { + DetectionModel(std::unique_ptr algorithm, const ov::AnyMap& user_config) : algorithm(std::move(algorithm)) { auto config = this->algorithm->adapter->getModelConfig(); - if (configuration.count("tiling") && configuration.at("tiling").as()) { + if (user_config.count("tiling") && user_config.at("tiling").as()) { if (!utils::config_contains_tiling_info(config)) { throw std::runtime_error("Model config does not contain tiling properties."); } @@ -67,7 +67,7 @@ class DetectionModel { const std::vector& tile_coords, const utils::TilingInfo& tiling_info); - static DetectionModel load(const std::string& model_path, const ov::AnyMap& configuration = {}); + static DetectionModel create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); DetectionResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/instance_segmentation.h b/src/cpp/include/tasks/instance_segmentation.h index bc1905f1..524d8b3e 100644 --- a/src/cpp/include/tasks/instance_segmentation.h +++ b/src/cpp/include/tasks/instance_segmentation.h @@ -18,7 +18,7 @@ class InstanceSegmentation { std::shared_ptr adapter; VisionPipeline pipeline; - InstanceSegmentation(std::shared_ptr adapter) : adapter(adapter) { + InstanceSegmentation(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -28,15 +28,16 @@ class InstanceSegmentation { return postprocess(result); }); - auto config = adapter->getModelConfig(); - labels = utils::get_from_any_maps("labels", config, {}, labels); - 
confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold); - input_shape.width = utils::get_from_any_maps("orig_width", config, {}, input_shape.width); - input_shape.height = utils::get_from_any_maps("orig_height", config, {}, input_shape.width); + auto model_config = adapter->getModelConfig(); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + confidence_threshold = + utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold); + input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width); + input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.width); } static void serialize(std::shared_ptr& ov_model); - static InstanceSegmentation load(const std::string& model_path); + static InstanceSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); InstanceSegmentationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/semantic_segmentation.h b/src/cpp/include/tasks/semantic_segmentation.h index 47d836dc..962cd468 100644 --- a/src/cpp/include/tasks/semantic_segmentation.h +++ b/src/cpp/include/tasks/semantic_segmentation.h @@ -17,7 +17,7 @@ class SemanticSegmentation { public: VisionPipeline pipeline; std::shared_ptr adapter; - SemanticSegmentation(std::shared_ptr adapter) : adapter(adapter) { + SemanticSegmentation(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -27,14 +27,14 @@ class SemanticSegmentation { return postprocess(result); }); - auto config = adapter->getModelConfig(); - labels = utils::get_from_any_maps("labels", config, {}, labels); - soft_threshold = utils::get_from_any_maps("soft_threshold", config, {}, soft_threshold); - blur_strength = utils::get_from_any_maps("blur_strength", config, {}, blur_strength); + auto model_config = adapter->getModelConfig(); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + soft_threshold = utils::get_from_any_maps("soft_threshold", user_config, model_config, soft_threshold); + blur_strength = utils::get_from_any_maps("blur_strength", user_config, model_config, blur_strength); } static void serialize(std::shared_ptr& ov_model); - static SemanticSegmentation load(const std::string& model_path); + static SemanticSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); std::map preprocess(cv::Mat); SemanticSegmentationResult postprocess(InferenceResult& infResult); diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp index 29f42d46..db42c639 100644 --- a/src/cpp/src/tasks/anomaly.cpp +++ b/src/cpp/src/tasks/anomaly.cpp @@ -51,12 +51,12 @@ void Anomaly::serialize(std::shared_ptr& ov_model) { ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Anomaly Anomaly::load(const std::string& model_path) { +Anomaly Anomaly::create_model(const std::string& model_path, const ov::AnyMap& user_config) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, "", user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); if 
(!model_type.empty()) { std::cout << "has model type in info: " << model_type << std::endl; @@ -65,9 +65,9 @@ Anomaly Anomaly::load(const std::string& model_path) { } adapter->applyModelTransform(Anomaly::serialize); - adapter->compileModel("AUTO", {}); + adapter->compileModel("AUTO", user_config); - return Anomaly(adapter); + return Anomaly(adapter, user_config); } AnomalyResult Anomaly::infer(cv::Mat image) { diff --git a/src/cpp/src/tasks/classification.cpp b/src/cpp/src/tasks/classification.cpp index 3a74d341..79a81041 100644 --- a/src/cpp/src/tasks/classification.cpp +++ b/src/cpp/src/tasks/classification.cpp @@ -180,20 +180,20 @@ void Classification::serialize(std::shared_ptr& ov_model) { ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Classification Classification::load(const std::string& model_path) { +Classification Classification::create_model(const std::string& model_path, const ov::AnyMap& user_config) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, "", user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); if (model_type.empty() || model_type != "Classification") { throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification"); } adapter->applyModelTransform(Classification::serialize); - adapter->compileModel("AUTO", {}); + adapter->compileModel("AUTO", user_config); - return Classification(adapter); + return Classification(adapter, user_config); } ClassificationResult Classification::infer(cv::Mat image) { diff --git a/src/cpp/src/tasks/detection.cpp b/src/cpp/src/tasks/detection.cpp index e5f555c2..45e4bd93 100644 --- a/src/cpp/src/tasks/detection.cpp +++ b/src/cpp/src/tasks/detection.cpp @@ -13,21 +13,21 @@ #include "utils/nms.h" #include "utils/tensor.h" -DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) { +DetectionModel DetectionModel::create_model(const std::string& model_path, const ov::AnyMap& user_config) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, "", user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); std::transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower); if (model_type.empty() || model_type != "ssd") { throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd"); } adapter->applyModelTransform(SSD::serialize); - adapter->compileModel("AUTO", {}); + adapter->compileModel("AUTO", user_config); - return DetectionModel(std::make_unique(adapter), configuration); + return DetectionModel(std::make_unique(adapter), user_config); } InferenceInput DetectionModel::preprocess(cv::Mat image) { diff --git a/src/cpp/src/tasks/instance_segmentation.cpp b/src/cpp/src/tasks/instance_segmentation.cpp index a98cbc32..2e63cb4d 100644 --- a/src/cpp/src/tasks/instance_segmentation.cpp +++ b/src/cpp/src/tasks/instance_segmentation.cpp @@ -190,20 +190,20 @@ void InstanceSegmentation::serialize(std::shared_ptr& ov_model) { ov_model->set_rt_info(input_shape.height, "model_info", "orig_height"); } 
-InstanceSegmentation InstanceSegmentation::load(const std::string& model_path) { +InstanceSegmentation InstanceSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, "", user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", user_config, adapter->getModelConfig(), model_type); if (model_type.empty() || model_type != "MaskRCNN") { throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN"); } adapter->applyModelTransform(InstanceSegmentation::serialize); - adapter->compileModel("AUTO", {}); + adapter->compileModel("AUTO", user_config); - return InstanceSegmentation(adapter); + return InstanceSegmentation(adapter, user_config); } InstanceSegmentationResult InstanceSegmentation::infer(cv::Mat image) { diff --git a/src/cpp/src/tasks/semantic_segmentation.cpp b/src/cpp/src/tasks/semantic_segmentation.cpp index 588045ad..c0301b71 100644 --- a/src/cpp/src/tasks/semantic_segmentation.cpp +++ b/src/cpp/src/tasks/semantic_segmentation.cpp @@ -20,20 +20,20 @@ cv::Mat get_activation_map(const cv::Mat& features) { return int_act_map; } -SemanticSegmentation SemanticSegmentation::load(const std::string& model_path) { +SemanticSegmentation SemanticSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, "", user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", user_config, adapter->getModelConfig(), model_type); if (model_type.empty() || model_type != "Segmentation") { throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation"); } adapter->applyModelTransform(SemanticSegmentation::serialize); - adapter->compileModel("AUTO", {}); + adapter->compileModel("AUTO", user_config); - return SemanticSegmentation(adapter); + return SemanticSegmentation(adapter, user_config); } void SemanticSegmentation::serialize(std::shared_ptr& ov_model) { diff --git a/tests/cpp/test_accuracy.cpp b/tests/cpp/test_accuracy.cpp index 6233128b..559465d2 100644 --- a/tests/cpp/test_accuracy.cpp +++ b/tests/cpp/test_accuracy.cpp @@ -86,7 +86,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { if (data.type == "DetectionModel") { auto use_tiling = !data.input_res.empty(); - auto model = DetectionModel::load(model_path, {{"tiling", use_tiling}}); + auto model = DetectionModel::create_model(model_path, {{"tiling", use_tiling}}); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -99,7 +99,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { } } else if (data.type == "SegmentationModel") { - auto model = SemanticSegmentation::load(model_path); + auto model = SemanticSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -109,7 +109,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "MaskRCNNModel") { - auto model = InstanceSegmentation::load(model_path); + auto model 
= InstanceSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -119,7 +119,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "ClassificationModel") { - auto model = Classification::load(model_path); + auto model = Classification::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -127,7 +127,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { EXPECT_EQ(std::string{result}, test_data.reference[0]); } } else if (data.type == "AnomalyDetection") { - auto model = Anomaly::load(model_path); + auto model = Anomaly::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -153,7 +153,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { if (data.type == "DetectionModel") { auto use_tiling = !data.input_res.empty(); - auto model = DetectionModel::load(model_path, {{"tiling", use_tiling}}); + auto model = DetectionModel::create_model(model_path, {{"tiling", use_tiling}}); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -164,7 +164,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(std::string{result}, test_data.reference[0]); } } else if (data.type == "SegmentationModel") { - auto model = SemanticSegmentation::load(model_path); + auto model = SemanticSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -174,7 +174,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "MaskRCNNModel") { - auto model = InstanceSegmentation::load(model_path); + auto model = InstanceSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -184,7 +184,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "ClassificationModel") { - auto model = Classification::load(model_path); + auto model = Classification::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -193,7 +193,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(std::string{result}, test_data.reference[0]); } } else if (data.type == "AnomalyDetection") { - auto model = Anomaly::load(model_path); + auto model = Anomaly::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -218,7 +218,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { if (data.type == "DetectionModel") { auto use_tiling = !data.input_res.empty(); - auto model = DetectionModel::load(model_path, {{"tiling", use_tiling}}); + auto model = DetectionModel::create_model(model_path, {{"tiling", use_tiling}}); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -231,7 +231,7 @@ 
TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(std::string{result[0]}, test_data.reference[0]); } } else if (data.type == "SegmentationModel") { - auto model = SemanticSegmentation::load(model_path); + auto model = SemanticSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -241,7 +241,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(format_test_output_to_string(model, result[0]), test_data.reference[0]); } } else if (data.type == "MaskRCNNModel") { - auto model = InstanceSegmentation::load(model_path); + auto model = InstanceSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -252,7 +252,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(format_test_output_to_string(model, result[0]), test_data.reference[0]); } } else if (data.type == "ClassificationModel") { - auto model = Classification::load(model_path); + auto model = Classification::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -263,7 +263,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(std::string{result[0]}, test_data.reference[0]); } } else if (data.type == "AnomalyDetection") { - auto model = Anomaly::load(model_path); + auto model = Anomaly::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; From 8168c276177fe61e6fb9e25cbe60fb03939d6084 Mon Sep 17 00:00:00 2001 From: Ronald Hecker Date: Mon, 30 Jun 2025 11:12:35 +0200 Subject: [PATCH 02/10] Adds preload tests back in. Also fixes the way the model loads. Preload is basically calling compileModel, so don't pass it in initially unless we're sure that it doesn't need transforming. --- src/cpp/include/tasks/anomaly.h | 2 +- src/cpp/include/tasks/classification.h | 2 +- src/cpp/include/tasks/detection.h | 2 +- src/cpp/include/tasks/instance_segmentation.h | 2 +- src/cpp/include/tasks/semantic_segmentation.h | 2 +- src/cpp/src/tasks/anomaly.cpp | 8 +- src/cpp/src/tasks/classification.cpp | 8 +- src/cpp/src/tasks/detection.cpp | 8 +- src/cpp/src/tasks/instance_segmentation.cpp | 8 +- src/cpp/src/tasks/semantic_segmentation.cpp | 8 +- tests/cpp/CMakeLists.txt | 3 - tests/cpp/precommit/CMakeLists.txt | 22 +++ tests/cpp/precommit/test_sanity.cpp | 146 ++++++++++++++++++ 13 files changed, 198 insertions(+), 23 deletions(-) create mode 100644 tests/cpp/precommit/CMakeLists.txt create mode 100644 tests/cpp/precommit/test_sanity.cpp diff --git a/src/cpp/include/tasks/anomaly.h b/src/cpp/include/tasks/anomaly.h index 38532a05..d6abd836 100644 --- a/src/cpp/include/tasks/anomaly.h +++ b/src/cpp/include/tasks/anomaly.h @@ -40,7 +40,7 @@ class Anomaly { } static void serialize(std::shared_ptr& ov_model); - static Anomaly create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); + static Anomaly create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); AnomalyResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/classification.h b/src/cpp/include/tasks/classification.h index ff923b13..a79a5cdb 100644 --- a/src/cpp/include/tasks/classification.h +++ b/src/cpp/include/tasks/classification.h @@ -58,7 +58,7 @@ class Classification { } static void 
serialize(std::shared_ptr& ov_model); - static Classification create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); + static Classification create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); ClassificationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/detection.h b/src/cpp/include/tasks/detection.h index 3d25bd29..4f2f899c 100644 --- a/src/cpp/include/tasks/detection.h +++ b/src/cpp/include/tasks/detection.h @@ -67,7 +67,7 @@ class DetectionModel { const std::vector& tile_coords, const utils::TilingInfo& tiling_info); - static DetectionModel create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); + static DetectionModel create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); DetectionResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/instance_segmentation.h b/src/cpp/include/tasks/instance_segmentation.h index 524d8b3e..226d1d0a 100644 --- a/src/cpp/include/tasks/instance_segmentation.h +++ b/src/cpp/include/tasks/instance_segmentation.h @@ -37,7 +37,7 @@ class InstanceSegmentation { } static void serialize(std::shared_ptr& ov_model); - static InstanceSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); + static InstanceSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); InstanceSegmentationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/semantic_segmentation.h b/src/cpp/include/tasks/semantic_segmentation.h index 962cd468..544b2e73 100644 --- a/src/cpp/include/tasks/semantic_segmentation.h +++ b/src/cpp/include/tasks/semantic_segmentation.h @@ -34,7 +34,7 @@ class SemanticSegmentation { } static void serialize(std::shared_ptr& ov_model); - static SemanticSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}); + static SemanticSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); std::map preprocess(cv::Mat); SemanticSegmentationResult postprocess(InferenceResult& infResult); diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp index db42c639..2d824aa7 100644 --- a/src/cpp/src/tasks/anomaly.cpp +++ b/src/cpp/src/tasks/anomaly.cpp @@ -51,9 +51,9 @@ void Anomaly::serialize(std::shared_ptr& ov_model) { ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Anomaly Anomaly::create_model(const std::string& model_path, const ov::AnyMap& user_config) { +Anomaly Anomaly::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", user_config, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); @@ -65,7 +65,9 @@ Anomaly Anomaly::create_model(const std::string& model_path, const ov::AnyMap& u } adapter->applyModelTransform(Anomaly::serialize); - adapter->compileModel("AUTO", user_config); + if (preload) { + 
adapter->compileModel(device, user_config); + } return Anomaly(adapter, user_config); } diff --git a/src/cpp/src/tasks/classification.cpp b/src/cpp/src/tasks/classification.cpp index 79a81041..a44a595a 100644 --- a/src/cpp/src/tasks/classification.cpp +++ b/src/cpp/src/tasks/classification.cpp @@ -180,9 +180,9 @@ void Classification::serialize(std::shared_ptr& ov_model) { ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Classification Classification::create_model(const std::string& model_path, const ov::AnyMap& user_config) { +Classification Classification::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", user_config, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); @@ -191,7 +191,9 @@ Classification Classification::create_model(const std::string& model_path, const throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification"); } adapter->applyModelTransform(Classification::serialize); - adapter->compileModel("AUTO", user_config); + if (preload) { + adapter->compileModel(device, user_config); + } return Classification(adapter, user_config); } diff --git a/src/cpp/src/tasks/detection.cpp b/src/cpp/src/tasks/detection.cpp index 45e4bd93..8e947778 100644 --- a/src/cpp/src/tasks/detection.cpp +++ b/src/cpp/src/tasks/detection.cpp @@ -13,9 +13,9 @@ #include "utils/nms.h" #include "utils/tensor.h" -DetectionModel DetectionModel::create_model(const std::string& model_path, const ov::AnyMap& user_config) { +DetectionModel DetectionModel::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", user_config, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); @@ -25,7 +25,9 @@ DetectionModel DetectionModel::create_model(const std::string& model_path, const throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd"); } adapter->applyModelTransform(SSD::serialize); - adapter->compileModel("AUTO", user_config); + if (preload) { + adapter->compileModel(device, user_config); + } return DetectionModel(std::make_unique(adapter), user_config); } diff --git a/src/cpp/src/tasks/instance_segmentation.cpp b/src/cpp/src/tasks/instance_segmentation.cpp index 2e63cb4d..b6047827 100644 --- a/src/cpp/src/tasks/instance_segmentation.cpp +++ b/src/cpp/src/tasks/instance_segmentation.cpp @@ -190,9 +190,9 @@ void InstanceSegmentation::serialize(std::shared_ptr& ov_model) { ov_model->set_rt_info(input_shape.height, "model_info", "orig_height"); } -InstanceSegmentation InstanceSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config) { +InstanceSegmentation InstanceSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", user_config, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; model_type = utils::get_from_any_maps("model_type", user_config, adapter->getModelConfig(), 
model_type); @@ -201,7 +201,9 @@ InstanceSegmentation InstanceSegmentation::create_model(const std::string& model throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN"); } adapter->applyModelTransform(InstanceSegmentation::serialize); - adapter->compileModel("AUTO", user_config); + if (preload) { + adapter->compileModel(device, user_config); + } return InstanceSegmentation(adapter, user_config); } diff --git a/src/cpp/src/tasks/semantic_segmentation.cpp b/src/cpp/src/tasks/semantic_segmentation.cpp index c0301b71..3e3f82c3 100644 --- a/src/cpp/src/tasks/semantic_segmentation.cpp +++ b/src/cpp/src/tasks/semantic_segmentation.cpp @@ -20,9 +20,9 @@ cv::Mat get_activation_map(const cv::Mat& features) { return int_act_map; } -SemanticSegmentation SemanticSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config) { +SemanticSegmentation SemanticSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", user_config, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; model_type = utils::get_from_any_maps("model_type", user_config, adapter->getModelConfig(), model_type); @@ -31,7 +31,9 @@ SemanticSegmentation SemanticSegmentation::create_model(const std::string& model throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation"); } adapter->applyModelTransform(SemanticSegmentation::serialize); - adapter->compileModel("AUTO", user_config); + if (preload) { + adapter->compileModel(device, user_config); + } return SemanticSegmentation(adapter, user_config); } diff --git a/tests/cpp/CMakeLists.txt b/tests/cpp/CMakeLists.txt index 90c1b0ab..919dcec5 100644 --- a/tests/cpp/CMakeLists.txt +++ b/tests/cpp/CMakeLists.txt @@ -20,6 +20,3 @@ find_package(OpenCV REQUIRED COMPONENTS imgcodecs) add_executable(test_accuracy test_accuracy.cpp) target_link_libraries(test_accuracy gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) - -#include(GoogleTest) -#gtest_discover_tests(test_accuracy) diff --git a/tests/cpp/precommit/CMakeLists.txt b/tests/cpp/precommit/CMakeLists.txt new file mode 100644 index 00000000..e9f71a21 --- /dev/null +++ b/tests/cpp/precommit/CMakeLists.txt @@ -0,0 +1,22 @@ +cmake_minimum_required(VERSION 3.26) + + +project(tests) + +add_subdirectory(../../../src/cpp ${tests_BINARY_DIR}/src/cpp) + +include(FetchContent) + +FetchContent_Declare(json GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG d41ca94fa85d5119852e2f7a3f94335cc7cb0486 # PR #4709, fixes cmake deprecation warnings + ) +FetchContent_Declare(googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG a7f443b80b105f940225332ed3c31f2790092f47 # latest main +) +FetchContent_MakeAvailable(json googletest) + +find_package(OpenCV REQUIRED COMPONENTS imgcodecs) + +add_executable(test_sanity test_sanity.cpp) +target_link_libraries(test_sanity gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) diff --git a/tests/cpp/precommit/test_sanity.cpp b/tests/cpp/precommit/test_sanity.cpp new file mode 100644 index 00000000..177520cd --- /dev/null +++ b/tests/cpp/precommit/test_sanity.cpp @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2020-2024 Intel Corporation + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include 
+#include +#include +#include +#include + +using json = nlohmann::json; + +std::string PUBLIC_SCOPE_PATH = "../../tests/cpp/precommit/public_scope.json"; +std::string DATA_DIR = "../data"; +std::string MODEL_PATH_TEMPLATE = "public/%s/FP16/%s.xml"; +std::string IMAGE_PATH = "coco128/images/train2017/000000000074.jpg"; + +struct ModelData { + std::string name; + std::string type; +}; + +class ModelParameterizedTest : public testing::TestWithParam {}; + +template +std::string string_format(const std::string& fmt, Args... args) { + size_t size = snprintf(nullptr, 0, fmt.c_str(), args...); + std::string buf; + buf.reserve(size + 1); + buf.resize(size); + snprintf(&buf[0], size + 1, fmt.c_str(), args...); + return buf; +} + +inline void from_json(const nlohmann::json& j, ModelData& test) { + test.name = j.at("name").get(); + test.type = j.at("type").get(); +} + +std::vector GetTestData(const std::string& path) { + std::ifstream input(path); + nlohmann::json j; + input >> j; + return j; +} +TEST_P(ModelParameterizedTest, SynchronousInference) { + cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); + if (!image.data) { + throw std::runtime_error{"Failed to read the image"}; + } + + std::string model_path; + const std::string& name = GetParam().name; + if (name.substr(name.size() - 4) == ".xml") { + model_path = name; + } else { + model_path = string_format(MODEL_PATH_TEMPLATE, name.c_str(), name.c_str()); + } + + if ("DetectionModel" == GetParam().type) { + bool preload = true; + auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + auto result = model.infer(image); + EXPECT_GT(result.objects.size(), 0); + } else if ("ClassificationModel" == GetParam().type) { + bool preload = true; + auto model = Classification::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + auto result = model.infer(image); + ASSERT_GT(result.topLabels.size(), 0); + EXPECT_GT(result.topLabels.front().score, 0.0f); + } else if ("SegmentationModel" == GetParam().type) { + bool preload = true; + auto model = SemanticSegmentation::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + auto result = model.infer(image); + ASSERT_GT(model.getContours(result).size(), 0); + } +} + +INSTANTIATE_TEST_SUITE_P(TestSanityPublic, ModelParameterizedTest, testing::ValuesIn(GetTestData(PUBLIC_SCOPE_PATH))); + +class InputParser { +public: + InputParser(int& argc, char** argv) { + for (int i = 1; i < argc; ++i) + this->tokens.push_back(std::string(argv[i])); + } + + const std::string& getCmdOption(const std::string& option) const { + std::vector::const_iterator itr; + itr = std::find(this->tokens.begin(), this->tokens.end(), option); + if (itr != this->tokens.end() && ++itr != this->tokens.end()) { + return *itr; + } + static const std::string empty_string(""); + return empty_string; + } + + bool cmdOptionExists(const std::string& option) const { + return std::find(this->tokens.begin(), this->tokens.end(), option) != this->tokens.end(); + } + +private: + std::vector tokens; +}; + +void print_help(const char* program_name) { + std::cout << "Usage: " << program_name << " -p -d " << std::endl; +} + +int main(int argc, char** argv) { + InputParser input(argc, argv); + + if (input.cmdOptionExists("-h")) { + print_help(argv[0]); + return 1; + } + const std::string& public_scope = input.getCmdOption("-p"); + if (!public_scope.empty()) { + PUBLIC_SCOPE_PATH = public_scope; + } else { + print_help(argv[0]); + return 1; + } + const std::string& data_dir = input.getCmdOption("-d"); + 
if (!data_dir.empty()) { + DATA_DIR = data_dir; + } else { + print_help(argv[0]); + return 1; + } + + testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} From fbf8250ece4e7d130da4fe0d04dd714c271354c3 Mon Sep 17 00:00:00 2001 From: Ronald Hecker Date: Tue, 1 Jul 2025 07:25:42 +0200 Subject: [PATCH 03/10] Adds test_model_config tests partially back Missing serialization. Need to allow for that again. --- src/cpp/include/tasks/anomaly.h | 5 +- src/cpp/include/tasks/classification.h | 5 +- src/cpp/include/tasks/detection.h | 9 +- src/cpp/include/tasks/instance_segmentation.h | 5 +- src/cpp/include/tasks/semantic_segmentation.h | 5 +- src/cpp/src/tasks/anomaly.cpp | 6 +- src/cpp/src/tasks/classification.cpp | 6 +- src/cpp/src/tasks/detection.cpp | 5 +- src/cpp/src/tasks/detection/ssd.cpp | 1 + src/cpp/src/tasks/instance_segmentation.cpp | 6 +- src/cpp/src/tasks/semantic_segmentation.cpp | 6 +- tests/cpp/precommit/CMakeLists.txt | 3 + tests/cpp/precommit/test_model_config.cpp | 331 ++++++++++++++++++ tests/cpp/precommit/test_sanity.cpp | 2 +- 14 files changed, 381 insertions(+), 14 deletions(-) create mode 100644 tests/cpp/precommit/test_model_config.cpp diff --git a/src/cpp/include/tasks/anomaly.h b/src/cpp/include/tasks/anomaly.h index d6abd836..949f8e56 100644 --- a/src/cpp/include/tasks/anomaly.h +++ b/src/cpp/include/tasks/anomaly.h @@ -40,7 +40,10 @@ class Anomaly { } static void serialize(std::shared_ptr& ov_model); - static Anomaly create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); + static Anomaly create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); AnomalyResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/classification.h b/src/cpp/include/tasks/classification.h index a79a5cdb..bb3ecc8d 100644 --- a/src/cpp/include/tasks/classification.h +++ b/src/cpp/include/tasks/classification.h @@ -58,7 +58,10 @@ class Classification { } static void serialize(std::shared_ptr& ov_model); - static Classification create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); + static Classification create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); ClassificationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/detection.h b/src/cpp/include/tasks/detection.h index 4f2f899c..dead9cba 100644 --- a/src/cpp/include/tasks/detection.h +++ b/src/cpp/include/tasks/detection.h @@ -18,6 +18,7 @@ class DetectionModel { public: std::unique_ptr> pipeline; + std::unique_ptr algorithm; DetectionModel(std::unique_ptr algorithm, const ov::AnyMap& user_config) : algorithm(std::move(algorithm)) { auto config = this->algorithm->adapter->getModelConfig(); @@ -67,11 +68,11 @@ class DetectionModel { const std::vector& tile_coords, const utils::TilingInfo& tiling_info); - static DetectionModel create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); + static DetectionModel create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); DetectionResult infer(cv::Mat image); std::vector 
inferBatch(std::vector image); - -private: - std::unique_ptr algorithm; }; diff --git a/src/cpp/include/tasks/instance_segmentation.h b/src/cpp/include/tasks/instance_segmentation.h index 226d1d0a..513de093 100644 --- a/src/cpp/include/tasks/instance_segmentation.h +++ b/src/cpp/include/tasks/instance_segmentation.h @@ -37,7 +37,10 @@ class InstanceSegmentation { } static void serialize(std::shared_ptr& ov_model); - static InstanceSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); + static InstanceSegmentation create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); InstanceSegmentationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/semantic_segmentation.h b/src/cpp/include/tasks/semantic_segmentation.h index 544b2e73..ee54409c 100644 --- a/src/cpp/include/tasks/semantic_segmentation.h +++ b/src/cpp/include/tasks/semantic_segmentation.h @@ -34,7 +34,10 @@ class SemanticSegmentation { } static void serialize(std::shared_ptr& ov_model); - static SemanticSegmentation create_model(const std::string& model_path, const ov::AnyMap& user_config = {}, bool preload = true, const std::string& device = "AUTO"); + static SemanticSegmentation create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); std::map preprocess(cv::Mat); SemanticSegmentationResult postprocess(InferenceResult& infResult); diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp index 2d824aa7..234b54ed 100644 --- a/src/cpp/src/tasks/anomaly.cpp +++ b/src/cpp/src/tasks/anomaly.cpp @@ -47,11 +47,15 @@ void Anomaly::serialize(std::shared_ptr& ov_model) { mean_values, scale_values); + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape[0], "model_info", "orig_width"); ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Anomaly Anomaly::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { +Anomaly Anomaly::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); adapter->loadModel(model_path, device, user_config, false); diff --git a/src/cpp/src/tasks/classification.cpp b/src/cpp/src/tasks/classification.cpp index a44a595a..208b8d38 100644 --- a/src/cpp/src/tasks/classification.cpp +++ b/src/cpp/src/tasks/classification.cpp @@ -176,11 +176,15 @@ void Classification::serialize(std::shared_ptr& ov_model) { addOrFindSoftmaxAndTopkOutputs(ov_model, topk, output_raw_scores); } + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape[0], "model_info", "orig_width"); ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Classification Classification::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { +Classification Classification::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); adapter->loadModel(model_path, device, user_config, false); diff --git a/src/cpp/src/tasks/detection.cpp b/src/cpp/src/tasks/detection.cpp index 
8e947778..d4f5cfc6 100644 --- a/src/cpp/src/tasks/detection.cpp +++ b/src/cpp/src/tasks/detection.cpp @@ -13,7 +13,10 @@ #include "utils/nms.h" #include "utils/tensor.h" -DetectionModel DetectionModel::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { +DetectionModel DetectionModel::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); adapter->loadModel(model_path, device, user_config, false); diff --git a/src/cpp/src/tasks/detection/ssd.cpp b/src/cpp/src/tasks/detection/ssd.cpp index 681e7d18..27f5497d 100644 --- a/src/cpp/src/tasks/detection/ssd.cpp +++ b/src/cpp/src/tasks/detection/ssd.cpp @@ -115,6 +115,7 @@ void SSD::serialize(std::shared_ptr& ov_model) { // prepareMultipleOutputs(ov_model); //This does nothing from what I can see. } + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape[0], "model_info", "orig_width"); ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } diff --git a/src/cpp/src/tasks/instance_segmentation.cpp b/src/cpp/src/tasks/instance_segmentation.cpp index b6047827..12cb4405 100644 --- a/src/cpp/src/tasks/instance_segmentation.cpp +++ b/src/cpp/src/tasks/instance_segmentation.cpp @@ -186,11 +186,15 @@ void InstanceSegmentation::serialize(std::shared_ptr& ov_model) { saliency_map_name + ", " + feature_vector_name + " and 3 or 4 other outputs"); } + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape.width, "model_info", "orig_width"); ov_model->set_rt_info(input_shape.height, "model_info", "orig_height"); } -InstanceSegmentation InstanceSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { +InstanceSegmentation InstanceSegmentation::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); adapter->loadModel(model_path, device, user_config, false); diff --git a/src/cpp/src/tasks/semantic_segmentation.cpp b/src/cpp/src/tasks/semantic_segmentation.cpp index 3e3f82c3..3b2386c5 100644 --- a/src/cpp/src/tasks/semantic_segmentation.cpp +++ b/src/cpp/src/tasks/semantic_segmentation.cpp @@ -20,7 +20,10 @@ cv::Mat get_activation_map(const cv::Mat& features) { return int_act_map; } -SemanticSegmentation SemanticSegmentation::create_model(const std::string& model_path, const ov::AnyMap& user_config, bool preload, const std::string& device) { +SemanticSegmentation SemanticSegmentation::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); adapter->loadModel(model_path, device, user_config, false); @@ -114,6 +117,7 @@ void SemanticSegmentation::serialize(std::shared_ptr& ov_model) { ov_model = ppp.build(); cv::Size input_shape(shape[ov::layout::width_idx(layout)], shape[ov::layout::height_idx(layout)]); + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape.width, "model_info", "orig_width"); ov_model->set_rt_info(input_shape.height, "model_info", "orig_height"); } diff --git a/tests/cpp/precommit/CMakeLists.txt b/tests/cpp/precommit/CMakeLists.txt index e9f71a21..ce79f5eb 100644 --- a/tests/cpp/precommit/CMakeLists.txt +++ b/tests/cpp/precommit/CMakeLists.txt @@ -20,3 
+20,6 @@ find_package(OpenCV REQUIRED COMPONENTS imgcodecs) add_executable(test_sanity test_sanity.cpp) target_link_libraries(test_sanity gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) + +add_executable(test_model_config test_model_config.cpp) +target_link_libraries(test_model_config gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) diff --git a/tests/cpp/precommit/test_model_config.cpp b/tests/cpp/precommit/test_model_config.cpp new file mode 100644 index 00000000..43cd0a88 --- /dev/null +++ b/tests/cpp/precommit/test_model_config.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2020-2024 Intel Corporation + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils/config.h" + +using json = nlohmann::json; + +std::string DATA_DIR = "../data"; +std::string MODEL_PATH_TEMPLATE = "otx_models/%s.xml"; +std::string IMAGE_PATH = "coco128/images/train2017/000000000074.jpg"; + +std::string TMP_MODEL_FILE = "tmp_model.xml"; + +struct ModelData { + std::string name; + ModelData(const std::string& name) : name(name) {} +}; + +class MockAdapter : public OpenVINOInferenceAdapter { +public: + MockAdapter(const std::string& modelPath) : OpenVINOInferenceAdapter() { + loadModel(modelPath, "CPU"); + } +}; + +class ClassificationModelParameterizedTest : public testing::TestWithParam {}; + +class SSDModelParameterizedTest : public testing::TestWithParam {}; + +class ClassificationModelParameterizedTestSaveLoad : public testing::TestWithParam { +protected: + void TearDown() override { + auto fileName = TMP_MODEL_FILE; + std::remove(fileName.c_str()); + std::remove(fileName.replace(fileName.end() - 4, fileName.end(), ".bin").c_str()); + } +}; + +class DetectionModelParameterizedTestSaveLoad : public ClassificationModelParameterizedTestSaveLoad {}; + +template +std::string string_format(const std::string& fmt, Args... 
args) { + size_t size = snprintf(nullptr, 0, fmt.c_str(), args...); + std::string buf; + buf.reserve(size + 1); + buf.resize(size); + snprintf(&buf[0], size + 1, fmt.c_str(), args...); + return buf; +} + +TEST_P(ClassificationModelParameterizedTest, TestClassificationDefaultConfig) { + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + bool preload = true; + auto model = Classification::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + + auto config = model.adapter->getModelConfig(); + + std::string model_type; + model_type = utils::get_from_any_maps("model_type", config, {}, model_type); + EXPECT_EQ(model_type, "Classification"); + + bool embedded_processing; + embedded_processing = utils::get_from_any_maps("embedded_processing", config, {}, embedded_processing); + EXPECT_TRUE(embedded_processing); +} + +TEST_P(ClassificationModelParameterizedTest, TestClassificationCustomConfig) { + GTEST_SKIP() << "Classification config tests fail on CI"; + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + std::vector mock_labels; + size_t num_classes = 1000; + for (size_t i = 0; i < num_classes; i++) { + mock_labels.push_back(std::to_string(i)); + } + ov::AnyMap configuration = {{"layout", "data:HWC"}, {"resize_type", "fit_to_window"}, {"labels", mock_labels}}; + bool preload = true; + auto model = Classification::create_model(DATA_DIR + "/" + model_path, configuration, preload, "CPU"); + + auto config = model.adapter->getModelConfig(); + std::string layout; + layout = utils::get_from_any_maps("layout", config, {}, layout); + EXPECT_EQ(layout, configuration.at("layout").as()); + + std::string resize_type; + resize_type = utils::get_from_any_maps("resize_type", config, {}, resize_type); + EXPECT_EQ(resize_type, configuration.at("resize_type").as()); + + std::vector labels; + labels = utils::get_from_any_maps("labels", config, {}, labels); + for (size_t i = 0; i < num_classes; i++) { + EXPECT_EQ(labels[i], mock_labels[i]); + } +} + +// TEST_P(ClassificationModelParameterizedTestSaveLoad, TestClassificationCorrectnessAfterSaveLoad) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// std::cout << model_path << "\n"; +// bool preload = true; +// auto model = Classification::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); +// +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// +// auto result = model->infer(image)->topLabels; +// +// auto model_restored = ClassificationModel::create_model(TMP_MODEL_FILE, {}, preload, "CPU"); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->topLabels; +// +// EXPECT_EQ(result_restored[0].id, result[0].id); +// EXPECT_EQ(result_restored[0].score, result[0].score); +// } +// +// TEST_P(ClassificationModelParameterizedTestSaveLoad, TestClassificationCorrectnessAfterSaveLoadWithAdapter) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// bool preload = true; +// auto model = ClassificationModel::create_model(DATA_DIR + "/" + model_path, {}, 
preload, "CPU"); +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// auto result = model->infer(image)->topLabels; +// +// std::shared_ptr adapter = std::make_shared(TMP_MODEL_FILE); +// auto model_restored = ClassificationModel::create_model(adapter); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->topLabels; +// +// EXPECT_EQ(result_restored[0].id, result[0].id); +// EXPECT_EQ(result_restored[0].score, result[0].score); +// } + +TEST_P(SSDModelParameterizedTest, TestDetectionDefaultConfig) { + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + bool preload = true; + auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + + auto config = model.algorithm->adapter->getModelConfig(); + + std::string model_type; + model_type = utils::get_from_any_maps("model_type", config, {}, model_type); + EXPECT_EQ(model_type, "ssd"); + + bool embedded_processing; + embedded_processing = utils::get_from_any_maps("embedded_processing", config, {}, embedded_processing); + EXPECT_TRUE(embedded_processing); +} + +TEST_P(SSDModelParameterizedTest, TestDetectionCustomConfig) { + GTEST_SKIP() << "Detection config tests fail on CI"; + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + std::vector mock_labels; + size_t num_classes = 80; + for (size_t i = 0; i < num_classes; i++) { + mock_labels.push_back(std::to_string(i)); + } + ov::AnyMap configuration = {{"layout", "data:HWC"}, {"resize_type", "fit_to_window"}, {"labels", mock_labels}}; + bool preload = true; + auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, configuration, preload, "CPU"); + + auto config = model.algorithm->adapter->getModelConfig(); + std::string layout; + layout = utils::get_from_any_maps("layout", config, {}, layout); + EXPECT_EQ(layout, configuration.at("layout").as()); + + std::string resize_type; + resize_type = utils::get_from_any_maps("resize_type", config, {}, resize_type); + EXPECT_EQ(resize_type, configuration.at("resize_type").as()); + + std::vector labels; + labels = utils::get_from_any_maps("labels", config, {}, labels); + for (size_t i = 0; i < num_classes; i++) { + EXPECT_EQ(labels[i], mock_labels[i]); + } +} + +// TEST_P(DetectionModelParameterizedTestSaveLoad, TestDetctionCorrectnessAfterSaveLoad) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// bool preload = true; +// auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); +// +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// +// auto result = model->infer(image)->objects; +// +// image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// auto model_restored = DetectionModel::create_model(TMP_MODEL_FILE, {}, "", preload, "CPU"); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->objects; +// +// ASSERT_EQ(result.size(), result_restored.size()); +// +// for (size_t i = 0; i < result.size(); i++) { +// ASSERT_EQ(result[i].x, result_restored[i].x); +// ASSERT_EQ(result[i].y, result_restored[i].y); +// 
ASSERT_EQ(result[i].width, result_restored[i].width); +// ASSERT_EQ(result[i].height, result_restored[i].height); +// } +// } +// +// TEST_P(DetectionModelParameterizedTestSaveLoad, TestDetctionCorrectnessAfterSaveLoadWithAdapter) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// bool preload = true; +// auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, "", preload, "CPU"); +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// auto result = model->infer(image)->objects; +// +// image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// std::shared_ptr adapter = std::make_shared(TMP_MODEL_FILE); +// auto model_restored = DetectionModel::create_model(adapter); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->objects; +// +// ASSERT_EQ(result.size(), result_restored.size()); +// +// for (size_t i = 0; i < result.size(); i++) { +// ASSERT_EQ(result[i].x, result_restored[i].x); +// ASSERT_EQ(result[i].y, result_restored[i].y); +// ASSERT_EQ(result[i].width, result_restored[i].width); +// ASSERT_EQ(result[i].height, result_restored[i].height); +// } +// } + +INSTANTIATE_TEST_SUITE_P(ClassificationTestInstance, + ClassificationModelParameterizedTest, + ::testing::Values(ModelData("mlc_mobilenetv3_large_voc"))); +// INSTANTIATE_TEST_SUITE_P(ClassificationTestInstance, +// ClassificationModelParameterizedTestSaveLoad, +// ::testing::Values(ModelData("mlc_mobilenetv3_large_voc"))); +INSTANTIATE_TEST_SUITE_P(SSDTestInstance, + SSDModelParameterizedTest, + ::testing::Values(ModelData("detection_model_with_xai_head"))); +// INSTANTIATE_TEST_SUITE_P(SSDTestInstance, +// DetectionModelParameterizedTestSaveLoad, +// ::testing::Values(ModelData("detection_model_with_xai_head"))); + +class InputParser { +public: + InputParser(int& argc, char** argv) { + for (int i = 1; i < argc; ++i) + this->tokens.push_back(std::string(argv[i])); + } + + const std::string& getCmdOption(const std::string& option) const { + std::vector::const_iterator itr; + itr = std::find(this->tokens.begin(), this->tokens.end(), option); + if (itr != this->tokens.end() && ++itr != this->tokens.end()) { + return *itr; + } + static const std::string empty_string(""); + return empty_string; + } + + bool cmdOptionExists(const std::string& option) const { + return std::find(this->tokens.begin(), this->tokens.end(), option) != this->tokens.end(); + } + +private: + std::vector tokens; +}; + +void print_help(const char* program_name) { + std::cout << "Usage: " << program_name << "-d " << std::endl; +} + +int main(int argc, char** argv) { + InputParser input(argc, argv); + + if (input.cmdOptionExists("-h")) { + print_help(argv[0]); + return 1; + } + + const std::string& data_dir = input.getCmdOption("-d"); + if (!data_dir.empty()) { + DATA_DIR = data_dir; + } else { + print_help(argv[0]); + return 1; + } + + testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/tests/cpp/precommit/test_sanity.cpp b/tests/cpp/precommit/test_sanity.cpp index 177520cd..106d1219 100644 --- a/tests/cpp/precommit/test_sanity.cpp +++ b/tests/cpp/precommit/test_sanity.cpp @@ -3,11 +3,11 @@ * SPDX-License-Identifier: Apache-2.0 */ #include +#include 
#include #include #include #include -#include #include #include From 8c1aa81121017ea714b6827593f84d672eb39128 Mon Sep 17 00:00:00 2001 From: Ronald Hecker Date: Tue, 1 Jul 2025 09:16:54 +0200 Subject: [PATCH 04/10] Enable precommit tests in github actions again --- .github/workflows/test_precommit.yml | 75 ++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/.github/workflows/test_precommit.yml b/.github/workflows/test_precommit.yml index ef61ca6f..96ac0268 100644 --- a/.github/workflows/test_precommit.yml +++ b/.github/workflows/test_precommit.yml @@ -51,6 +51,81 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN}} # missingInclude: cppcheck can't find stl, openvino, opencv other_options: --suppress=missingInclude -Isrc/cpp/models/include -Isrc/cpp/utils/include -Isrc/cpp/pipelines/include --check-config + CPP-Precommit: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.10" + cache: pip + - name: Create and start a virtual environment + run: | + python -m venv venv + source venv/bin/activate + - name: Install dependencies + run: | + source venv/bin/activate + python -m pip install --upgrade pip + pip install src/python/[tests,build] --extra-index-url https://download.pytorch.org/whl/cpu + + sudo bash src/cpp/install_dependencies.sh + - name: Prepare test data + run: | + source venv/bin/activate + python tests/cpp/precommit/prepare_data.py -d data -p tests/cpp/precommit/public_scope.json + - name: Build + run: | + mkdir build && cd build + pip install nanobind==2.4.0 + pip install typing_extensions==4.12.2 + cmake ../tests/cpp/precommit/ + cmake --build . -j $((`nproc`*2+2)) + - name: Run test + run: | + build/test_sanity -d data -p tests/cpp/precommit/public_scope.json && build/test_model_config -d data + CPP-Windows-Precommit: + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: pip + - name: Create and start a virtual environment + shell: bash + run: | + python -m venv venv + source venv/Scripts/activate + - name: Install dependencies + shell: bash + run: | + source venv/Scripts/activate + python -m pip install --upgrade pip + pip install src/python/[tests,build] --extra-index-url https://download.pytorch.org/whl/cpu + curl https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows/w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64.zip --output w_openvino_toolkit_windows.zip + unzip w_openvino_toolkit_windows.zip + rm w_openvino_toolkit_windows.zip + curl -L https://github.com/opencv/opencv/releases/download/4.10.0/opencv-4.10.0-windows.exe --output opencv-4.10.0-windows.exe + ./opencv-4.10.0-windows.exe -oopencv -y + rm opencv-4.10.0-windows.exe + - name: Prepare test data + shell: bash + run: | + source venv/Scripts/activate + python tests/cpp/precommit/prepare_data.py -d data -p tests/cpp/precommit/public_scope.json + - name: Build + shell: bash + run: | + mkdir build && cd build + MSYS_NO_PATHCONV=1 cmake ../examples/cpp/ -DOpenVINO_DIR=$GITHUB_WORKSPACE/w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64/runtime/cmake -DOpenCV_DIR=$GITHUB_WORKSPACE/opencv/opencv/build -DCMAKE_CXX_FLAGS=/WX + cmake --build . --config Release -j $((`nproc`*2+2)) + - name: Run sync sample + shell: cmd + # .\w_openvino_toolkit_windows_2023.0.0.10926.b4452d56304_x86_64\setupvars.bat exits with 0 code without moving to a next command. 
Set PATH manually + run: | + set PATH=opencv\opencv\build\x64\vc16\bin;w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64\runtime\bin\intel64\Release;w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64\runtime\3rdparty\tbb\bin;%PATH% + .\build\Release\synchronous_api.exe .\data\otx_models\detection_model_with_xai_head.xml .\data\BloodImage_00007.jpg serving_api: strategy: fail-fast: false From 4a5b964a76a84e63c45495fe93528365c21148e6 Mon Sep 17 00:00:00 2001 From: Ronald Hecker Date: Tue, 1 Jul 2025 10:04:14 +0200 Subject: [PATCH 05/10] Disable windows for now --- .github/workflows/test_precommit.yml | 42 ---------------------------- 1 file changed, 42 deletions(-) diff --git a/.github/workflows/test_precommit.yml b/.github/workflows/test_precommit.yml index 96ac0268..f1f18e09 100644 --- a/.github/workflows/test_precommit.yml +++ b/.github/workflows/test_precommit.yml @@ -84,48 +84,6 @@ jobs: - name: Run test run: | build/test_sanity -d data -p tests/cpp/precommit/public_scope.json && build/test_model_config -d data - CPP-Windows-Precommit: - runs-on: windows-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: 3.9 - cache: pip - - name: Create and start a virtual environment - shell: bash - run: | - python -m venv venv - source venv/Scripts/activate - - name: Install dependencies - shell: bash - run: | - source venv/Scripts/activate - python -m pip install --upgrade pip - pip install src/python/[tests,build] --extra-index-url https://download.pytorch.org/whl/cpu - curl https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows/w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64.zip --output w_openvino_toolkit_windows.zip - unzip w_openvino_toolkit_windows.zip - rm w_openvino_toolkit_windows.zip - curl -L https://github.com/opencv/opencv/releases/download/4.10.0/opencv-4.10.0-windows.exe --output opencv-4.10.0-windows.exe - ./opencv-4.10.0-windows.exe -oopencv -y - rm opencv-4.10.0-windows.exe - - name: Prepare test data - shell: bash - run: | - source venv/Scripts/activate - python tests/cpp/precommit/prepare_data.py -d data -p tests/cpp/precommit/public_scope.json - - name: Build - shell: bash - run: | - mkdir build && cd build - MSYS_NO_PATHCONV=1 cmake ../examples/cpp/ -DOpenVINO_DIR=$GITHUB_WORKSPACE/w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64/runtime/cmake -DOpenCV_DIR=$GITHUB_WORKSPACE/opencv/opencv/build -DCMAKE_CXX_FLAGS=/WX - cmake --build . --config Release -j $((`nproc`*2+2)) - - name: Run sync sample - shell: cmd - # .\w_openvino_toolkit_windows_2023.0.0.10926.b4452d56304_x86_64\setupvars.bat exits with 0 code without moving to a next command. 
Set PATH manually - run: | - set PATH=opencv\opencv\build\x64\vc16\bin;w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64\runtime\bin\intel64\Release;w_openvino_toolkit_windows_2024.6.0.17404.4c0f47d2335_x86_64\runtime\3rdparty\tbb\bin;%PATH% - .\build\Release\synchronous_api.exe .\data\otx_models\detection_model_with_xai_head.xml .\data\BloodImage_00007.jpg serving_api: strategy: fail-fast: false From 236ea05c7698f0e71bdd6d6e3bc4b8f1d79d9589 Mon Sep 17 00:00:00 2001 From: Ronald Hecker Date: Tue, 1 Jul 2025 10:19:08 +0200 Subject: [PATCH 06/10] Fix examples readme for prettier --- examples/cpp/README.md | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/examples/cpp/README.md b/examples/cpp/README.md index a4cc5cfb..d391467f 100644 --- a/examples/cpp/README.md +++ b/examples/cpp/README.md @@ -18,28 +18,22 @@ This example demonstrates how to use a C++ API of OpenVINO Model API for synchro - Build example: - Create `build` folder and navigate into it: - ```bash mkdir build && cd build ``` - + - Run cmake: -- Run cmake: - - -```bash -cmake ../ -``` - - + ```bash + cmake ../ + ``` -- Build: + - Build: -```bash -make -j -``` + ```bash + make -j + ``` - Download a model by running a Python code with Model API, see Python [example](../../python/synchronous_api/README.md): From 164c7d085bd6d927ce0c5b0fed55283e0dd8c59a Mon Sep 17 00:00:00 2001 From: "Hecker, Ronald" Date: Wed, 2 Jul 2025 09:50:12 +0200 Subject: [PATCH 07/10] Cleanup comment and default arg in example --- examples/cpp/main.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/cpp/main.cpp b/examples/cpp/main.cpp index 5aba471c..fb18e8b5 100644 --- a/examples/cpp/main.cpp +++ b/examples/cpp/main.cpp @@ -30,8 +30,7 @@ int main(int argc, char* argv[]) try { } // Instantiate Object Detection model - auto model = - DetectionModel::create_model(argv[1], {}); // works with SSD models.
Download it using Python Model API + auto model = DetectionModel::create_model(argv[1]); // Run the inference auto result = model.infer(image); From 413866651db9087e538ab973456dc84ade98c2b3 Mon Sep 17 00:00:00 2001 From: "Hecker, Ronald" Date: Wed, 2 Jul 2025 10:23:51 +0200 Subject: [PATCH 08/10] Undo changes to readme --- examples/cpp/README.md | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/examples/cpp/README.md b/examples/cpp/README.md index d391467f..8b5b196a 100644 --- a/examples/cpp/README.md +++ b/examples/cpp/README.md @@ -16,24 +16,29 @@ This example demonstrates how to use a C++ API of OpenVINO Model API for synchro ``` - Build example: - - Create `build` folder and navigate into it: + ```bash mkdir build && cd build ``` - - Run cmake: + - ```bash - cmake ../ - ``` +- Run cmake: + - - Build: +```bash +cmake ../ +``` - ```bash - make -j - ``` + + +- Build: + +```bash +make -j +``` - Download a model by running a Python code with Model API, see Python [example](../../python/synchronous_api/README.md): From b17dfdb75c7842cc1346c53f41dd6edcfcac14e0 Mon Sep 17 00:00:00 2001 From: "Hecker, Ronald" Date: Wed, 2 Jul 2025 13:58:10 +0200 Subject: [PATCH 09/10] README :( --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 25f654d1..fb997934 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,6 @@ Training Extensions embed all the metadata required for inference into model fil ``` - Build library: - - Create `build` folder and navigate into it: ```bash From 94846c685906c747975d883a8d0adf719b03d1fd Mon Sep 17 00:00:00 2001 From: "Hecker, Ronald" Date: Wed, 2 Jul 2025 13:59:51 +0200 Subject: [PATCH 10/10] Remove unnecessary pip installs --- .github/workflows/test_precommit.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/test_precommit.yml b/.github/workflows/test_precommit.yml index f1f18e09..cdf55e2f 100644 --- a/.github/workflows/test_precommit.yml +++ b/.github/workflows/test_precommit.yml @@ -77,8 +77,6 @@ jobs: - name: Build run: | mkdir build && cd build - pip install nanobind==2.4.0 - pip install typing_extensions==4.12.2 cmake ../tests/cpp/precommit/ cmake --build . -j $((`nproc`*2+2)) - name: Run test