diff --git a/.github/workflows/test_precommit.yml b/.github/workflows/test_precommit.yml index ef61ca6f..cdf55e2f 100644 --- a/.github/workflows/test_precommit.yml +++ b/.github/workflows/test_precommit.yml @@ -51,6 +51,37 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN}} # missingInclude: cppcheck can't find stl, openvino, opencv other_options: --suppress=missingInclude -Isrc/cpp/models/include -Isrc/cpp/utils/include -Isrc/cpp/pipelines/include --check-config + CPP-Precommit: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.10" + cache: pip + - name: Create and start a virtual environment + run: | + python -m venv venv + source venv/bin/activate + - name: Install dependencies + run: | + source venv/bin/activate + python -m pip install --upgrade pip + pip install src/python/[tests,build] --extra-index-url https://download.pytorch.org/whl/cpu + + sudo bash src/cpp/install_dependencies.sh + - name: Prepare test data + run: | + source venv/bin/activate + python tests/cpp/precommit/prepare_data.py -d data -p tests/cpp/precommit/public_scope.json + - name: Build + run: | + mkdir build && cd build + cmake ../tests/cpp/precommit/ + cmake --build . -j $((`nproc`*2+2)) + - name: Run test + run: | + build/test_sanity -d data -p tests/cpp/precommit/public_scope.json && build/test_model_config -d data serving_api: strategy: fail-fast: false diff --git a/examples/cpp/main.cpp b/examples/cpp/main.cpp index 47d2eaf9..fb18e8b5 100644 --- a/examples/cpp/main.cpp +++ b/examples/cpp/main.cpp @@ -30,7 +30,7 @@ int main(int argc, char* argv[]) try { } // Instantiate Object Detection model - auto model = DetectionModel::load(argv[1], {}); // works with SSD models. 
Download it using Python Model API + auto model = DetectionModel::create_model(argv[1]); // Run the inference auto result = model.infer(image); diff --git a/src/cpp/include/tasks/anomaly.h b/src/cpp/include/tasks/anomaly.h index 7e9a0d7c..949f8e56 100644 --- a/src/cpp/include/tasks/anomaly.h +++ b/src/cpp/include/tasks/anomaly.h @@ -18,7 +18,7 @@ class Anomaly { std::shared_ptr adapter; VisionPipeline pipeline; - Anomaly(std::shared_ptr adapter) : adapter(adapter) { + Anomaly(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -28,18 +28,22 @@ class Anomaly { return postprocess(result); }); - auto config = adapter->getModelConfig(); - image_threshold = utils::get_from_any_maps("image_threshold", config, {}, image_threshold); - pixel_threshold = utils::get_from_any_maps("pixel_threshold", config, {}, pixel_threshold); - normalization_scale = utils::get_from_any_maps("normalization_scale", config, {}, normalization_scale); - task = utils::get_from_any_maps("pixel_threshold", config, {}, task); - labels = utils::get_from_any_maps("labels", config, {}, labels); - input_shape.width = utils::get_from_any_maps("orig_width", config, {}, input_shape.width); - input_shape.height = utils::get_from_any_maps("orig_height", config, {}, input_shape.height); + auto model_config = adapter->getModelConfig(); + image_threshold = utils::get_from_any_maps("image_threshold", user_config, model_config, image_threshold); + pixel_threshold = utils::get_from_any_maps("pixel_threshold", user_config, model_config, pixel_threshold); + normalization_scale = + utils::get_from_any_maps("normalization_scale", user_config, model_config, normalization_scale); + task = utils::get_from_any_maps("task", user_config, model_config, task); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, 
input_shape.width); + input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.height); } static void serialize(std::shared_ptr& ov_model); - static Anomaly load(const std::string& model_path); + static Anomaly create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); AnomalyResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/classification.h b/src/cpp/include/tasks/classification.h index 528de18e..bb3ecc8d 100644 --- a/src/cpp/include/tasks/classification.h +++ b/src/cpp/include/tasks/classification.h @@ -19,7 +19,7 @@ class Classification { std::shared_ptr adapter; VisionPipeline pipeline; - Classification(std::shared_ptr adapter) : adapter(adapter) { + Classification(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -29,16 +29,19 @@ class Classification { return postprocess(result); }); - auto config = adapter->getModelConfig(); - labels = utils::get_from_any_maps("labels", config, {}, labels); - - topk = utils::get_from_any_maps("topk", config, {}, topk); - multilabel = utils::get_from_any_maps("multilabel", config, {}, multilabel); - output_raw_scores = utils::get_from_any_maps("output_raw_scores", config, {}, output_raw_scores); - confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold); - hierarchical = utils::get_from_any_maps("hierarchical", config, {}, hierarchical); - hierarchical_config = utils::get_from_any_maps("hierarchical_config", config, {}, hierarchical_config); - hierarchical_postproc = utils::get_from_any_maps("hierarchical_postproc", config, {}, hierarchical_postproc); + auto model_config = adapter->getModelConfig(); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + + topk = 
utils::get_from_any_maps("topk", user_config, model_config, topk); + multilabel = utils::get_from_any_maps("multilabel", user_config, model_config, multilabel); + output_raw_scores = utils::get_from_any_maps("output_raw_scores", user_config, model_config, output_raw_scores); + confidence_threshold = + utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold); + hierarchical = utils::get_from_any_maps("hierarchical", user_config, model_config, hierarchical); + hierarchical_config = + utils::get_from_any_maps("hierarchical_config", user_config, model_config, hierarchical_config); + hierarchical_postproc = + utils::get_from_any_maps("hierarchical_postproc", user_config, model_config, hierarchical_postproc); if (hierarchical) { if (hierarchical_config.empty()) { throw std::runtime_error("Error: empty hierarchical classification config"); @@ -55,7 +58,10 @@ class Classification { } static void serialize(std::shared_ptr& ov_model); - static Classification load(const std::string& model_path); + static Classification create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); ClassificationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/detection.h b/src/cpp/include/tasks/detection.h index e5405133..dead9cba 100644 --- a/src/cpp/include/tasks/detection.h +++ b/src/cpp/include/tasks/detection.h @@ -18,10 +18,11 @@ class DetectionModel { public: std::unique_ptr> pipeline; + std::unique_ptr algorithm; - DetectionModel(std::unique_ptr algorithm, const ov::AnyMap& configuration) : algorithm(std::move(algorithm)) { + DetectionModel(std::unique_ptr algorithm, const ov::AnyMap& user_config) : algorithm(std::move(algorithm)) { auto config = this->algorithm->adapter->getModelConfig(); - if (configuration.count("tiling") && configuration.at("tiling").as()) { + if (user_config.count("tiling") && 
user_config.at("tiling").as()) { if (!utils::config_contains_tiling_info(config)) { throw std::runtime_error("Model config does not contain tiling properties."); } @@ -67,11 +68,11 @@ class DetectionModel { const std::vector& tile_coords, const utils::TilingInfo& tiling_info); - static DetectionModel load(const std::string& model_path, const ov::AnyMap& configuration = {}); + static DetectionModel create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); DetectionResult infer(cv::Mat image); std::vector inferBatch(std::vector image); - -private: - std::unique_ptr algorithm; }; diff --git a/src/cpp/include/tasks/instance_segmentation.h b/src/cpp/include/tasks/instance_segmentation.h index bc1905f1..513de093 100644 --- a/src/cpp/include/tasks/instance_segmentation.h +++ b/src/cpp/include/tasks/instance_segmentation.h @@ -18,7 +18,7 @@ class InstanceSegmentation { std::shared_ptr adapter; VisionPipeline pipeline; - InstanceSegmentation(std::shared_ptr adapter) : adapter(adapter) { + InstanceSegmentation(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -28,15 +28,19 @@ class InstanceSegmentation { return postprocess(result); }); - auto config = adapter->getModelConfig(); - labels = utils::get_from_any_maps("labels", config, {}, labels); - confidence_threshold = utils::get_from_any_maps("confidence_threshold", config, {}, confidence_threshold); - input_shape.width = utils::get_from_any_maps("orig_width", config, {}, input_shape.width); - input_shape.height = utils::get_from_any_maps("orig_height", config, {}, input_shape.width); + auto model_config = adapter->getModelConfig(); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + confidence_threshold = + utils::get_from_any_maps("confidence_threshold", user_config, model_config, confidence_threshold); + 
input_shape.width = utils::get_from_any_maps("orig_width", user_config, model_config, input_shape.width); + input_shape.height = utils::get_from_any_maps("orig_height", user_config, model_config, input_shape.height); } static void serialize(std::shared_ptr& ov_model); - static InstanceSegmentation load(const std::string& model_path); + static InstanceSegmentation create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); InstanceSegmentationResult infer(cv::Mat image); std::vector inferBatch(std::vector image); diff --git a/src/cpp/include/tasks/semantic_segmentation.h b/src/cpp/include/tasks/semantic_segmentation.h index 47d836dc..ee54409c 100644 --- a/src/cpp/include/tasks/semantic_segmentation.h +++ b/src/cpp/include/tasks/semantic_segmentation.h @@ -17,7 +17,7 @@ class SemanticSegmentation { public: VisionPipeline pipeline; std::shared_ptr adapter; - SemanticSegmentation(std::shared_ptr adapter) : adapter(adapter) { + SemanticSegmentation(std::shared_ptr adapter, const ov::AnyMap& user_config) : adapter(adapter) { pipeline = VisionPipeline( adapter, [&](cv::Mat image) { @@ -27,14 +27,17 @@ class SemanticSegmentation { return postprocess(result); }); - auto config = adapter->getModelConfig(); - labels = utils::get_from_any_maps("labels", config, {}, labels); - soft_threshold = utils::get_from_any_maps("soft_threshold", config, {}, soft_threshold); - blur_strength = utils::get_from_any_maps("blur_strength", config, {}, blur_strength); + auto model_config = adapter->getModelConfig(); + labels = utils::get_from_any_maps("labels", user_config, model_config, labels); + soft_threshold = utils::get_from_any_maps("soft_threshold", user_config, model_config, soft_threshold); + blur_strength = utils::get_from_any_maps("blur_strength", user_config, model_config, blur_strength); } static void serialize(std::shared_ptr& ov_model); - static SemanticSegmentation load(const std::string& 
model_path); + static SemanticSegmentation create_model(const std::string& model_path, + const ov::AnyMap& user_config = {}, + bool preload = true, + const std::string& device = "AUTO"); std::map preprocess(cv::Mat); SemanticSegmentationResult postprocess(InferenceResult& infResult); diff --git a/src/cpp/src/tasks/anomaly.cpp b/src/cpp/src/tasks/anomaly.cpp index 29f42d46..234b54ed 100644 --- a/src/cpp/src/tasks/anomaly.cpp +++ b/src/cpp/src/tasks/anomaly.cpp @@ -47,16 +47,20 @@ void Anomaly::serialize(std::shared_ptr& ov_model) { mean_values, scale_values); + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape[0], "model_info", "orig_width"); ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Anomaly Anomaly::load(const std::string& model_path) { +Anomaly Anomaly::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); if (!model_type.empty()) { std::cout << "has model type in info: " << model_type << std::endl; @@ -65,9 +69,11 @@ Anomaly Anomaly::load(const std::string& model_path) { } adapter->applyModelTransform(Anomaly::serialize); - adapter->compileModel("AUTO", {}); + if (preload) { + adapter->compileModel(device, user_config); + } - return Anomaly(adapter); + return Anomaly(adapter, user_config); } AnomalyResult Anomaly::infer(cv::Mat image) { diff --git a/src/cpp/src/tasks/classification.cpp b/src/cpp/src/tasks/classification.cpp index 3a74d341..208b8d38 100644 --- a/src/cpp/src/tasks/classification.cpp +++ b/src/cpp/src/tasks/classification.cpp @@ 
-176,24 +176,30 @@ void Classification::serialize(std::shared_ptr& ov_model) { addOrFindSoftmaxAndTopkOutputs(ov_model, topk, output_raw_scores); } + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape[0], "model_info", "orig_width"); ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } -Classification Classification::load(const std::string& model_path) { +Classification Classification::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); if (model_type.empty() || model_type != "Classification") { throw std::runtime_error("Incorrect or unsupported model_type, expected: Classification"); } adapter->applyModelTransform(Classification::serialize); - adapter->compileModel("AUTO", {}); + if (preload) { + adapter->compileModel(device, user_config); + } - return Classification(adapter); + return Classification(adapter, user_config); } ClassificationResult Classification::infer(cv::Mat image) { diff --git a/src/cpp/src/tasks/detection.cpp b/src/cpp/src/tasks/detection.cpp index e5f555c2..d4f5cfc6 100644 --- a/src/cpp/src/tasks/detection.cpp +++ b/src/cpp/src/tasks/detection.cpp @@ -13,21 +13,26 @@ #include "utils/nms.h" #include "utils/tensor.h" -DetectionModel DetectionModel::load(const std::string& model_path, const ov::AnyMap& configuration) { +DetectionModel DetectionModel::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", 
{}, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), user_config, model_type); std::transform(model_type.begin(), model_type.end(), model_type.begin(), ::tolower); if (model_type.empty() || model_type != "ssd") { throw std::runtime_error("Incorrect or unsupported model_type, expected: ssd"); } adapter->applyModelTransform(SSD::serialize); - adapter->compileModel("AUTO", {}); + if (preload) { + adapter->compileModel(device, user_config); + } - return DetectionModel(std::make_unique(adapter), configuration); + return DetectionModel(std::make_unique(adapter), user_config); } InferenceInput DetectionModel::preprocess(cv::Mat image) { diff --git a/src/cpp/src/tasks/detection/ssd.cpp b/src/cpp/src/tasks/detection/ssd.cpp index 681e7d18..27f5497d 100644 --- a/src/cpp/src/tasks/detection/ssd.cpp +++ b/src/cpp/src/tasks/detection/ssd.cpp @@ -115,6 +115,7 @@ void SSD::serialize(std::shared_ptr& ov_model) { // prepareMultipleOutputs(ov_model); //This does nothing from what I can see. 
} + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape[0], "model_info", "orig_width"); ov_model->set_rt_info(input_shape[1], "model_info", "orig_height"); } diff --git a/src/cpp/src/tasks/instance_segmentation.cpp b/src/cpp/src/tasks/instance_segmentation.cpp index a98cbc32..12cb4405 100644 --- a/src/cpp/src/tasks/instance_segmentation.cpp +++ b/src/cpp/src/tasks/instance_segmentation.cpp @@ -186,24 +186,30 @@ void InstanceSegmentation::serialize(std::shared_ptr& ov_model) { saliency_map_name + ", " + feature_vector_name + " and 3 or 4 other outputs"); } + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape.width, "model_info", "orig_width"); ov_model->set_rt_info(input_shape.height, "model_info", "orig_height"); } -InstanceSegmentation InstanceSegmentation::load(const std::string& model_path) { +InstanceSegmentation InstanceSegmentation::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", user_config, adapter->getModelConfig(), model_type); if (model_type.empty() || model_type != "MaskRCNN") { throw std::runtime_error("Incorrect or unsupported model_type, expected: MaskRCNN"); } adapter->applyModelTransform(InstanceSegmentation::serialize); - adapter->compileModel("AUTO", {}); + if (preload) { + adapter->compileModel(device, user_config); + } - return InstanceSegmentation(adapter); + return InstanceSegmentation(adapter, user_config); } InstanceSegmentationResult InstanceSegmentation::infer(cv::Mat image) { diff --git a/src/cpp/src/tasks/semantic_segmentation.cpp 
b/src/cpp/src/tasks/semantic_segmentation.cpp index 588045ad..3b2386c5 100644 --- a/src/cpp/src/tasks/semantic_segmentation.cpp +++ b/src/cpp/src/tasks/semantic_segmentation.cpp @@ -20,20 +20,25 @@ cv::Mat get_activation_map(const cv::Mat& features) { return int_act_map; } -SemanticSegmentation SemanticSegmentation::load(const std::string& model_path) { +SemanticSegmentation SemanticSegmentation::create_model(const std::string& model_path, + const ov::AnyMap& user_config, + bool preload, + const std::string& device) { auto adapter = std::make_shared(); - adapter->loadModel(model_path, "", {}, false); + adapter->loadModel(model_path, device, user_config, false); std::string model_type; - model_type = utils::get_from_any_maps("model_type", adapter->getModelConfig(), {}, model_type); + model_type = utils::get_from_any_maps("model_type", user_config, adapter->getModelConfig(), model_type); if (model_type.empty() || model_type != "Segmentation") { throw std::runtime_error("Incorrect or unsupported model_type, expected: Segmentation"); } adapter->applyModelTransform(SemanticSegmentation::serialize); - adapter->compileModel("AUTO", {}); + if (preload) { + adapter->compileModel(device, user_config); + } - return SemanticSegmentation(adapter); + return SemanticSegmentation(adapter, user_config); } void SemanticSegmentation::serialize(std::shared_ptr& ov_model) { @@ -112,6 +117,7 @@ void SemanticSegmentation::serialize(std::shared_ptr& ov_model) { ov_model = ppp.build(); cv::Size input_shape(shape[ov::layout::width_idx(layout)], shape[ov::layout::height_idx(layout)]); + ov_model->set_rt_info(true, "model_info", "embedded_processing"); ov_model->set_rt_info(input_shape.width, "model_info", "orig_width"); ov_model->set_rt_info(input_shape.height, "model_info", "orig_height"); } diff --git a/tests/cpp/CMakeLists.txt b/tests/cpp/CMakeLists.txt index 90c1b0ab..919dcec5 100644 --- a/tests/cpp/CMakeLists.txt +++ b/tests/cpp/CMakeLists.txt @@ -20,6 +20,3 @@ find_package(OpenCV 
REQUIRED COMPONENTS imgcodecs) add_executable(test_accuracy test_accuracy.cpp) target_link_libraries(test_accuracy gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) - -#include(GoogleTest) -#gtest_discover_tests(test_accuracy) diff --git a/tests/cpp/precommit/CMakeLists.txt b/tests/cpp/precommit/CMakeLists.txt new file mode 100644 index 00000000..ce79f5eb --- /dev/null +++ b/tests/cpp/precommit/CMakeLists.txt @@ -0,0 +1,25 @@ +cmake_minimum_required(VERSION 3.26) + + +project(tests) + +add_subdirectory(../../../src/cpp ${tests_BINARY_DIR}/src/cpp) + +include(FetchContent) + +FetchContent_Declare(json GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG d41ca94fa85d5119852e2f7a3f94335cc7cb0486 # PR #4709, fixes cmake deprecation warnings + ) +FetchContent_Declare(googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG a7f443b80b105f940225332ed3c31f2790092f47 # latest main +) +FetchContent_MakeAvailable(json googletest) + +find_package(OpenCV REQUIRED COMPONENTS imgcodecs) + +add_executable(test_sanity test_sanity.cpp) +target_link_libraries(test_sanity gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) + +add_executable(test_model_config test_model_config.cpp) +target_link_libraries(test_model_config gtest_main nlohmann_json::nlohmann_json model_api ${OpenCV_LIBRARIES}) diff --git a/tests/cpp/precommit/test_model_config.cpp b/tests/cpp/precommit/test_model_config.cpp new file mode 100644 index 00000000..43cd0a88 --- /dev/null +++ b/tests/cpp/precommit/test_model_config.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2020-2024 Intel Corporation + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils/config.h" + +using json = nlohmann::json; + +std::string DATA_DIR = "../data"; +std::string MODEL_PATH_TEMPLATE = "otx_models/%s.xml"; 
+std::string IMAGE_PATH = "coco128/images/train2017/000000000074.jpg"; + +std::string TMP_MODEL_FILE = "tmp_model.xml"; + +struct ModelData { + std::string name; + ModelData(const std::string& name) : name(name) {} +}; + +class MockAdapter : public OpenVINOInferenceAdapter { +public: + MockAdapter(const std::string& modelPath) : OpenVINOInferenceAdapter() { + loadModel(modelPath, "CPU"); + } +}; + +class ClassificationModelParameterizedTest : public testing::TestWithParam {}; + +class SSDModelParameterizedTest : public testing::TestWithParam {}; + +class ClassificationModelParameterizedTestSaveLoad : public testing::TestWithParam { +protected: + void TearDown() override { + auto fileName = TMP_MODEL_FILE; + std::remove(fileName.c_str()); + std::remove(fileName.replace(fileName.end() - 4, fileName.end(), ".bin").c_str()); + } +}; + +class DetectionModelParameterizedTestSaveLoad : public ClassificationModelParameterizedTestSaveLoad {}; + +template +std::string string_format(const std::string& fmt, Args... 
args) { + size_t size = snprintf(nullptr, 0, fmt.c_str(), args...); + std::string buf; + buf.reserve(size + 1); + buf.resize(size); + snprintf(&buf[0], size + 1, fmt.c_str(), args...); + return buf; +} + +TEST_P(ClassificationModelParameterizedTest, TestClassificationDefaultConfig) { + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + bool preload = true; + auto model = Classification::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + + auto config = model.adapter->getModelConfig(); + + std::string model_type; + model_type = utils::get_from_any_maps("model_type", config, {}, model_type); + EXPECT_EQ(model_type, "Classification"); + + bool embedded_processing; + embedded_processing = utils::get_from_any_maps("embedded_processing", config, {}, embedded_processing); + EXPECT_TRUE(embedded_processing); +} + +TEST_P(ClassificationModelParameterizedTest, TestClassificationCustomConfig) { + GTEST_SKIP() << "Classification config tests fail on CI"; + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + std::vector mock_labels; + size_t num_classes = 1000; + for (size_t i = 0; i < num_classes; i++) { + mock_labels.push_back(std::to_string(i)); + } + ov::AnyMap configuration = {{"layout", "data:HWC"}, {"resize_type", "fit_to_window"}, {"labels", mock_labels}}; + bool preload = true; + auto model = Classification::create_model(DATA_DIR + "/" + model_path, configuration, preload, "CPU"); + + auto config = model.adapter->getModelConfig(); + std::string layout; + layout = utils::get_from_any_maps("layout", config, {}, layout); + EXPECT_EQ(layout, configuration.at("layout").as()); + + std::string resize_type; + resize_type = utils::get_from_any_maps("resize_type", config, {}, resize_type); + EXPECT_EQ(resize_type, configuration.at("resize_type").as()); + + std::vector labels; + labels = utils::get_from_any_maps("labels", config, {}, labels); + for (size_t i 
= 0; i < num_classes; i++) { + EXPECT_EQ(labels[i], mock_labels[i]); + } +} + +// TEST_P(ClassificationModelParameterizedTestSaveLoad, TestClassificationCorrectnessAfterSaveLoad) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// std::cout << model_path << "\n"; +// bool preload = true; +// auto model = Classification::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); +// +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// +// auto result = model->infer(image)->topLabels; +// +// auto model_restored = ClassificationModel::create_model(TMP_MODEL_FILE, {}, preload, "CPU"); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->topLabels; +// +// EXPECT_EQ(result_restored[0].id, result[0].id); +// EXPECT_EQ(result_restored[0].score, result[0].score); +// } +// +// TEST_P(ClassificationModelParameterizedTestSaveLoad, TestClassificationCorrectnessAfterSaveLoadWithAdapter) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// bool preload = true; +// auto model = ClassificationModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// auto result = model->infer(image)->topLabels; +// +// std::shared_ptr adapter = std::make_shared(TMP_MODEL_FILE); +// auto model_restored = ClassificationModel::create_model(adapter); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->topLabels; +// +// EXPECT_EQ(result_restored[0].id, 
result[0].id); +// EXPECT_EQ(result_restored[0].score, result[0].score); +// } + +TEST_P(SSDModelParameterizedTest, TestDetectionDefaultConfig) { + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + bool preload = true; + auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + + auto config = model.algorithm->adapter->getModelConfig(); + + std::string model_type; + model_type = utils::get_from_any_maps("model_type", config, {}, model_type); + EXPECT_EQ(model_type, "ssd"); + + bool embedded_processing; + embedded_processing = utils::get_from_any_maps("embedded_processing", config, {}, embedded_processing); + EXPECT_TRUE(embedded_processing); +} + +TEST_P(SSDModelParameterizedTest, TestDetectionCustomConfig) { + GTEST_SKIP() << "Detection config tests fail on CI"; + auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); + std::vector mock_labels; + size_t num_classes = 80; + for (size_t i = 0; i < num_classes; i++) { + mock_labels.push_back(std::to_string(i)); + } + ov::AnyMap configuration = {{"layout", "data:HWC"}, {"resize_type", "fit_to_window"}, {"labels", mock_labels}}; + bool preload = true; + auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, configuration, preload, "CPU"); + + auto config = model.algorithm->adapter->getModelConfig(); + std::string layout; + layout = utils::get_from_any_maps("layout", config, {}, layout); + EXPECT_EQ(layout, configuration.at("layout").as()); + + std::string resize_type; + resize_type = utils::get_from_any_maps("resize_type", config, {}, resize_type); + EXPECT_EQ(resize_type, configuration.at("resize_type").as()); + + std::vector labels; + labels = utils::get_from_any_maps("labels", config, {}, labels); + for (size_t i = 0; i < num_classes; i++) { + EXPECT_EQ(labels[i], mock_labels[i]); + } +} + +// TEST_P(DetectionModelParameterizedTestSaveLoad, 
TestDetctionCorrectnessAfterSaveLoad) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// bool preload = true; +// auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); +// +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// +// auto result = model->infer(image)->objects; +// +// image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// auto model_restored = DetectionModel::create_model(TMP_MODEL_FILE, {}, "", preload, "CPU"); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->objects; +// +// ASSERT_EQ(result.size(), result_restored.size()); +// +// for (size_t i = 0; i < result.size(); i++) { +// ASSERT_EQ(result[i].x, result_restored[i].x); +// ASSERT_EQ(result[i].y, result_restored[i].y); +// ASSERT_EQ(result[i].width, result_restored[i].width); +// ASSERT_EQ(result[i].height, result_restored[i].height); +// } +// } +// +// TEST_P(DetectionModelParameterizedTestSaveLoad, TestDetctionCorrectnessAfterSaveLoadWithAdapter) { +// cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the image"}; +// } +// +// auto model_path = string_format(MODEL_PATH_TEMPLATE, GetParam().name.c_str(), GetParam().name.c_str()); +// bool preload = true; +// auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, "", preload, "CPU"); +// auto ov_model = model->getModel(); +// ov::serialize(ov_model, TMP_MODEL_FILE); +// auto result = model->infer(image)->objects; +// +// image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); +// if (!image.data) { +// throw std::runtime_error{"Failed to read the 
image"}; +// } +// +// std::shared_ptr adapter = std::make_shared(TMP_MODEL_FILE); +// auto model_restored = DetectionModel::create_model(adapter); +// auto result_data = model_restored->infer(image); +// auto result_restored = result_data->objects; +// +// ASSERT_EQ(result.size(), result_restored.size()); +// +// for (size_t i = 0; i < result.size(); i++) { +// ASSERT_EQ(result[i].x, result_restored[i].x); +// ASSERT_EQ(result[i].y, result_restored[i].y); +// ASSERT_EQ(result[i].width, result_restored[i].width); +// ASSERT_EQ(result[i].height, result_restored[i].height); +// } +// } + +INSTANTIATE_TEST_SUITE_P(ClassificationTestInstance, + ClassificationModelParameterizedTest, + ::testing::Values(ModelData("mlc_mobilenetv3_large_voc"))); +// INSTANTIATE_TEST_SUITE_P(ClassificationTestInstance, +// ClassificationModelParameterizedTestSaveLoad, +// ::testing::Values(ModelData("mlc_mobilenetv3_large_voc"))); +INSTANTIATE_TEST_SUITE_P(SSDTestInstance, + SSDModelParameterizedTest, + ::testing::Values(ModelData("detection_model_with_xai_head"))); +// INSTANTIATE_TEST_SUITE_P(SSDTestInstance, +// DetectionModelParameterizedTestSaveLoad, +// ::testing::Values(ModelData("detection_model_with_xai_head"))); + +class InputParser { +public: + InputParser(int& argc, char** argv) { + for (int i = 1; i < argc; ++i) + this->tokens.push_back(std::string(argv[i])); + } + + const std::string& getCmdOption(const std::string& option) const { + std::vector::const_iterator itr; + itr = std::find(this->tokens.begin(), this->tokens.end(), option); + if (itr != this->tokens.end() && ++itr != this->tokens.end()) { + return *itr; + } + static const std::string empty_string(""); + return empty_string; + } + + bool cmdOptionExists(const std::string& option) const { + return std::find(this->tokens.begin(), this->tokens.end(), option) != this->tokens.end(); + } + +private: + std::vector tokens; +}; + +void print_help(const char* program_name) { + std::cout << "Usage: " << program_name << "-d " << 
std::endl; +} + +int main(int argc, char** argv) { + InputParser input(argc, argv); + + if (input.cmdOptionExists("-h")) { + print_help(argv[0]); + return 1; + } + + const std::string& data_dir = input.getCmdOption("-d"); + if (!data_dir.empty()) { + DATA_DIR = data_dir; + } else { + print_help(argv[0]); + return 1; + } + + testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/tests/cpp/precommit/test_sanity.cpp b/tests/cpp/precommit/test_sanity.cpp new file mode 100644 index 00000000..106d1219 --- /dev/null +++ b/tests/cpp/precommit/test_sanity.cpp @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2020-2024 Intel Corporation + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +std::string PUBLIC_SCOPE_PATH = "../../tests/cpp/precommit/public_scope.json"; +std::string DATA_DIR = "../data"; +std::string MODEL_PATH_TEMPLATE = "public/%s/FP16/%s.xml"; +std::string IMAGE_PATH = "coco128/images/train2017/000000000074.jpg"; + +struct ModelData { + std::string name; + std::string type; +}; + +class ModelParameterizedTest : public testing::TestWithParam {}; + +template +std::string string_format(const std::string& fmt, Args... 
args) { + size_t size = snprintf(nullptr, 0, fmt.c_str(), args...); + std::string buf; + buf.reserve(size + 1); + buf.resize(size); + snprintf(&buf[0], size + 1, fmt.c_str(), args...); + return buf; +} + +inline void from_json(const nlohmann::json& j, ModelData& test) { + test.name = j.at("name").get(); + test.type = j.at("type").get(); +} + +std::vector GetTestData(const std::string& path) { + std::ifstream input(path); + nlohmann::json j; + input >> j; + return j; +} +TEST_P(ModelParameterizedTest, SynchronousInference) { + cv::Mat image = cv::imread(DATA_DIR + "/" + IMAGE_PATH); + if (!image.data) { + throw std::runtime_error{"Failed to read the image"}; + } + + std::string model_path; + const std::string& name = GetParam().name; + if (name.substr(name.size() - 4) == ".xml") { + model_path = name; + } else { + model_path = string_format(MODEL_PATH_TEMPLATE, name.c_str(), name.c_str()); + } + + if ("DetectionModel" == GetParam().type) { + bool preload = true; + auto model = DetectionModel::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + auto result = model.infer(image); + EXPECT_GT(result.objects.size(), 0); + } else if ("ClassificationModel" == GetParam().type) { + bool preload = true; + auto model = Classification::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + auto result = model.infer(image); + ASSERT_GT(result.topLabels.size(), 0); + EXPECT_GT(result.topLabels.front().score, 0.0f); + } else if ("SegmentationModel" == GetParam().type) { + bool preload = true; + auto model = SemanticSegmentation::create_model(DATA_DIR + "/" + model_path, {}, preload, "CPU"); + auto result = model.infer(image); + ASSERT_GT(model.getContours(result).size(), 0); + } +} + +INSTANTIATE_TEST_SUITE_P(TestSanityPublic, ModelParameterizedTest, testing::ValuesIn(GetTestData(PUBLIC_SCOPE_PATH))); + +class InputParser { +public: + InputParser(int& argc, char** argv) { + for (int i = 1; i < argc; ++i) + this->tokens.push_back(std::string(argv[i])); + } + 
+ const std::string& getCmdOption(const std::string& option) const { + std::vector::const_iterator itr; + itr = std::find(this->tokens.begin(), this->tokens.end(), option); + if (itr != this->tokens.end() && ++itr != this->tokens.end()) { + return *itr; + } + static const std::string empty_string(""); + return empty_string; + } + + bool cmdOptionExists(const std::string& option) const { + return std::find(this->tokens.begin(), this->tokens.end(), option) != this->tokens.end(); + } + +private: + std::vector tokens; +}; + +void print_help(const char* program_name) { + std::cout << "Usage: " << program_name << " -p -d " << std::endl; +} + +int main(int argc, char** argv) { + InputParser input(argc, argv); + + if (input.cmdOptionExists("-h")) { + print_help(argv[0]); + return 1; + } + const std::string& public_scope = input.getCmdOption("-p"); + if (!public_scope.empty()) { + PUBLIC_SCOPE_PATH = public_scope; + } else { + print_help(argv[0]); + return 1; + } + const std::string& data_dir = input.getCmdOption("-d"); + if (!data_dir.empty()) { + DATA_DIR = data_dir; + } else { + print_help(argv[0]); + return 1; + } + + testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/tests/cpp/test_accuracy.cpp b/tests/cpp/test_accuracy.cpp index 24b2c908..ae80dfd8 100644 --- a/tests/cpp/test_accuracy.cpp +++ b/tests/cpp/test_accuracy.cpp @@ -86,7 +86,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { if (data.type == "DetectionModel") { auto use_tiling = !data.input_res.empty(); - auto model = DetectionModel::load(model_path, {{"tiling", use_tiling}}); + auto model = DetectionModel::create_model(model_path, {{"tiling", use_tiling}}); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -99,7 +99,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { } } else if (data.type == "SegmentationModel") { - auto model = SemanticSegmentation::load(model_path); + auto model = 
SemanticSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -109,7 +109,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "MaskRCNNModel") { - auto model = InstanceSegmentation::load(model_path); + auto model = InstanceSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -119,7 +119,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "ClassificationModel") { - auto model = Classification::load(model_path); + auto model = Classification::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -127,7 +127,7 @@ TEST_P(ModelParameterizedTest, AccuracyTest) { EXPECT_EQ(std::string{result}, test_data.reference[0]); } } else if (data.type == "AnomalyDetection") { - auto model = Anomaly::load(model_path); + auto model = Anomaly::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -153,7 +153,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { if (data.type == "DetectionModel") { auto use_tiling = !data.input_res.empty(); - auto model = DetectionModel::load(model_path, {{"tiling", use_tiling}}); + auto model = DetectionModel::create_model(model_path, {{"tiling", use_tiling}}); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -164,7 +164,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(std::string{result}, test_data.reference[0]); } } else if (data.type == "SegmentationModel") 
{ - auto model = SemanticSegmentation::load(model_path); + auto model = SemanticSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -174,7 +174,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "MaskRCNNModel") { - auto model = InstanceSegmentation::load(model_path); + auto model = InstanceSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -184,7 +184,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(format_test_output_to_string(model, result), test_data.reference[0]); } } else if (data.type == "ClassificationModel") { - auto model = Classification::load(model_path); + auto model = Classification::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -193,7 +193,7 @@ TEST_P(ModelParameterizedTest, SerializedAccuracyTest) { EXPECT_EQ(std::string{result}, test_data.reference[0]); } } else if (data.type == "AnomalyDetection") { - auto model = Anomaly::load(model_path); + auto model = Anomaly::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; cv::Mat image = cv::imread(image_path); @@ -213,7 +213,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { if (data.type == "DetectionModel") { auto use_tiling = !data.input_res.empty(); - auto model = DetectionModel::load(model_path, {{"tiling", use_tiling}}); + auto model = DetectionModel::create_model(model_path, {{"tiling", use_tiling}}); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -226,7 +226,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { 
EXPECT_EQ(std::string{result[0]}, test_data.reference[0]); } } else if (data.type == "SegmentationModel") { - auto model = SemanticSegmentation::load(model_path); + auto model = SemanticSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -236,7 +236,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(format_test_output_to_string(model, result[0]), test_data.reference[0]); } } else if (data.type == "MaskRCNNModel") { - auto model = InstanceSegmentation::load(model_path); + auto model = InstanceSegmentation::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -247,7 +247,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(format_test_output_to_string(model, result[0]), test_data.reference[0]); } } else if (data.type == "ClassificationModel") { - auto model = Classification::load(model_path); + auto model = Classification::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image; @@ -258,7 +258,7 @@ TEST_P(ModelParameterizedTest, AccuracyTestBatch) { EXPECT_EQ(std::string{result[0]}, test_data.reference[0]); } } else if (data.type == "AnomalyDetection") { - auto model = Anomaly::load(model_path); + auto model = Anomaly::create_model(model_path); for (auto& test_data : data.test_data) { std::string image_path = DATA_DIR + '/' + test_data.image;