Skip to content

Commit ffb9fb2

Browse files
authored
Merge pull request #3104 from ivikhrev/ov2.0-cpp-model-api
OV2.0 c++ modelAPI demos
2 parents bed5932 + 37817a8 commit ffb9fb2

File tree

75 files changed

+2232
-2093
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

75 files changed

+2232
-2093
lines changed

demos/background_subtraction_demo/cpp_gapi/main.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -122,15 +122,15 @@ int main(int argc, char *argv[]) {
122122
});
123123

124124
/** Configure network **/
125-
auto config = ConfigFactory::getUserConfig(FLAGS_d, "", "", FLAGS_nireq,
125+
auto config = ConfigFactory::getUserConfig(FLAGS_d, FLAGS_nireq,
126126
FLAGS_nstreams, FLAGS_nthreads);
127127
const auto net = cv::gapi::ie::Params<cv::gapi::Generic> {
128128
model->getName(),
129129
FLAGS_m, // path to topology IR
130130
fileNameNoExt(FLAGS_m) + ".bin", // path to weights
131131
FLAGS_d // device specifier
132132
}.cfgNumRequests(config.maxAsyncRequests)
133-
.pluginConfig(config.execNetworkConfig);
133+
.pluginConfig(config.getLegacyConfig());
134134
slog::info << "The background matting model " << FLAGS_m << " is loaded to " << FLAGS_d << " device." << slog::endl;
135135

136136
auto kernels = cv::gapi::combine(custom::kernels(),

demos/classification_benchmark_demo/cpp/README.md

Lines changed: 1 addition & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -119,33 +119,7 @@ and `<omz_dir>/data/dataset_classes/imagenet_2012.txt` labels file with all othe
119119

120120
## Running
121121

122-
Running the application with the `-h` option yields the following usage message:
123-
124-
```
125-
classification_benchmark_demo [OPTION]
126-
Options:
127-
128-
-h Print a usage message.
129-
-i "<path>" Required. Path to a folder with images or path to an image file.
130-
-m "<path>" Required. Path to an .xml file with a trained model.
131-
  -l "<absolute_path>"      Required for CPU custom layers. Absolute path to a shared library with the kernels implementation.
132-
Or
133-
-c "<absolute_path>" Required for GPU custom kernels. Absolute path to the .xml file with kernels description.
134-
-auto_resize Optional. Enables resizable input.
135-
-labels "<path>" Required. Path to .txt file with labels.
136-
-gt "<path>" Optional. Path to ground truth .txt file.
137-
-d "<device>" Optional. Specify the target device to infer on (the list of available devices is shown below). Default value is CPU. The demo will look for a suitable plugin for device specified.
138-
-nthreads "<integer>" Optional. Specify count of threads.
139-
-nstreams "<integer>" Optional. Specify count of streams.
140-
-nireq "<integer>" Optional. Number of infer requests.
141-
-nt "<integer>" Optional. Number of top results. Default value is 5. Must be >= 1.
142-
-res "<WxH>" Optional. Set image grid resolution in format WxH. Default value is 1280x720.
143-
-no_show Optional. Disable showing of processed images.
144-
-time "<integer>" Optional. Time in seconds to execute program. Default is -1 (infinite time).
145-
-u Optional. List of monitors to show initially.
146-
```
147-
148-
Running the application with the empty list of options yields an error message.
122+
Running the demo with the `-h` option yields a usage message.
149123

150124
The number of `InferRequest`s is specified by the `-nireq` flag. Each `InferRequest` acts as a "buffer": it waits in a queue before being filled with images and sent for inference, then after the inference completes, it waits in a queue until its results are processed. Increasing the number of `InferRequest`s usually increases performance, because in that case multiple `InferRequest`s can be processed simultaneously if the device supports parallelization. However, a large number of `InferRequest`s increases latency because each image still needs to wait in a queue.
151125

demos/classification_benchmark_demo/cpp/main.cpp

Lines changed: 28 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,37 @@
1-
// Copyright (C) 2018-2019 Intel Corporation
1+
// Copyright (C) 2020-2022 Intel Corporation
22
// SPDX-License-Identifier: Apache-2.0
33
//
44

5-
#include <vector>
6-
#include <queue>
7-
#include <memory>
8-
#include <string>
95
#include <chrono>
10-
#include <condition_variable>
11-
#include <mutex>
12-
#include <cstdio>
13-
#include <functional>
14-
#include <atomic>
6+
#include <fstream>
7+
#include <iostream>
8+
#include <limits>
9+
#include <string>
10+
#include <vector>
1511

16-
#include <inference_engine.hpp>
1712
#include <gflags/gflags.h>
13+
#include <opencv2/opencv.hpp>
14+
#include <openvino/openvino.hpp>
15+
16+
#include <models/classification_model.h>
17+
#include <models/results.h>
1818
#include <pipelines/async_pipeline.h>
1919
#include <pipelines/metadata.h>
20-
#include <models/classification_model.h>
21-
#include <utils/common.hpp>
22-
#include <utils/slog.hpp>
20+
2321
#include <utils/args_helper.hpp>
22+
#include <utils/common.hpp>
2423
#include <utils/ocv_common.hpp>
2524
#include <utils/performance_metrics.hpp>
25+
#include <utils/slog.hpp>
2626

2727
#include "grid_mat.hpp"
2828

2929
static const char help_message[] = "Print a usage message.";
3030
static const char image_message[] = "Required. Path to a folder with images or path to an image file.";
3131
static const char model_message[] = "Required. Path to an .xml file with a trained model.";
3232
static const char labels_message[] = "Required. Path to .txt file with labels.";
33+
static const char layout_message[] = "Optional. Specify inputs layouts."
34+
" Ex. \"[NCHW]\" or \"input1[NCHW],input2[NC]\" in case of more than one input.";
3335
static const char gt_message[] = "Optional. Path to ground truth .txt file.";
3436
static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available "
3537
"devices is shown below). Default value is CPU. "
@@ -40,10 +42,6 @@ static const char num_inf_req_message[] = "Optional. Number of infer requests.";
4042
static const char image_grid_resolution_message[] = "Optional. Set image grid resolution in format WxH. "
4143
"Default value is 1280x720.";
4244
static const char ntop_message[] = "Optional. Number of top results. Default value is 5. Must be >= 1.";
43-
static const char custom_cldnn_message[] = "Required for GPU custom kernels. "
44-
"Absolute path to the .xml file with kernels description.";
45-
static const char custom_cpu_library_message[] = "Required for CPU custom layers."
46-
"Absolute path to a shared library with the kernels implementation.";
4745
static const char input_resizable_message[] = "Optional. Enables resizable input.";
4846
static const char no_show_message[] = "Optional. Disable showing of processed images.";
4947
static const char execution_time_message[] = "Optional. Time in seconds to execute program. "
@@ -54,15 +52,14 @@ DEFINE_bool(h, false, help_message);
5452
DEFINE_string(i, "", image_message);
5553
DEFINE_string(m, "", model_message);
5654
DEFINE_string(labels, "", labels_message);
55+
DEFINE_string(layout, "", layout_message);
5756
DEFINE_string(gt, "", gt_message);
5857
DEFINE_string(d, "CPU", target_device_message);
5958
DEFINE_uint32(nthreads, 0, num_threads_message);
6059
DEFINE_string(nstreams, "", num_streams_message);
6160
DEFINE_uint32(nireq, 0, num_inf_req_message);
6261
DEFINE_uint32(nt, 5, ntop_message);
6362
DEFINE_string(res, "1280x720", image_grid_resolution_message);
64-
DEFINE_string(c, "", custom_cldnn_message);
65-
DEFINE_string(l, "", custom_cpu_library_message);
6663
DEFINE_bool(auto_resize, false, input_resizable_message);
6764
DEFINE_bool(no_show, false, no_show_message);
6865
DEFINE_uint32(time, std::numeric_limits<gflags::uint32>::max(), execution_time_message);
@@ -76,11 +73,9 @@ static void showUsage() {
7673
std::cout << " -h " << help_message << std::endl;
7774
std::cout << " -i \"<path>\" " << image_message << std::endl;
7875
std::cout << " -m \"<path>\" " << model_message << std::endl;
79-
std::cout << " -l \"<absolute_path>\" " << custom_cpu_library_message << std::endl;
80-
std::cout << " Or" << std::endl;
81-
std::cout << " -c \"<absolute_path>\" " << custom_cldnn_message << std::endl;
8276
std::cout << " -auto_resize " << input_resizable_message << std::endl;
8377
std::cout << " -labels \"<path>\" " << labels_message << std::endl;
78+
std::cout << " -layout \"<string>\" " << layout_message << std::endl;
8479
std::cout << " -gt \"<path>\" " << gt_message << std::endl;
8580
std::cout << " -d \"<device>\" " << target_device_message << std::endl;
8681
std::cout << " -nthreads \"<integer>\" " << num_threads_message << std::endl;
@@ -165,11 +160,12 @@ int main(int argc, char *argv[]) {
165160
if (!FLAGS_gt.empty()) {
166161
std::map<std::string, unsigned> classIndicesMap;
167162
std::ifstream inputGtFile(FLAGS_gt);
168-
if (!inputGtFile.is_open()) throw std::runtime_error("Can't open the ground truth file.");
163+
if (!inputGtFile.is_open()) {
164+
throw std::runtime_error("Can't open the ground truth file.");
165+
}
169166

170167
std::string line;
171-
while (std::getline(inputGtFile, line))
172-
{
168+
while (std::getline(inputGtFile, line)) {
173169
size_t separatorIdx = line.find(' ');
174170
if (separatorIdx == std::string::npos) {
175171
throw std::runtime_error("The ground truth file has incorrect format.");
@@ -196,7 +192,7 @@ int main(int argc, char *argv[]) {
196192
std::fill(classIndices.begin(), classIndices.end(), 0);
197193
}
198194

199-
//------------------------------ Running Detection routines ----------------------------------------------
195+
//------------------------------ Running routines ----------------------------------------------
200196
std::vector<std::string> labels = ClassificationModel::loadLabels(FLAGS_labels);
201197
for (const auto & classIndex : classIndices) {
202198
if (classIndex >= labels.size()) {
@@ -205,11 +201,11 @@ int main(int argc, char *argv[]) {
205201
}
206202
}
207203

208-
slog::info << *InferenceEngine::GetInferenceEngineVersion() << slog::endl;
209-
InferenceEngine::Core core;
210-
AsyncPipeline pipeline(std::unique_ptr<ModelBase>(new ClassificationModel(FLAGS_m, FLAGS_nt, FLAGS_auto_resize, labels)),
211-
ConfigFactory::getUserConfig(FLAGS_d, FLAGS_l, FLAGS_c, FLAGS_nireq, FLAGS_nstreams, FLAGS_nthreads),
212-
core);
204+
slog::info << ov::get_openvino_version() << slog::endl;
205+
ov::Core core;
206+
207+
AsyncPipeline pipeline(std::unique_ptr<ModelBase>(new ClassificationModel(FLAGS_m, FLAGS_nt, FLAGS_auto_resize, labels, FLAGS_layout)),
208+
ConfigFactory::getUserConfig(FLAGS_d, FLAGS_nireq, FLAGS_nstreams, FLAGS_nthreads), core);
213209

214210
Presenter presenter(FLAGS_u, 0);
215211
int width;

demos/common/cpp/models/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@ source_group("include" FILES ${HEADERS})
1212

1313
add_library(models STATIC ${SOURCES} ${HEADERS})
1414
target_include_directories(models PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
15-
target_link_libraries(models PRIVATE ngraph::ngraph gflags ${InferenceEngine_LIBRARIES} utils opencv_core opencv_imgproc)
15+
target_link_libraries(models PRIVATE gflags ${InferenceEngine_LIBRARIES} utils opencv_core opencv_imgproc)

demos/common/cpp/models/include/models/associative_embedding_decoder.h

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
// Copyright (C) 2021 Intel Corporation
2+
// Copyright (C) 2021-2022 Intel Corporation
33
//
44
// Licensed under the Apache License, Version 2.0 (the "License");
55
// you may not use this file except in compliance with the License.
@@ -13,10 +13,9 @@
1313
// See the License for the specific language governing permissions and
1414
// limitations under the License.
1515
*/
16-
#pragma once
17-
#include "opencv2/core.hpp"
18-
#include "results.h"
1916

17+
#pragma once
18+
#include <opencv2/core.hpp>
2019

2120
struct Peak {
2221
explicit Peak(const cv::Point2f& keypoint = cv::Point2f(-1, -1),
@@ -31,7 +30,6 @@ struct Peak {
3130
float tag;
3231
};
3332

34-
3533
class Pose {
3634
public:
3735
explicit Pose(size_t numJoints) : peaks(numJoints) {}

demos/common/cpp/models/include/models/classification_model.h

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
// Copyright (C) 2018-2020 Intel Corporation
2+
// Copyright (C) 2020-2022 Intel Corporation
33
//
44
// Licensed under the Apache License, Version 2.0 (the "License");
55
// you may not use this file except in compliance with the License.
@@ -13,20 +13,26 @@
1313
// See the License for the specific language governing permissions and
1414
// limitations under the License.
1515
*/
16-
#pragma once
1716

17+
#pragma once
18+
#include <string>
19+
#include <vector>
20+
#include <openvino/openvino.hpp>
1821
#include "models/image_model.h"
22+
#include "models/results.h"
1923

2024
class ClassificationModel : public ImageModel {
2125
public:
2226
/// Constructor
2327
/// @param modelFileName name of model to load.
2428
/// @param nTop - number of top results.
2529
/// Any detected object with confidence lower than this threshold will be ignored.
26-
/// @param useAutoResize - if true, image will be resized by IE.
30+
/// @param useAutoResize - if true, image will be resized by openvino.
2731
/// Otherwise, image will be preprocessed and resized using OpenCV routines.
2832
/// @param labels - array of labels for every class.
29-
ClassificationModel(const std::string& modelFileName, size_t nTop, bool useAutoResize, const std::vector<std::string>& labels);
33+
/// @param layout - model input layout
34+
ClassificationModel(const std::string& modelFileName, size_t nTop, bool useAutoResize,
35+
const std::vector<std::string>& labels, const std::string& layout = "");
3036

3137
std::unique_ptr<ResultBase> postprocess(InferenceResult& infResult) override;
3238

@@ -36,5 +42,5 @@ class ClassificationModel : public ImageModel {
3642
size_t nTop;
3743
std::vector<std::string> labels;
3844

39-
void prepareInputsOutputs(InferenceEngine::CNNNetwork& cnnNetwork) override;
45+
void prepareInputsOutputs(std::shared_ptr<ov::Model>& model) override;
4046
};

demos/common/cpp/models/include/models/deblurring_model.h

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
// Copyright (C) 2021 Intel Corporation
2+
// Copyright (C) 2021-2022 Intel Corporation
33
//
44
// Licensed under the Apache License, Version 2.0 (the "License");
55
// you may not use this file except in compliance with the License.
@@ -15,23 +15,28 @@
1515
*/
1616

1717
#pragma once
18-
19-
#include "image_model.h"
18+
#include <string>
19+
#include <vector>
20+
#include <opencv2/opencv.hpp>
21+
#include <openvino/openvino.hpp>
22+
#include "models/image_model.h"
23+
#include "models/results.h"
2024

2125
class DeblurringModel : public ImageModel {
2226
public:
2327
/// Constructor
2428
/// @param modelFileName name of model to load
2529
/// @param inputImgSize size of image to set model input shape
26-
DeblurringModel(const std::string& modelFileName, const cv::Size& inputImgSize);
30+
/// @param layout - model input layout
31+
DeblurringModel(const std::string& modelFileName, const cv::Size& inputImgSize, const std::string& layout = "");
2732

2833
std::shared_ptr<InternalModelData> preprocess(
29-
const InputData& inputData, InferenceEngine::InferRequest::Ptr& request) override;
34+
const InputData& inputData, ov::InferRequest& request) override;
3035
std::unique_ptr<ResultBase> postprocess(InferenceResult& infResult) override;
3136

3237
protected:
33-
void prepareInputsOutputs(InferenceEngine::CNNNetwork & cnnNetwork) override;
34-
void changeInputSize(InferenceEngine::CNNNetwork& cnnNetwork);
38+
void prepareInputsOutputs(std::shared_ptr<ov::Model>& model) override;
39+
void changeInputSize(std::shared_ptr<ov::Model>& model);
3540

3641
static const size_t stride = 32;
3742
};

demos/common/cpp/models/include/models/detection_model.h

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
// Copyright (C) 2018-2020 Intel Corporation
2+
// Copyright (C) 2020-2022 Intel Corporation
33
//
44
// Licensed under the Apache License, Version 2.0 (the "License");
55
// you may not use this file except in compliance with the License.
@@ -13,7 +13,10 @@
1313
// See the License for the specific language governing permissions and
1414
// limitations under the License.
1515
*/
16+
1617
#pragma once
18+
#include <string>
19+
#include <vector>
1720
#include "models/image_model.h"
1821

1922
class DetectionModel : public ImageModel {
@@ -22,17 +25,20 @@ class DetectionModel : public ImageModel {
2225
/// @param modelFileName name of model to load
2326
/// @param confidenceThreshold - threshold to eliminate low-confidence detections.
2427
/// Any detected object with confidence lower than this threshold will be ignored.
25-
/// @param useAutoResize - if true, image will be resized by IE.
28+
/// @param useAutoResize - if true, image will be resized by openvino.
2629
/// Otherwise, image will be preprocessed and resized using OpenCV routines.
2730
/// @param labels - array of labels for every class. If this array is empty or contains less elements
2831
/// than actual classes number, default "Label #N" will be shown for missing items.
29-
DetectionModel(const std::string& modelFileName, float confidenceThreshold, bool useAutoResize, const std::vector<std::string>& labels);
32+
/// @param layout - model input layout
33+
DetectionModel(const std::string& modelFileName, float confidenceThreshold, bool useAutoResize,
34+
const std::vector<std::string>& labels, const std::string& layout = "");
3035

3136
static std::vector<std::string> loadLabels(const std::string& labelFilename);
3237

3338
protected:
3439
float confidenceThreshold;
3540
std::vector<std::string> labels;
3641

37-
std::string getLabelName(int labelID) { return (size_t)labelID < labels.size() ? labels[labelID] : std::string("Label #") + std::to_string(labelID); }
42+
std::string getLabelName(int labelID) { return (size_t)labelID < labels.size() ?
43+
labels[labelID] : std::string("Label #") + std::to_string(labelID); }
3844
};

demos/common/cpp/models/include/models/detection_model_centernet.h

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
// Copyright (C) 2020-2021 Intel Corporation
2+
// Copyright (C) 2020-2022 Intel Corporation
33
//
44
// Licensed under the Apache License, Version 2.0 (the "License");
55
// you may not use this file except in compliance with the License.
@@ -15,7 +15,11 @@
1515
*/
1616

1717
#pragma once
18-
#include "detection_model.h"
18+
#include <string>
19+
#include <vector>
20+
#include <openvino/openvino.hpp>
21+
#include "models/detection_model.h"
22+
#include "models/results.h"
1923

2024
class ModelCenterNet : public DetectionModel {
2125
public:
@@ -31,11 +35,11 @@ class ModelCenterNet : public DetectionModel {
3135
static const int INIT_VECTOR_SIZE = 200;
3236

3337
ModelCenterNet(const std::string& modelFileName, float confidenceThreshold,
34-
const std::vector<std::string>& labels = std::vector<std::string>());
38+
const std::vector<std::string>& labels = std::vector<std::string>(), const std::string& layout = "");
3539
std::shared_ptr<InternalModelData> preprocess(
36-
const InputData& inputData, InferenceEngine::InferRequest::Ptr& request) override;
40+
const InputData& inputData, ov::InferRequest& request) override;
3741
std::unique_ptr<ResultBase> postprocess(InferenceResult& infResult) override;
3842

3943
protected:
40-
void prepareInputsOutputs(InferenceEngine::CNNNetwork& cnnNetwork) override;
44+
void prepareInputsOutputs(std::shared_ptr<ov::Model>& model) override;
4145
};

0 commit comments

Comments
 (0)