@@ -1,4 +1,4 @@
-// Copyright (C) 2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -10,9 +10,10 @@
 #include <vector>
 #include <map>
 
-#include <inference_engine.hpp>
-#include <utils/common.hpp>
-#include <utils/ocv_common.hpp>
+#include "openvino/openvino.hpp"
+
+#include "utils/common.hpp"
+#include "utils/ocv_common.hpp"
 
 class PersonDetector {
 public:
@@ -26,73 +27,82 @@ class PersonDetector {
     static constexpr int objectSize = 7;  // Output should have 7 as a last dimension
 
     PersonDetector() = default;
-    PersonDetector(InferenceEngine::Core& ie, const std::string& deviceName, const std::string& xmlPath, const std::vector<float>& detectionTresholds,
-            const bool autoResize, const std::map<std::string, std::string>& pluginConfig) :
-        detectionTresholds{detectionTresholds}, ie_{ie} {
-        auto network = ie.ReadNetwork(xmlPath);
-        InferenceEngine::InputsDataMap inputInfo(network.getInputsInfo());
+    PersonDetector(ov::Core& core, const std::string& deviceName, const std::string& xmlPath, const std::vector<float>& detectionTresholds,
+            const bool autoResize, const ov::AnyMap& pluginConfig) :
+        autoResize{autoResize}, detectionTresholds{detectionTresholds}, core_{core} {
+        slog::info << "Reading Person Detection model " << xmlPath << slog::endl;
+        auto model = core.read_model(xmlPath);
+        logBasicModelInfo(model);
+        ov::OutputVector inputInfo = model->inputs();
         if (inputInfo.size() != 1) {
-            throw std::logic_error("Detector should have only one input");
+            throw std::logic_error("Person Detection model should have only one input");
         }
-        InferenceEngine::InputInfo::Ptr& inputInfoFirst = inputInfo.begin()->second;
-        inputInfoFirst->setPrecision(InferenceEngine::Precision::U8);
+
+        ov::preprocess::PrePostProcessor ppp(model);
         if (autoResize) {
-            inputInfoFirst->getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
-            inputInfoFirst->setLayout(InferenceEngine::Layout::NHWC);
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_spatial_dynamic_shape().
+                set_layout({"NHWC"});
+
+            ppp.input().preprocess().
+                convert_element_type(ov::element::f32).
+                convert_layout("NCHW").
+                resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
+
+            ppp.input().model().set_layout("NCHW");
         } else {
-            inputInfoFirst->setLayout(InferenceEngine::Layout::NCHW);
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_layout({"NCHW"});
         }
 
-        detectorInputBlobName = inputInfo.begin()->first;
-
         // ---------------------------Check outputs ------------------------------------------------------
-        InferenceEngine::OutputsDataMap outputInfo(network.getOutputsInfo());
+        ov::OutputVector outputInfo = model->outputs();
         if (outputInfo.size() != 1) {
-            throw std::logic_error("Person Detection network should have only one output");
+            throw std::logic_error("Person Detection model should have only one output");
         }
-        InferenceEngine::DataPtr& _output = outputInfo.begin()->second;
-        const InferenceEngine::SizeVector outputDims = _output->getTensorDesc().getDims();
-        detectorOutputBlobName = outputInfo.begin()->first;
-        if (maxProposalCount != outputDims[2]) {
+        const ov::Shape outputShape = model->output().get_shape();
+        if (maxProposalCount != outputShape[2]) {
             throw std::logic_error("unexpected ProposalCount");
         }
-        if (objectSize != outputDims[3]) {
+        if (objectSize != outputShape[3]) {
             throw std::logic_error("Output should have 7 as a last dimension");
         }
-        if (outputDims.size() != 4) {
+        if (outputShape.size() != 4) {
             throw std::logic_error("Incorrect output dimensions for SSD");
         }
-        _output->setPrecision(InferenceEngine::Precision::FP32);
 
-        net = ie_.LoadNetwork(network, deviceName, pluginConfig);
-        logExecNetworkInfo(net, xmlPath, deviceName, "Person Detection");
+        ppp.output().tensor().set_element_type(ov::element::f32);
+        model = ppp.build();
+        compiledModel = core_.compile_model(model, deviceName, pluginConfig);
+        logCompiledModelInfo(compiledModel, xmlPath, deviceName, "Person Detection");
     }
 
-    InferenceEngine::InferRequest createInferRequest() {
-        return net.CreateInferRequest();
+    ov::InferRequest createInferRequest() {
+        return compiledModel.create_infer_request();
     }
 
-    void setImage(InferenceEngine::InferRequest& inferRequest, const cv::Mat& img) {
-        InferenceEngine::Blob::Ptr input = inferRequest.GetBlob(detectorInputBlobName);
-        if (InferenceEngine::Layout::NHWC == input->getTensorDesc().getLayout()) {  // autoResize is set
+    void setImage(ov::InferRequest& inferRequest, const cv::Mat& img) {
+        ov::Tensor input = inferRequest.get_input_tensor();
+        if (autoResize) {
             if (!img.isSubmatrix()) {
                 // just wrap Mat object with Blob::Ptr without additional memory allocation
-                InferenceEngine::Blob::Ptr frameBlob = wrapMat2Blob(img);
-                inferRequest.SetBlob(detectorInputBlobName, frameBlob);
+                ov::Tensor frameTensor = wrapMat2Tensor(img);
+                inferRequest.set_input_tensor(frameTensor);
             } else {
                 throw std::logic_error("Sparse matrix are not supported");
             }
         } else {
-            matToBlob(img, input);
+            matToTensor(img, input);
         }
     }
 
-    std::list<Result> getResults(InferenceEngine::InferRequest& inferRequest, cv::Size upscale, std::vector<std::string>& rawResults) {
+    std::list<Result> getResults(ov::InferRequest& inferRequest, cv::Size upscale, std::vector<std::string>& rawResults) {
         // there is no big difference if InferReq of detector from another device is passed because the processing is the same for the same topology
         std::list<Result> results;
-        InferenceEngine::LockedMemory<const void> detectorOutputBlobMapped = InferenceEngine::as<
-            InferenceEngine::MemoryBlob>(inferRequest.GetBlob(detectorOutputBlobName))->rmap();
-        const float* const detections = detectorOutputBlobMapped.as<float*>();
+        const float* const detections = inferRequest.get_output_tensor().data<float>();
+
         // pretty much regular SSD post-processing
         for (int i = 0; i < maxProposalCount; i++) {
             float image_id = detections[i * objectSize + 0];  // in case of batch
@@ -120,89 +130,101 @@ class PersonDetector {
     }
 
 private:
+    bool autoResize;
     std::vector<float> detectionTresholds;
-    std::string detectorInputBlobName;
-    std::string detectorOutputBlobName;
-    InferenceEngine::Core ie_;  // The only reason to store a plugin as to assure that it lives at least as long as ExecutableNetwork
-    InferenceEngine::ExecutableNetwork net;
+    ov::Core core_;  // The only reason to store a plugin as to assure that it lives at least as long as CompiledModel
+    ov::CompiledModel compiledModel;
 };
 
 class ReId {
 public:
     ReId() = default;
-    ReId(InferenceEngine::Core& ie, const std::string& deviceName, const std::string& xmlPath, const bool autoResize,
-            const std::map<std::string, std::string>& pluginConfig) :
-        ie_{ie} {
-        auto network = ie.ReadNetwork(xmlPath);
-
-        /** Re-ID network should have only one input and one output **/
+    ReId(ov::Core& core, const std::string& deviceName, const std::string& xmlPath, const bool autoResize,
+            const ov::AnyMap& pluginConfig) :
+        autoResize{autoResize},
+        core_{core} {
+        slog::info << "Reading Person Re-ID model " << xmlPath << slog::endl;
+        auto model = core.read_model(xmlPath);
+        logBasicModelInfo(model);
+        /** Re-ID model should have only one input and one output **/
         // ---------------------------Check inputs ------------------------------------------------------
-        InferenceEngine::InputsDataMap ReIdInputInfo(network.getInputsInfo());
-        if (ReIdInputInfo.size() != 1) {
-            throw std::logic_error("Re-ID network should have only one input");
+        ov::OutputVector inputInfo = model->inputs();
+        if (inputInfo.size() != 1) {
+            throw std::logic_error("Re-ID model should have only one input");
         }
-        InferenceEngine::InputInfo::Ptr& ReIdInputInfoFirst = ReIdInputInfo.begin()->second;
-        ReIdInputInfoFirst->setPrecision(InferenceEngine::Precision::U8);
+
+        ov::preprocess::PrePostProcessor ppp(model);
         if (autoResize) {
-            ReIdInputInfoFirst->getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
-            ReIdInputInfoFirst->setLayout(InferenceEngine::Layout::NHWC);
-        } else {
-            ReIdInputInfoFirst->setLayout(InferenceEngine::Layout::NCHW);
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_spatial_dynamic_shape().
+                set_layout({"NHWC"});
+
+            ppp.input().preprocess().
+                convert_element_type(ov::element::f32).
+                convert_layout("NCHW").
+                resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
+
+            ppp.input().model().set_layout("NCHW");
+        }
+        else {
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_layout({"NCHW"});
         }
-        reIdInputName = ReIdInputInfo.begin()->first;
         // -----------------------------------------------------------------------------------------------------
 
         // ---------------------------Check outputs ------------------------------------------------------
-        InferenceEngine::OutputsDataMap ReIdOutputInfo(network.getOutputsInfo());
-        if (ReIdOutputInfo.size() != 1) {
-            throw std::logic_error("Re-ID should have 1 output");
+        ov::OutputVector outputInfo = model->outputs();
+        if (outputInfo.size() != 1) {
+            throw std::logic_error("Re-ID model should have 1 output");
         }
-        reIdOutputName = ReIdOutputInfo.begin()->first;
-        InferenceEngine::DataPtr& _output = ReIdOutputInfo.begin()->second;
-        const InferenceEngine::SizeVector outputDims = _output->getTensorDesc().getDims();
-        if (outputDims.size() != 2) {
+        const ov::Shape outputShape = model->output().get_shape();
+        if (outputShape.size() != 2) {
             throw std::logic_error("Incorrect output dimensions for Re-ID");
         }
-        reidLen = outputDims[1];
-        _output->setPrecision(InferenceEngine::Precision::FP32);
 
-        net = ie_.LoadNetwork(network, deviceName, pluginConfig);
-        logExecNetworkInfo(net, xmlPath, deviceName, "Person Re-Identification");
+        reidLen = (int)outputShape[1];
+        ppp.output().tensor().set_element_type(ov::element::f32);
+        model = ppp.build();
+        compiledModel = core_.compile_model(model, deviceName, pluginConfig);
+        logCompiledModelInfo(compiledModel, xmlPath, deviceName, "Person Re-ID");
     }
 
-    InferenceEngine::InferRequest createInferRequest() {
-        return net.CreateInferRequest();
+    ov::InferRequest createInferRequest() {
+        return compiledModel.create_infer_request();
     }
 
-    void setImage(InferenceEngine::InferRequest& inferRequest, const cv::Mat& img, const cv::Rect personRect) {
-        InferenceEngine::Blob::Ptr roiBlob = inferRequest.GetBlob(reIdInputName);
-        if (InferenceEngine::Layout::NHWC == roiBlob->getTensorDesc().getLayout()) {  // autoResize is set
-            InferenceEngine::ROI cropRoi{0, static_cast<size_t>(personRect.x), static_cast<size_t>(personRect.y), static_cast<size_t>(personRect.width),
-                static_cast<size_t>(personRect.height)};
-            InferenceEngine::Blob::Ptr frameBlob = wrapMat2Blob(img);
-            InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(frameBlob, cropRoi);
-            inferRequest.SetBlob(reIdInputName, roiBlob);
+    void setImage(ov::InferRequest& inferRequest, const cv::Mat& img, const cv::Rect personRect) {
+        ov::Tensor input = inferRequest.get_input_tensor();
+        if (autoResize) {
+            ov::Tensor frameTensor = wrapMat2Tensor(img);
+            ov::Shape tensorShape = frameTensor.get_shape();
+            ov::Layout layout("NHWC");
+            const size_t batch = tensorShape[ov::layout::batch_idx(layout)];
+            const size_t channels = tensorShape[ov::layout::channels_idx(layout)];
+            ov::Tensor roiTensor(frameTensor, {0, static_cast<size_t>(personRect.y), static_cast<size_t>(personRect.x), 0},
+                {batch, static_cast<size_t>(personRect.y) + static_cast<size_t>(personRect.height),
+                 static_cast<size_t>(personRect.x) + static_cast<size_t>(personRect.width), channels});
+            inferRequest.set_input_tensor(roiTensor);
         } else {
             const cv::Mat& personImage = img(personRect);
-            matToBlob(personImage, roiBlob);
+            matToTensor(personImage, input);
         }
     }
 
-    std::vector<float> getResults(InferenceEngine::InferRequest& inferRequest) {
+    std::vector<float> getResults(ov::InferRequest& inferRequest) {
         std::vector<float> result;
-        InferenceEngine::LockedMemory<const void> reIdOutputMapped = InferenceEngine::as<InferenceEngine::MemoryBlob>(
-            inferRequest.GetBlob(reIdOutputName))->rmap();
-        const auto data = reIdOutputMapped.as<float*>();
+        const float* const reids = inferRequest.get_output_tensor().data<float>();
         for (int i = 0; i < reidLen; i++) {
-            result.push_back(data[i]);
+            result.push_back(reids[i]);
         }
         return result;
     }
 
 private:
+    bool autoResize;
     int reidLen;
-    std::string reIdInputName;
-    std::string reIdOutputName;
-    InferenceEngine::Core ie_;  // The only reason to store a device as to assure that it lives at least as long as ExecutableNetwork
-    InferenceEngine::ExecutableNetwork net;
+    ov::Core core_;  // The only reason to store a device as to assure that it lives at least as long as CompiledModel
+    ov::CompiledModel compiledModel;
 };
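
For context, a minimal sketch of how the migrated classes might be driven end to end with the OpenVINO 2.0 API. This is not part of the commit: the model paths, device name, threshold value, and the assumption that Result exposes the detection rectangle as a 'location' member are all illustrative.

// Hypothetical driver, for illustration only.
#include <opencv2/imgcodecs.hpp>

int main() {
    ov::Core core;  // a single Core can serve both models
    PersonDetector detector(core, "CPU", "person-detection.xml",
                            {0.5f}, /*autoResize=*/true, {});
    ReId reid(core, "CPU", "person-reid.xml", /*autoResize=*/true, {});

    ov::InferRequest detectorReq = detector.createInferRequest();
    ov::InferRequest reidReq = reid.createInferRequest();

    cv::Mat frame = cv::imread("frame.png");
    detector.setImage(detectorReq, frame);
    detectorReq.infer();  // synchronous for brevity

    std::vector<std::string> rawResults;
    for (const auto& person : detector.getResults(detectorReq, frame.size(), rawResults)) {
        reid.setImage(reidReq, frame, person.location);  // 'location' member assumed
        reidReq.infer();
        std::vector<float> embedding = reid.getResults(reidReq);
        // ... match 'embedding' against previously seen persons
    }
}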
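The subtlest change in this commit is the crop in ReId::setImage. The 1.0 idiom built an InferenceEngine::ROI from x, y, width, and height and wrapped it with make_shared_blob; the 2.0 replacement is the ov::Tensor ROI constructor, which takes begin and end coordinates expressed in the tensor's own NHWC layout (so y precedes x, and end is begin plus size, i.e. exclusive) and returns a view over the same memory without copying. A standalone sketch of that idiom follows; the helper name is made up for illustration:

#include <openvino/openvino.hpp>

// Illustrative helper: view the [y0, y1) x [x0, x1) window of an NHWC tensor.
// Batch and channel dimensions are kept whole; no pixel data is copied.
ov::Tensor cropNHWC(const ov::Tensor& frame, size_t x0, size_t y0, size_t x1, size_t y1) {
    const ov::Shape s = frame.get_shape();  // {N, H, W, C}
    return ov::Tensor(frame, ov::Coordinate{0, y0, x0, 0}, ov::Coordinate{s[0], y1, x1, s[3]});
}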