Commit 84fbfc5

OV2.0 c++ social_distance_demo (#3078)
* start migration
* upd logging, upd year
* roi selection from tensor for auto resize
* removed commented lines, specify callback
* removed debug output
* replace std::function<void(std::exception_ptr)>() with [](std::exception_ptr e) {}
* code style improvements, typos fixed
* undo changes in cmakelists file
* use common functions
* upd version function
* rebase to master
* rebase to master, change get_config to get_property
* common.hpp from master

Co-authored-by: Vladimir Dudnik <[email protected]>
1 parent 6dea63c commit 84fbfc5
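This change set migrates the demo from the Inference Engine API to the OpenVINO 2.0 runtime API. As a rough orientation before the per-file diffs, a minimal sketch of the old and new model-loading flow; the model path and device name below are placeholders, not values from the demo:

    // Old flow (Inference Engine API), as removed by this commit:
    //     InferenceEngine::Core ie;
    //     auto network = ie.ReadNetwork("model.xml");
    //     auto execNet = ie.LoadNetwork(network, "CPU", {});
    //     auto request = execNet.CreateInferRequest();
    //
    // New flow (OpenVINO 2.0 API), as introduced by this commit:
    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");                      // placeholder path
        ov::CompiledModel compiled = core.compile_model(model, "CPU");  // placeholder device
        ov::InferRequest request = compiled.create_infer_request();
        return 0;
    }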

8 files changed (+203, -194 lines)


demos/common/cpp/utils/include/utils/ocv_common.hpp

Lines changed: 1 addition & 1 deletion

@@ -92,7 +92,7 @@ static UNUSED void matToTensor(const cv::Mat& mat, const ov::Tensor& tensor, int
     const size_t height = tensorShape[ov::layout::height_idx(layout)];
     const size_t channels = tensorShape[ov::layout::channels_idx(layout)];
     if (static_cast<size_t>(mat.channels()) != channels) {
-        throw std::runtime_error("The number of channels for net input and image must match");
+        throw std::runtime_error("The number of channels for model input and image must match");
     }
     if (channels != 1 && channels != 3) {
         throw std::runtime_error("Unsupported number of channels");

demos/social_distance_demo/cpp/CMakeLists.txt

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-# Copyright (C) 2021 Intel Corporation
+# Copyright (C) 2021-2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #

Lines changed: 5 additions & 5 deletions

@@ -1,12 +1,12 @@
-// Copyright (C) 2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
 #pragma once
-#include <opencv2/core.hpp>
 #include <tuple>
+#include <opencv2/core.hpp>
 
-std::tuple<bool, bool, double> socialDistance(std::tuple<int, int> &frameShape,
-                                              cv::Point2d &A, cv::Point2d &B,
-                                              cv::Point2d &C, cv::Point2d &D,
+std::tuple<bool, bool, double> socialDistance(std::tuple<int, int>& frameShape,
+                                              cv::Point2d& A, cv::Point2d& B,
+                                              cv::Point2d& C, cv::Point2d& D,
                                               unsigned minIter = 3, double minW = 0, double maxW = 0);
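Purely for orientation, a hypothetical call site derived only from the signature above; the meaning of the four points, the two returned flags, and the returned double is defined in the demo's sources rather than in this hunk, and all values below are illustrative:

    #include <tuple>
    #include <opencv2/core.hpp>

    std::tuple<bool, bool, double> socialDistance(std::tuple<int, int>& frameShape,
                                                  cv::Point2d& A, cv::Point2d& B,
                                                  cv::Point2d& C, cv::Point2d& D,
                                                  unsigned minIter = 3, double minW = 0, double maxW = 0);

    void checkPair() {
        std::tuple<int, int> frameShape{1080, 1920};  // assumed (height, width) order
        cv::Point2d A{100, 700}, B{180, 700}, C{400, 710}, D{470, 710};  // illustrative points
        bool flag1, flag2;
        double value;
        // defaults are used for minIter, minW and maxW
        std::tie(flag1, flag2, value) = socialDistance(frameShape, A, B, C, D);
    }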

Lines changed: 113 additions & 91 deletions

@@ -1,4 +1,4 @@
-// Copyright (C) 2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -10,9 +10,10 @@
 #include <vector>
 #include <map>
 
-#include <inference_engine.hpp>
-#include <utils/common.hpp>
-#include <utils/ocv_common.hpp>
+#include "openvino/openvino.hpp"
+
+#include "utils/common.hpp"
+#include "utils/ocv_common.hpp"
 
 class PersonDetector {
 public:
@@ -26,73 +27,82 @@ class PersonDetector {
     static constexpr int objectSize = 7; // Output should have 7 as a last dimension"
 
     PersonDetector() = default;
-    PersonDetector(InferenceEngine::Core& ie, const std::string& deviceName, const std::string& xmlPath, const std::vector<float>& detectionTresholds,
-                   const bool autoResize, const std::map<std::string, std::string> & pluginConfig) :
-        detectionTresholds{detectionTresholds}, ie_{ie} {
-        auto network = ie.ReadNetwork(xmlPath);
-        InferenceEngine::InputsDataMap inputInfo(network.getInputsInfo());
+    PersonDetector(ov::Core& core, const std::string& deviceName, const std::string& xmlPath, const std::vector<float>& detectionTresholds,
+                   const bool autoResize, const ov::AnyMap& pluginConfig) :
+        autoResize{autoResize}, detectionTresholds{detectionTresholds}, core_{core} {
+        slog::info << "Reading Person Detection model " << xmlPath << slog::endl;
+        auto model = core.read_model(xmlPath);
+        logBasicModelInfo(model);
+        ov::OutputVector inputInfo = model->inputs();
         if (inputInfo.size() != 1) {
-            throw std::logic_error("Detector should have only one input");
+            throw std::logic_error("Person Detection model should have only one input");
         }
-        InferenceEngine::InputInfo::Ptr& inputInfoFirst = inputInfo.begin()->second;
-        inputInfoFirst->setPrecision(InferenceEngine::Precision::U8);
+
+        ov::preprocess::PrePostProcessor ppp(model);
         if (autoResize) {
-            inputInfoFirst->getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
-            inputInfoFirst->setLayout(InferenceEngine::Layout::NHWC);
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_spatial_dynamic_shape().
+                set_layout({ "NHWC" });
+
+            ppp.input().preprocess().
+                convert_element_type(ov::element::f32).
+                convert_layout("NCHW").
+                resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
+
+            ppp.input().model().set_layout("NCHW");
         } else {
-            inputInfoFirst->setLayout(InferenceEngine::Layout::NCHW);
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_layout({ "NCHW" });
         }
 
-        detectorInputBlobName = inputInfo.begin()->first;
-
         // ---------------------------Check outputs ------------------------------------------------------
-        InferenceEngine::OutputsDataMap outputInfo(network.getOutputsInfo());
+        ov::OutputVector outputInfo = model->outputs();
         if (outputInfo.size() != 1) {
-            throw std::logic_error("Person Detection network should have only one output");
+            throw std::logic_error("Person Detection model should have only one output");
         }
-        InferenceEngine::DataPtr& _output = outputInfo.begin()->second;
-        const InferenceEngine::SizeVector outputDims = _output->getTensorDesc().getDims();
-        detectorOutputBlobName = outputInfo.begin()->first;
-        if (maxProposalCount != outputDims[2]) {
+        const ov::Shape outputShape = model->output().get_shape();
+        if (maxProposalCount != outputShape[2]) {
             throw std::logic_error("unexpected ProposalCount");
         }
-        if (objectSize != outputDims[3]) {
+        if (objectSize != outputShape[3]) {
             throw std::logic_error("Output should have 7 as a last dimension");
         }
-        if (outputDims.size() != 4) {
+        if (outputShape.size() != 4) {
             throw std::logic_error("Incorrect output dimensions for SSD");
         }
-        _output->setPrecision(InferenceEngine::Precision::FP32);
 
-        net = ie_.LoadNetwork(network, deviceName, pluginConfig);
-        logExecNetworkInfo(net, xmlPath, deviceName, "Person Detection");
+        ppp.output().tensor().set_element_type(ov::element::f32);
+        model = ppp.build();
+        compiledModel = core_.compile_model(model, deviceName, pluginConfig);
+        logCompiledModelInfo(compiledModel, xmlPath, deviceName, "Person Detection");
     }
 
-    InferenceEngine::InferRequest createInferRequest() {
-        return net.CreateInferRequest();
+    ov::InferRequest createInferRequest() {
+        return compiledModel.create_infer_request();
     }
 
-    void setImage(InferenceEngine::InferRequest& inferRequest, const cv::Mat& img) {
-        InferenceEngine::Blob::Ptr input = inferRequest.GetBlob(detectorInputBlobName);
-        if (InferenceEngine::Layout::NHWC == input->getTensorDesc().getLayout()) { // autoResize is set
+    void setImage(ov::InferRequest& inferRequest, const cv::Mat& img) {
+        ov::Tensor input = inferRequest.get_input_tensor();
+        if (autoResize) {
             if (!img.isSubmatrix()) {
                 // just wrap Mat object with Blob::Ptr without additional memory allocation
-                InferenceEngine::Blob::Ptr frameBlob = wrapMat2Blob(img);
-                inferRequest.SetBlob(detectorInputBlobName, frameBlob);
+                ov::Tensor frameTensor = wrapMat2Tensor(img);
+                inferRequest.set_input_tensor(frameTensor);
             } else {
                 throw std::logic_error("Sparse matrix are not supported");
             }
         } else {
-            matToBlob(img, input);
+            matToTensor(img, input);
         }
     }
 
-    std::list<Result> getResults(InferenceEngine::InferRequest& inferRequest, cv::Size upscale, std::vector<std::string>& rawResults) {
+    std::list<Result> getResults(ov::InferRequest& inferRequest, cv::Size upscale, std::vector<std::string>& rawResults) {
         // there is no big difference if InferReq of detector from another device is passed because the processing is the same for the same topology
         std::list<Result> results;
-        InferenceEngine::LockedMemory<const void> detectorOutputBlobMapped = InferenceEngine::as<
-            InferenceEngine::MemoryBlob>(inferRequest.GetBlob(detectorOutputBlobName))->rmap();
-        const float * const detections = detectorOutputBlobMapped.as<float *>();
+        const float* const detections = inferRequest.get_output_tensor().data<float>();
+
         // pretty much regular SSD post-processing
         for (int i = 0; i < maxProposalCount; i++) {
             float image_id = detections[i * objectSize + 0]; // in case of batch
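In the hunk above, the resize and precision handling moves from InferenceEngine::InputInfo settings into the model itself via ov::preprocess::PrePostProcessor, so the compiled model accepts u8 NHWC frames of arbitrary size and converts them internally. Condensed from the diff into a standalone sketch; the model path and device name are placeholders:

    #include <openvino/openvino.hpp>

    ov::CompiledModel buildWithEmbeddedPreprocessing(ov::Core& core) {
        auto model = core.read_model("person-detection.xml");  // placeholder path

        ov::preprocess::PrePostProcessor ppp(model);
        // The application will feed u8 NHWC frames of arbitrary spatial size...
        ppp.input().tensor().
            set_element_type(ov::element::u8).
            set_spatial_dynamic_shape().
            set_layout({ "NHWC" });
        // ...which are converted and resized to what the model actually expects.
        ppp.input().preprocess().
            convert_element_type(ov::element::f32).
            convert_layout("NCHW").
            resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
        ppp.input().model().set_layout("NCHW");
        ppp.output().tensor().set_element_type(ov::element::f32);

        model = ppp.build();
        return core.compile_model(model, "CPU");  // placeholder device
    }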
@@ -120,89 +130,101 @@ class PersonDetector {
     }
 
 private:
+    bool autoResize;
     std::vector<float> detectionTresholds;
-    std::string detectorInputBlobName;
-    std::string detectorOutputBlobName;
-    InferenceEngine::Core ie_; // The only reason to store a plugin as to assure that it lives at least as long as ExecutableNetwork
-    InferenceEngine::ExecutableNetwork net;
+    ov::Core core_; // The only reason to store a plugin as to assure that it lives at least as long as CompiledModel
+    ov::CompiledModel compiledModel;
 };
 
 class ReId {
 public:
     ReId() = default;
-    ReId(InferenceEngine::Core& ie, const std::string & deviceName, const std::string& xmlPath, const bool autoResize,
-         const std::map<std::string, std::string> &pluginConfig) :
-        ie_{ie} {
-        auto network = ie.ReadNetwork(xmlPath);
-
-        /** Re-ID network should have only one input and one output **/
+    ReId(ov::Core& core, const std::string& deviceName, const std::string& xmlPath, const bool autoResize,
+         const ov::AnyMap& pluginConfig) :
+        autoResize {autoResize},
+        core_{core} {
+        slog::info << "Reading Person Re-ID model " << xmlPath << slog::endl;
+        auto model = core.read_model(xmlPath);
+        logBasicModelInfo(model);
+        /** Re-ID model should have only one input and one output **/
         // ---------------------------Check inputs ------------------------------------------------------
-        InferenceEngine::InputsDataMap ReIdInputInfo(network.getInputsInfo());
-        if (ReIdInputInfo.size() != 1) {
-            throw std::logic_error("Re-ID network should have only one input");
+        ov::OutputVector inputInfo = model->inputs();
+        if (inputInfo.size() != 1) {
+            throw std::logic_error("Re-ID model should have only one input");
         }
-        InferenceEngine::InputInfo::Ptr& ReIdInputInfoFirst = ReIdInputInfo.begin()->second;
-        ReIdInputInfoFirst->setPrecision(InferenceEngine::Precision::U8);
+
+        ov::preprocess::PrePostProcessor ppp(model);
         if (autoResize) {
-            ReIdInputInfoFirst->getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
-            ReIdInputInfoFirst->setLayout(InferenceEngine::Layout::NHWC);
-        } else {
-            ReIdInputInfoFirst->setLayout(InferenceEngine::Layout::NCHW);
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_spatial_dynamic_shape().
+                set_layout({ "NHWC" });
+
+            ppp.input().preprocess().
+                convert_element_type(ov::element::f32).
+                convert_layout("NCHW").
+                resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
+
+            ppp.input().model().set_layout("NCHW");
+        }
+        else {
+            ppp.input().tensor().
+                set_element_type(ov::element::u8).
+                set_layout({ "NCHW" });
         }
-        reIdInputName = ReIdInputInfo.begin()->first;
         // -----------------------------------------------------------------------------------------------------
 
         // ---------------------------Check outputs ------------------------------------------------------
-        InferenceEngine::OutputsDataMap ReIdOutputInfo(network.getOutputsInfo());
-        if (ReIdOutputInfo.size() != 1) {
-            throw std::logic_error("Re-ID should have 1 output");
+        ov::OutputVector outputInfo = model->outputs();
+        if (outputInfo.size() != 1) {
+            throw std::logic_error("Re-ID model should have 1 output");
         }
-        reIdOutputName = ReIdOutputInfo.begin()->first;
-        InferenceEngine::DataPtr& _output = ReIdOutputInfo.begin()->second;
-        const InferenceEngine::SizeVector outputDims = _output->getTensorDesc().getDims();
-        if (outputDims.size() != 2) {
+        const ov::Shape outputShape = model->output().get_shape();
+        if (outputShape.size() != 2) {
             throw std::logic_error("Incorrect output dimensions for Re-ID");
         }
-        reidLen = outputDims[1];
-        _output->setPrecision(InferenceEngine::Precision::FP32);
 
-        net = ie_.LoadNetwork(network, deviceName, pluginConfig);
-        logExecNetworkInfo(net, xmlPath, deviceName, "Person Re-Identification");
+        reidLen = (int)outputShape[1];
+        ppp.output().tensor().set_element_type(ov::element::f32);
+        model = ppp.build();
+        compiledModel = core_.compile_model(model, deviceName, pluginConfig);
+        logCompiledModelInfo(compiledModel, xmlPath, deviceName, "Person Re-ID");
     }
 
-    InferenceEngine::InferRequest createInferRequest() {
-        return net.CreateInferRequest();
+    ov::InferRequest createInferRequest() {
+        return compiledModel.create_infer_request();
    }
 
-    void setImage(InferenceEngine::InferRequest& inferRequest, const cv::Mat& img, const cv::Rect personRect) {
-        InferenceEngine::Blob::Ptr roiBlob = inferRequest.GetBlob(reIdInputName);
-        if (InferenceEngine::Layout::NHWC == roiBlob->getTensorDesc().getLayout()) { // autoResize is set
-            InferenceEngine::ROI cropRoi{0, static_cast<size_t>(personRect.x), static_cast<size_t>(personRect.y), static_cast<size_t>(personRect.width),
-                static_cast<size_t>(personRect.height)};
-            InferenceEngine::Blob::Ptr frameBlob = wrapMat2Blob(img);
-            InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(frameBlob, cropRoi);
-            inferRequest.SetBlob(reIdInputName, roiBlob);
+    void setImage(ov::InferRequest& inferRequest, const cv::Mat& img, const cv::Rect personRect) {
+        ov::Tensor input = inferRequest.get_input_tensor();
+        if (autoResize) {
+            ov::Tensor frameTensor = wrapMat2Tensor(img);
+            ov::Shape tensorShape = frameTensor.get_shape();
+            ov::Layout layout("NHWC");
+            const size_t batch = tensorShape[ov::layout::batch_idx(layout)];
+            const size_t channels = tensorShape[ov::layout::channels_idx(layout)];
+            ov::Tensor roiTensor(frameTensor, {0, static_cast<size_t>(personRect.y), static_cast<size_t>(personRect.x), 0},
+                {batch, static_cast<size_t>(personRect.y) + static_cast<size_t>(personRect.height),
+                static_cast<size_t>(personRect.x) + static_cast<size_t>(personRect.width), channels});
+            inferRequest.set_input_tensor(roiTensor);
         } else {
             const cv::Mat& personImage = img(personRect);
-            matToBlob(personImage, roiBlob);
+            matToTensor(personImage, input);
         }
     }
 
-    std::vector<float> getResults(InferenceEngine::InferRequest& inferRequest) {
+    std::vector<float> getResults(ov::InferRequest& inferRequest) {
         std::vector<float> result;
-        InferenceEngine::LockedMemory<const void> reIdOutputMapped = InferenceEngine::as<InferenceEngine::MemoryBlob>(
-            inferRequest.GetBlob(reIdOutputName))->rmap();
-        const auto data = reIdOutputMapped.as<float*>();
+        const float* const reids = inferRequest.get_output_tensor().data<float>();
         for (int i = 0; i < reidLen; i++) {
-            result.push_back(data[i]);
+            result.push_back(reids[i]);
         }
         return result;
     }
 
 private:
+    bool autoResize;
     int reidLen;
-    std::string reIdInputName;
-    std::string reIdOutputName;
-    InferenceEngine::Core ie_; // The only reason to store a device as to assure that it lives at least as long as ExecutableNetwork
-    InferenceEngine::ExecutableNetwork net;
+    ov::Core core_; // The only reason to store a device as to assure that it lives at least as long as CompiledModel
+    ov::CompiledModel compiledModel;
 };
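ReId::setImage above replaces the old make_shared_blob(frameBlob, cropRoi) call with the OpenVINO 2.0 ROI tensor constructor, which creates a view over the full frame without copying; because the wrapped frame is an NHWC tensor, the begin/end coordinates are ordered {N, y, x, C}. A condensed standalone sketch of that pattern (wrapMat2Tensor is the OMZ utils helper already used in the diff):

    #include <opencv2/core.hpp>
    #include <openvino/openvino.hpp>
    #include <utils/ocv_common.hpp>  // wrapMat2Tensor

    void setRoiInput(ov::InferRequest& inferRequest, const cv::Mat& frame, const cv::Rect& personRect) {
        ov::Tensor frameTensor = wrapMat2Tensor(frame);  // u8 NHWC view over the BGR frame
        const ov::Shape shape = frameTensor.get_shape();
        const ov::Layout layout("NHWC");
        const size_t batch = shape[ov::layout::batch_idx(layout)];
        const size_t channels = shape[ov::layout::channels_idx(layout)];
        // The ROI tensor shares the frame's memory; begin/end follow the NHWC axes.
        ov::Tensor roi(frameTensor,
                       {0, static_cast<size_t>(personRect.y), static_cast<size_t>(personRect.x), 0},
                       {batch,
                        static_cast<size_t>(personRect.y + personRect.height),
                        static_cast<size_t>(personRect.x + personRect.width),
                        channels});
        inferRequest.set_input_tensor(roi);
    }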

demos/social_distance_demo/cpp/include/person_trackers.hpp

Lines changed: 5 additions & 5 deletions

@@ -1,4 +1,4 @@
-// Copyright (C) 2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -12,7 +12,7 @@
 #include <opencv2/core.hpp>
 
 struct TrackableObject {
-    TrackableObject(cv::Rect2i bb, const std::vector<float> &r, cv::Point centroid)
+    TrackableObject(cv::Rect2i bb, const std::vector<float>& r, cv::Point centroid)
         : bbox{bb}, reid{r}, updated{false}, disappeared(0) {
         centroids.push_back(centroid);
     }
@@ -28,10 +28,10 @@ class PersonTrackers {
 public:
     PersonTrackers() : trackIdGenerator{0}, similarityThreshold{0.7f}, maxDisappeared{10} {}
 
-    void similarity(std::list<TrackableObject> &tos) {
+    void similarity(std::list<TrackableObject>& tos) {
         for (const auto& to : tos) {
             std::deque<std::pair<int, float>> sim;
-            for (auto &tracker : trackables) {
+            for (auto& tracker : trackables) {
                 if (!tracker.second.updated) {
                     float cosine = cosineSimilarity(to.reid, tracker.second.reid);
                     if (cosine > similarityThreshold) {
@@ -68,7 +68,7 @@ class PersonTrackers {
            }
        }
 
-    float cosineSimilarity(const std::vector<float> &a, const std::vector<float> &b) {
+    float cosineSimilarity(const std::vector<float>& a, const std::vector<float>& b) {
        if (a.size() != b.size()) {
            throw "Vector sizes don't match!";
        }
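For reference, the tracker above matches new detections to existing tracks by comparing Re-ID embeddings with cosine similarity against the 0.7 threshold set in the constructor. The body of the demo's cosineSimilarity is not shown in this hunk, so the following is a standard standalone implementation of that metric (cos θ = a·b / (‖a‖ ‖b‖)), not necessarily line-for-line identical:

    #include <cmath>
    #include <stdexcept>
    #include <vector>

    // Cosine similarity between two Re-ID embeddings: values near 1 mean the
    // embeddings likely describe the same person.
    float cosineSimilarity(const std::vector<float>& a, const std::vector<float>& b) {
        if (a.size() != b.size()) {
            throw std::invalid_argument("Vector sizes don't match!");
        }
        float dot = 0.f, normA = 0.f, normB = 0.f;
        for (size_t i = 0; i < a.size(); ++i) {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        if (normA == 0.f || normB == 0.f) {
            return 0.f;  // convention for zero vectors; the demo's own handling is not shown here
        }
        return dot / (std::sqrt(normA) * std::sqrt(normB));
    }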
