Skip to content

Commit 1410536

Browse files
authored
Merge pull request #2866 from vladimir-dudnik/vd/ov2.0-crossroad-barrier-demo
OV2.0 API C++ crossroad barrier demo
2 parents b3910cf + cd75d98 commit 1410536

File tree

9 files changed

+701
-610
lines changed

9 files changed

+701
-610
lines changed

demos/common/cpp/utils/include/utils/common.hpp

Lines changed: 55 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,11 @@
2828
#include "utils/args_helper.hpp"
2929

3030
#ifndef UNUSED
31-
#ifdef _WIN32
32-
#define UNUSED
33-
#else
34-
#define UNUSED __attribute__((unused))
35-
#endif
31+
#ifdef _WIN32
32+
#define UNUSED
33+
#else
34+
#define UNUSED __attribute__((unused))
35+
#endif
3636
#endif
3737

3838
template <typename T, std::size_t N>
@@ -92,8 +92,8 @@ class Color {
9292
* @param b - value for blue channel
9393
*/
9494
Color(unsigned char r,
95-
unsigned char g,
96-
unsigned char b) : _r(r), _g(g), _b(b) {}
95+
unsigned char g,
96+
unsigned char b) : _r(r), _g(g), _b(b) {}
9797

9898
inline unsigned char red() const {
9999
return _r;
@@ -138,16 +138,17 @@ inline std::size_t getTensorWidth(const InferenceEngine::TensorDesc& desc) {
138138
const auto& dims = desc.getDims();
139139
const auto& size = dims.size();
140140
if ((size >= 2) &&
141-
(layout == InferenceEngine::Layout::NCHW ||
142-
layout == InferenceEngine::Layout::NHWC ||
143-
layout == InferenceEngine::Layout::NCDHW ||
144-
layout == InferenceEngine::Layout::NDHWC ||
145-
layout == InferenceEngine::Layout::OIHW ||
146-
layout == InferenceEngine::Layout::CHW ||
147-
layout == InferenceEngine::Layout::HW)) {
141+
(layout == InferenceEngine::Layout::NCHW ||
142+
layout == InferenceEngine::Layout::NHWC ||
143+
layout == InferenceEngine::Layout::NCDHW ||
144+
layout == InferenceEngine::Layout::NDHWC ||
145+
layout == InferenceEngine::Layout::OIHW ||
146+
layout == InferenceEngine::Layout::CHW ||
147+
layout == InferenceEngine::Layout::HW)) {
148148
// Regardless of layout, dimensions are stored in fixed order
149149
return dims.back();
150-
} else {
150+
}
151+
else {
151152
throw std::runtime_error("Tensor does not have width dimension");
152153
}
153154
return 0;
@@ -158,72 +159,75 @@ inline std::size_t getTensorHeight(const InferenceEngine::TensorDesc& desc) {
158159
const auto& dims = desc.getDims();
159160
const auto& size = dims.size();
160161
if ((size >= 2) &&
161-
(layout == InferenceEngine::Layout::NCHW ||
162-
layout == InferenceEngine::Layout::NHWC ||
163-
layout == InferenceEngine::Layout::NCDHW ||
164-
layout == InferenceEngine::Layout::NDHWC ||
165-
layout == InferenceEngine::Layout::OIHW ||
166-
layout == InferenceEngine::Layout::CHW ||
167-
layout == InferenceEngine::Layout::HW)) {
162+
(layout == InferenceEngine::Layout::NCHW ||
163+
layout == InferenceEngine::Layout::NHWC ||
164+
layout == InferenceEngine::Layout::NCDHW ||
165+
layout == InferenceEngine::Layout::NDHWC ||
166+
layout == InferenceEngine::Layout::OIHW ||
167+
layout == InferenceEngine::Layout::CHW ||
168+
layout == InferenceEngine::Layout::HW)) {
168169
// Regardless of layout, dimensions are stored in fixed order
169170
return dims.at(size - 2);
170-
} else {
171+
}
172+
else {
171173
throw std::runtime_error("Tensor does not have height dimension");
172174
}
173175
return 0;
174176
}
175177

176178
inline std::size_t getTensorChannels(const InferenceEngine::TensorDesc& desc) {
177179
const auto& layout = desc.getLayout();
178-
if (layout == InferenceEngine::Layout::NCHW ||
179-
layout == InferenceEngine::Layout::NHWC ||
180+
if (layout == InferenceEngine::Layout::NCHW ||
181+
layout == InferenceEngine::Layout::NHWC ||
180182
layout == InferenceEngine::Layout::NCDHW ||
181183
layout == InferenceEngine::Layout::NDHWC ||
182-
layout == InferenceEngine::Layout::C ||
183-
layout == InferenceEngine::Layout::CHW ||
184-
layout == InferenceEngine::Layout::NC ||
184+
layout == InferenceEngine::Layout::C ||
185+
layout == InferenceEngine::Layout::CHW ||
186+
layout == InferenceEngine::Layout::NC ||
185187
layout == InferenceEngine::Layout::CN) {
186188
// Regardless of layout, dimensions are stored in fixed order
187189
const auto& dims = desc.getDims();
188190
switch (desc.getLayoutByDims(dims)) {
189-
case InferenceEngine::Layout::C: return dims.at(0);
190-
case InferenceEngine::Layout::NC: return dims.at(1);
191-
case InferenceEngine::Layout::CHW: return dims.at(0);
192-
case InferenceEngine::Layout::NCHW: return dims.at(1);
193-
case InferenceEngine::Layout::NCDHW: return dims.at(1);
194-
case InferenceEngine::Layout::SCALAR: // [[fallthrough]]
195-
case InferenceEngine::Layout::BLOCKED: // [[fallthrough]]
196-
default:
197-
throw std::runtime_error("Tensor does not have channels dimension");
191+
case InferenceEngine::Layout::C: return dims.at(0);
192+
case InferenceEngine::Layout::NC: return dims.at(1);
193+
case InferenceEngine::Layout::CHW: return dims.at(0);
194+
case InferenceEngine::Layout::NCHW: return dims.at(1);
195+
case InferenceEngine::Layout::NCDHW: return dims.at(1);
196+
case InferenceEngine::Layout::SCALAR: // [[fallthrough]]
197+
case InferenceEngine::Layout::BLOCKED: // [[fallthrough]]
198+
default:
199+
throw std::runtime_error("Tensor does not have channels dimension");
198200
}
199-
} else {
201+
}
202+
else {
200203
throw std::runtime_error("Tensor does not have channels dimension");
201204
}
202205
return 0;
203206
}
204207

205208
inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) {
206209
const auto& layout = desc.getLayout();
207-
if (layout == InferenceEngine::Layout::NCHW ||
208-
layout == InferenceEngine::Layout::NHWC ||
210+
if (layout == InferenceEngine::Layout::NCHW ||
211+
layout == InferenceEngine::Layout::NHWC ||
209212
layout == InferenceEngine::Layout::NCDHW ||
210213
layout == InferenceEngine::Layout::NDHWC ||
211-
layout == InferenceEngine::Layout::NC ||
214+
layout == InferenceEngine::Layout::NC ||
212215
layout == InferenceEngine::Layout::CN) {
213216
// Regardless of layout, dimensions are stored in fixed order
214217
const auto& dims = desc.getDims();
215218
switch (desc.getLayoutByDims(dims)) {
216-
case InferenceEngine::Layout::NC: return dims.at(0);
217-
case InferenceEngine::Layout::NCHW: return dims.at(0);
218-
case InferenceEngine::Layout::NCDHW: return dims.at(0);
219-
case InferenceEngine::Layout::CHW: // [[fallthrough]]
220-
case InferenceEngine::Layout::C: // [[fallthrough]]
221-
case InferenceEngine::Layout::SCALAR: // [[fallthrough]]
222-
case InferenceEngine::Layout::BLOCKED: // [[fallthrough]]
223-
default:
224-
throw std::runtime_error("Tensor does not have channels dimension");
219+
case InferenceEngine::Layout::NC: return dims.at(0);
220+
case InferenceEngine::Layout::NCHW: return dims.at(0);
221+
case InferenceEngine::Layout::NCDHW: return dims.at(0);
222+
case InferenceEngine::Layout::CHW: // [[fallthrough]]
223+
case InferenceEngine::Layout::C: // [[fallthrough]]
224+
case InferenceEngine::Layout::SCALAR: // [[fallthrough]]
225+
case InferenceEngine::Layout::BLOCKED: // [[fallthrough]]
226+
default:
227+
throw std::runtime_error("Tensor does not have channels dimension");
225228
}
226-
} else {
229+
}
230+
else {
227231
throw std::runtime_error("Tensor does not have channels dimension");
228232
}
229233
return 0;

demos/common/cpp/utils/include/utils/threads_common.hpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Copyright (C) 2018-2021 Intel Corporation
1+
// Copyright (C) 2018-2022 Intel Corporation
22
// SPDX-License-Identifier: Apache-2.0
33
//
44

@@ -17,9 +17,10 @@
1717
#include <vector>
1818

1919
#include <opencv2/core/core.hpp>
20-
#include <utils/performance_metrics.hpp>
20+
#include "utils/performance_metrics.hpp"
2121

22-
class VideoFrame { // VideoFrame can represent not a single image but the whole grid
22+
// VideoFrame can represent not a single image but the whole grid
23+
class VideoFrame {
2324
public:
2425
typedef std::shared_ptr<VideoFrame> Ptr;
2526

@@ -44,6 +45,7 @@ class Task {
4445
virtual void process() = 0;
4546
virtual ~Task() = default;
4647

48+
std::string name;
4749
VideoFrame::Ptr sharedVideoFrame; // it is possible that two tasks try to draw on the same cvMat
4850
const float priority;
4951
};
Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,11 @@
1-
# Copyright (C) 2018-2019 Intel Corporation
1+
# Copyright (C) 2018-2022 Intel Corporation
22
# SPDX-License-Identifier: Apache-2.0
33
#
44

5+
file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
6+
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
7+
58
add_demo(NAME crossroad_camera_demo
6-
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
7-
HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/crossroad_camera_demo.hpp"
9+
SOURCES "${SOURCES}"
10+
HEADERS "${HEADERS}"
811
DEPENDENCIES monitors)

demos/crossroad_camera_demo/cpp/crossroad_camera_demo.hpp

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,12 @@
1-
// Copyright (C) 2018-2019 Intel Corporation
1+
// Copyright (C) 2018-2022 Intel Corporation
22
// SPDX-License-Identifier: Apache-2.0
33
//
44

55
///////////////////////////////////////////////////////////////////////////////////////////////////
66
#pragma once
77

8-
#include <string>
9-
#include <vector>
10-
#include <gflags/gflags.h>
11-
12-
#include <utils/default_flags.hpp>
8+
#include "gflags/gflags.h"
9+
#include "utils/default_flags.hpp"
1310

1411
DEFINE_INPUT_FLAGS
1512
DEFINE_OUTPUT_FLAGS
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
// Copyright (C) 2022 Intel Corporation
2+
// SPDX-License-Identifier: Apache-2.0
3+
//
4+
5+
#include <string>
6+
#include "openvino/openvino.hpp"
7+
#include "utils/slog.hpp"
8+
9+
#pragma once
10+
11+
struct BaseDetection {
12+
ov::CompiledModel m_compiled_model;
13+
ov::InferRequest m_infer_request;
14+
std::string& m_commandLineFlag;
15+
std::string m_detectorName;
16+
ov::Tensor m_input_tensor;
17+
std::string m_inputName;
18+
std::string m_outputName;
19+
20+
BaseDetection(std::string& commandLineFlag, const std::string& detectorName) :
21+
m_commandLineFlag(commandLineFlag), m_detectorName(detectorName) {}
22+
23+
ov::CompiledModel* operator->() {
24+
return &m_compiled_model;
25+
}
26+
27+
virtual std::shared_ptr<ov::Model> read(const ov::Core& core) = 0;
28+
29+
virtual void setRoiTensor(const ov::Tensor& roi_tensor) {
30+
if (!enabled())
31+
return;
32+
if (!m_infer_request)
33+
m_infer_request = m_compiled_model.create_infer_request();
34+
35+
m_infer_request.set_input_tensor(roi_tensor);
36+
}
37+
38+
virtual void enqueue(const cv::Mat& person) {
39+
if (!enabled())
40+
return;
41+
if (!m_infer_request)
42+
m_infer_request = m_compiled_model.create_infer_request();
43+
44+
m_input_tensor = m_infer_request.get_input_tensor();
45+
matToTensor(person, m_input_tensor);
46+
}
47+
48+
virtual void submitRequest() {
49+
if (!enabled() || !m_infer_request)
50+
return;
51+
52+
m_infer_request.start_async();
53+
}
54+
55+
virtual void wait() {
56+
if (!enabled()|| !m_infer_request)
57+
return;
58+
59+
m_infer_request.wait();
60+
}
61+
62+
mutable bool m_enablingChecked = false;
63+
mutable bool m_enabled = false;
64+
65+
bool enabled() const {
66+
if (!m_enablingChecked) {
67+
m_enabled = !m_commandLineFlag.empty();
68+
if (!m_enabled) {
69+
slog::info << m_detectorName << " detection DISABLED" << slog::endl;
70+
}
71+
m_enablingChecked = true;
72+
}
73+
return m_enabled;
74+
}
75+
};
76+
77+
struct Load {
78+
BaseDetection& m_detector;
79+
explicit Load(BaseDetection& detector) : m_detector(detector) {}
80+
81+
void into(ov::Core& core, const std::string& deviceName) const {
82+
if (m_detector.enabled()) {
83+
m_detector.m_compiled_model = core.compile_model(m_detector.read(core), deviceName);
84+
logCompiledModelInfo(m_detector.m_compiled_model, m_detector.m_commandLineFlag, deviceName, m_detector.m_detectorName);
85+
}
86+
}
87+
};

0 commit comments

Comments (0)