Commit f324811

Author: akorobeinikov
Merge remote-tracking branch 'upstream/master' into ak/model_api_2.0
2 parents: c889f78 + bf766bd

File tree: 618 files changed (+3963 additions, -1997 deletions)

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -214,7 +214,7 @@ This example uses validation configuration file for [DenseNet-121](models/public
 models:
   - name: densenet-121-tf
     launchers:
-      - framework: dlsdk
+      - framework: openvino
         adapter: classification

     datasets:

ci/dependencies.yml

Lines changed: 6 additions & 6 deletions
@@ -1,6 +1,6 @@
-opencv_linux: '2022.1.0.dev20220131'
-opencv_windows: '2022.1.0.dev20220131'
-openvino_linux: '2022.1.0.dev20220131'
-openvino_windows: '2022.1.0.dev20220131'
-wheel_linux: '2022.1.0.dev20220131-6386'
-wheel_windows: '2022.1.0.dev20220131-6386'
+opencv_linux: '20220210_0636-4.5.5_043'
+opencv_windows: '20220210_0636-4.5.5_043'
+openvino_linux: '2022.1.0.577'
+openvino_windows: '2022.1.0.577'
+wheel_linux: '2022.1.0.dev20220209-6562'
+wheel_windows: '2022.1.0.dev20220209-6562'

demos/action_recognition_demo/python/action_recognition_demo.py

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ def main():
     core = Core()

     if 'MYRIAD' in args.device:
-        myriad_config = {'VPU_HW_STAGES_OPTIMIZATION': 'YES'}
+        myriad_config = {'MYRIAD_ENABLE_HW_ACCELERATION': 'YES'}
         core.set_config(myriad_config, 'MYRIAD')

     decoder_target_device = 'CPU'

demos/background_subtraction_demo/cpp_gapi/main.cpp

Lines changed: 3 additions & 12 deletions
@@ -6,6 +6,7 @@
 #include <utils/args_helper.hpp>
 #include <utils_gapi/stream_source.hpp>
 #include <utils/config_factory.h>
+#include <utils/ocv_common.hpp>

 #include <opencv2/gapi/streaming/cap.hpp>
 #include <opencv2/gapi/imgproc.hpp>
@@ -160,20 +161,13 @@ int main(int argc, char *argv[]) {
     cv::Size graphSize{static_cast<int>(frame_size.width / 4), 60};
     Presenter presenter(FLAGS_u, frame_size.height - graphSize.height - 10, graphSize);

-    /** Save output result **/
-    cv::VideoWriter videoWriter;
-    if (!FLAGS_o.empty() && !videoWriter.open(FLAGS_o, cv::VideoWriter::fourcc('M', 'J', 'P', 'G'),
-                                              cap->fps(), frame_size)) {
-        throw std::runtime_error("Can't open video writer");
-    }
+    LazyVideoWriter videoWriter{FLAGS_o, cap->fps(), FLAGS_limit};

     bool isStart = true;
-    uint64_t curr_frame_num = 0;
     const auto startTime = std::chrono::steady_clock::now();
     pipeline.start();

     while(pipeline.pull(cv::gout(output))) {
-        ++curr_frame_num;
         presenter.drawGraphs(output);
         if (isStart) {
             metrics.update(startTime, output, { 10, 22 }, cv::FONT_HERSHEY_COMPLEX,
@@ -185,10 +179,7 @@ int main(int argc, char *argv[]) {
                 0.65, { 200, 10, 10 }, 2, PerformanceMetrics::MetricTypes::FPS);
         }

-        if (videoWriter.isOpened() &&
-            (FLAGS_limit <= 0 || curr_frame_num <= FLAGS_limit)) {
-            videoWriter.write(output);
-        }
+        videoWriter.write(output);

         if (!FLAGS_no_show) {
             cv::imshow(windowName, output);
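
The removed cv::VideoWriter bookkeeping (manual open, frame counter, FLAGS_limit check) is taken over by LazyVideoWriter from the demo utilities. A minimal sketch of the resulting pattern, assuming only the interface visible in this hunk (construction from output path, FPS and frame limit; write() opens the writer lazily and enforces the limit internally):

    // Sketch only: FLAGS_o, FLAGS_limit, cap, pipeline and output are the demo's existing objects.
    LazyVideoWriter videoWriter{FLAGS_o, cap->fps(), FLAGS_limit};

    while (pipeline.pull(cv::gout(output))) {
        // No frame counter or isOpened()/limit checks are needed here any more;
        // write() is expected to do nothing once the limit is reached or when no output path was given.
        videoWriter.write(output);
    }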

demos/background_subtraction_demo/python/background_subtraction_demo.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def build_argparser():
                       help='Optional. Background blur strength (by default with value 0 is not applied).')
     args.add_argument('--layout', type=str, default=None,
                       help='Optional. Model inputs layouts. '
-                           'Format "NCHW" or "<input1>:<layout1>,<input2>:<layout2>" in case of more than one input.'
+                           'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
                            'To define layout you should use only capital letters')

     infer_args = parser.add_argument_group('Inference options')
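
In practice the accepted --layout values change from the colon-separated form to the bracketed one, e.g. (with hypothetical input names) --layout "[NCHW]" for a single-input model, or --layout "image[NCHW],info[NC]" to set layouts per input.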

demos/classification_benchmark_demo/cpp/README.md

Lines changed: 4 additions & 0 deletions
@@ -45,6 +45,7 @@ omz_converter --list models.lst
 * googlenet-v1
 * googlenet-v1-tf
 * googlenet-v2
+* googlenet-v2-tf
 * googlenet-v3
 * googlenet-v3-pytorch
 * googlenet-v4-tf
@@ -59,6 +60,8 @@ omz_converter --list models.lst
 * mobilenet-v2-1.0-224
 * mobilenet-v2-1.4-224
 * mobilenet-v2-pytorch
+* mobilenet-v3-large-1.0-224-tf
+* mobilenet-v3-small-1.0-224-tf
 * nfnet-f0
 * octave-resnet-26-0.25
 * regnetx-3.2gf
@@ -67,6 +70,7 @@ omz_converter --list models.lst
 * repvgg-b3
 * resnest-50-pytorch
 * resnet-18-pytorch
+* resnet-34-pytorch
 * resnet-50-pytorch
 * resnet-50-tf
 * resnet18-xnor-binary-onnx-0001

demos/classification_benchmark_demo/cpp/models.lst

Lines changed: 4 additions & 0 deletions
@@ -9,6 +9,7 @@ efficientnet-b0-pytorch
 googlenet-v1
 googlenet-v1-tf
 googlenet-v2
+googlenet-v2-tf
 googlenet-v3
 googlenet-v3-pytorch
 googlenet-v4-tf
@@ -23,6 +24,8 @@ mobilenet-v2
 mobilenet-v2-1.0-224
 mobilenet-v2-1.4-224
 mobilenet-v2-pytorch
+mobilenet-v3-large-1.0-224-tf
+mobilenet-v3-small-1.0-224-tf
 nfnet-f0
 octave-resnet-26-0.25
 regnetx-3.2gf
@@ -31,6 +34,7 @@ repvgg-b1
 repvgg-b3
 resnest-50-pytorch
 resnet-18-pytorch
+resnet-34-pytorch
 resnet-50-pytorch
 resnet-50-tf
 resnet18-xnor-binary-onnx-0001

demos/classification_demo/python/README.md

Lines changed: 4 additions & 0 deletions
@@ -43,6 +43,7 @@ omz_converter --list models.lst
 * googlenet-v1
 * googlenet-v1-tf
 * googlenet-v2
+* googlenet-v2-tf
 * googlenet-v3
 * googlenet-v3-pytorch
 * googlenet-v4-tf
@@ -57,6 +58,8 @@ omz_converter --list models.lst
 * mobilenet-v2-1.0-224
 * mobilenet-v2-1.4-224
 * mobilenet-v2-pytorch
+* mobilenet-v3-large-1.0-224-tf
+* mobilenet-v3-small-1.0-224-tf
 * nfnet-f0
 * octave-resnet-26-0.25
 * regnetx-3.2gf
@@ -65,6 +68,7 @@ omz_converter --list models.lst
 * repvgg-b3
 * resnest-50-pytorch
 * resnet-18-pytorch
+* resnet-34-pytorch
 * resnet-50-pytorch
 * resnet-50-tf
 * resnet18-xnor-binary-onnx-0001

demos/classification_demo/python/models.lst

Lines changed: 4 additions & 0 deletions
@@ -11,6 +11,7 @@ efficientnet-v2-s
 googlenet-v1
 googlenet-v1-tf
 googlenet-v2
+googlenet-v2-tf
 googlenet-v3
 googlenet-v3-pytorch
 googlenet-v4-tf
@@ -25,6 +26,8 @@ mobilenet-v2
 mobilenet-v2-1.0-224
 mobilenet-v2-1.4-224
 mobilenet-v2-pytorch
+mobilenet-v3-large-1.0-224-tf
+mobilenet-v3-small-1.0-224-tf
 nfnet-f0
 octave-resnet-26-0.25
 regnetx-3.2gf
@@ -33,6 +36,7 @@ repvgg-b1
 repvgg-b3
 resnest-50-pytorch
 resnet-18-pytorch
+resnet-34-pytorch
 resnet-50-pytorch
 resnet-50-tf
 resnet18-xnor-binary-onnx-0001

demos/common/cpp/utils/include/utils/common.hpp

Lines changed: 9 additions & 9 deletions
@@ -296,11 +296,11 @@ void logCompiledModelInfo(
     for (const auto& device : devices) {
         try {
             slog::info << "\tDevice: " << device << slog::endl;
-            std::string nstreams = compiledModel.get_property(device + "_THROUGHPUT_STREAMS").as<std::string>();
+            int32_t nstreams = compiledModel.get_property(ov::streams::num);
             slog::info << "\t\tNumber of streams: " << nstreams << slog::endl;
             if (device == "CPU") {
-                std::string nthreads = compiledModel.get_property("CPU_THREADS_NUM").as<std::string>();
-                slog::info << "\t\tNumber of threads: " << (nthreads == "0" ? "AUTO" : nthreads) << slog::endl;
+                int32_t nthreads = compiledModel.get_property(ov::inference_num_threads);
+                slog::info << "\t\tNumber of threads: " << (nthreads == 0 ? "AUTO" : std::to_string(nthreads)) << slog::endl;
             }
         }
         catch (const ov::Exception&) {}
@@ -317,23 +317,23 @@ void logBasicModelInfo(const std::shared_ptr<ov::Model>& model) {
     ov::OutputVector outputs = model->outputs();

     slog::info << "inputs: " << slog::endl;
-    for (const ov::Output<ov::Node> input : inputs)
-    {
+    for (const ov::Output<ov::Node>& input : inputs) {
         const std::string name = input.get_any_name();
         const ov::element::Type type = input.get_element_type();
         const ov::PartialShape shape = input.get_partial_shape();
+        const ov::Layout layout = ov::layout::get_layout(input);

-        slog::info << name << ", " << type << ", " << shape << slog::endl;
+        slog::info << name << ", " << type << ", " << shape << ", " << layout.to_string() << slog::endl;
     }

     slog::info << "outputs: " << slog::endl;
-    for (const ov::Output<ov::Node> output : outputs)
-    {
+    for (const ov::Output<ov::Node>& output : outputs) {
         const std::string name = output.get_any_name();
         const ov::element::Type type = output.get_element_type();
         const ov::PartialShape shape = output.get_partial_shape();
+        const ov::Layout layout = ov::layout::get_layout(output);

-        slog::info << name << ", " << type << ", " << shape << slog::endl;
+        slog::info << name << ", " << type << ", " << shape << ", " << layout.to_string() << slog::endl;
     }

     return;
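
These helpers now use the typed OpenVINO 2.0 property API instead of string-keyed config queries such as "CPU_THROUGHPUT_STREAMS" and "CPU_THREADS_NUM". A minimal standalone sketch of the same calls (the model path and device below are placeholders, not part of this commit):

    #include <openvino/openvino.hpp>
    #include <iostream>

    int main() {
        ov::Core core;
        // Placeholder model path and device.
        ov::CompiledModel compiled = core.compile_model("model.xml", "CPU");

        // Typed properties replace the old string-keyed queries.
        int32_t nstreams = compiled.get_property(ov::streams::num);
        int32_t nthreads = compiled.get_property(ov::inference_num_threads);
        std::cout << "streams: " << nstreams << ", threads: " << nthreads << '\n';

        // ov::layout::get_layout() reads the layout attached to an input/output,
        // which logBasicModelInfo() now prints alongside type and shape.
        for (const auto& input : compiled.inputs()) {
            std::cout << input.get_any_name() << ": "
                      << ov::layout::get_layout(input).to_string() << '\n';
        }
        return 0;
    }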
