Commit 3eb05dd

Merge branch 'openvinotoolkit:master' into master
2 parents: 26a2216 + 5903bf2

464 files changed: +5203 −3388 lines

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -214,7 +214,7 @@ This example uses validation configuration file for [DenseNet-121](models/public
 models:
   - name: densenet-121-tf
     launchers:
-      - framework: dlsdk
+      - framework: openvino
         adapter: classification

     datasets:

ci/dependencies.yml

Lines changed: 4 additions & 4 deletions
@@ -1,6 +1,6 @@
 opencv_linux: '20220210_0636-4.5.5_043'
 opencv_windows: '20220210_0636-4.5.5_043'
-openvino_linux: '2022.1.0.577'
-openvino_windows: '2022.1.0.577'
-wheel_linux: '2022.1.0.dev20220209-6562'
-wheel_windows: '2022.1.0.dev20220209-6562'
+openvino_linux: '2022.1.0.582'
+openvino_windows: '2022.1.0.582'
+wheel_linux: '2022.1.0.dev20220211-6610'
+wheel_windows: '2022.1.0.dev20220211-6610'

data/dataset_definitions.yml

Lines changed: 15 additions & 0 deletions
@@ -1,4 +1,19 @@
 datasets:
+  - name: HumanMattingClips120
+    annotation_conversion:
+      converter: background_matting_sequential
+      images_dir: HumanMattingClips120
+      masks_dir: HumanMattingClips120
+      backgrounds_dir: HumanMattingClips120
+      image_prefix: com/
+      mask_prefix: fgr/
+      background_prefix: bgr/
+      with_background: True
+      with_alpha: True
+    annotation: human_matting_120.pickle
+    dataset_meta: human_matting_120_meta.json
+    data_source: HumanMattingClips120
+
   - name: ms_coco_mask_rcnn
     annotation_conversion:
       converter: mscoco_mask_rcnn

demos/3d_segmentation_demo/python/3d_segmentation_demo.py

Lines changed: 5 additions & 15 deletions
@@ -96,18 +96,12 @@ def parse_arguments():
     args.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
                       help="Optional. Specify a target device to infer on: CPU, GPU. "
                            "Use \"-d HETERO:<comma separated devices list>\" format to specify HETERO plugin.")
-    args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
-                      help="Required for CPU custom layers. "
-                           "Absolute path to a shared library with the kernels implementations.")
     args.add_argument("-nii", "--output_nifti", help="Show output inference results as raw values", default=False,
                       action="store_true")
     args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                       help="Optional. Number of threads to use for inference on CPU (including HETERO cases).")
     args.add_argument('-s', '--shape', nargs='*', type=int, required=False, default=None,
                       help="Optional. Specify shape for a network")
-    args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
-                      help="Required for GPU custom kernels. "
-                           "Absolute path to an .xml file with the kernels description.")
     args.add_argument('-ms', '--mri_sequence', type=mri_sequence, metavar='N1,N2,N3,N4', default=(0, 1, 2, 3),
                       help='Optional. Transfer MRI-sequence from dataset order to the network order.')
     args.add_argument("--full_intensities_range", required=False, default=False, action="store_true",
@@ -250,14 +244,9 @@ def main():
     core = Core()

     if 'CPU' in args.target_device:
-        if args.path_to_extension:
-            core.add_extension(args.path_to_extension, "CPU")
         if args.number_threads is not None:
-            core.set_config({'CPU_THREADS_NUM': str(args.number_threads)}, "CPU")
-    elif 'GPU' in args.target_device:
-        if args.path_to_cldnn_config:
-            core.set_config({'CONFIG_FILE': args.path_to_cldnn_config}, "GPU")
-    else:
+            core.set_property("CPU", {'CPU_THREADS_NUM': str(args.number_threads)})
+    elif 'GPU' not in args.target_device:
         raise AttributeError("Device {} do not support of 3D convolution. "
                              "Please use CPU, GPU or HETERO:*CPU*, HETERO:*GPU*")
@@ -278,6 +267,7 @@ def main():
     n, c, d, h, w = model.inputs[0].shape

     compiled_model = core.compile_model(model, args.target_device)
+    output_tensor = compiled_model.outputs[0]
     infer_request = compiled_model.create_infer_request()
     log.info('The model {} is loaded to {}'.format(args.path_to_model, args.target_device))
@@ -308,8 +298,8 @@ def main():
         original_data = data_crop
         original_size = original_data.shape[-3:]

-        result = infer_request.infer({input_tensor_name: data_crop})
-        result = next(iter(result.values()))
+        input_data = {input_tensor_name: data_crop}
+        result = infer_request.infer(input_data)[output_tensor]
        batch, channels, out_d, out_h, out_w = result.shape

         list_img = []
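The change running through this demo (and the other Python demos below) is the move to OpenVINO API 2.0: device options go through `core.set_property(device, config)` rather than the removed `set_config(config, device)`, and inference results are indexed by an explicit output tensor instead of `next(iter(result.values()))`. A minimal sketch of the new pattern, assuming an IR model at "model.xml" with a static input shape (both placeholders, not from this commit):

```python
import numpy as np
from openvino.runtime import Core

core = Core()
# API 2.0: set_property(device, config) replaces set_config(config, device).
core.set_property("CPU", {'CPU_THREADS_NUM': '4'})

model = core.read_model("model.xml")  # placeholder path
compiled_model = core.compile_model(model, "CPU")
output_tensor = compiled_model.outputs[0]  # explicit handle for the single output

infer_request = compiled_model.create_infer_request()
input_tensor_name = model.inputs[0].get_any_name()
data = np.zeros(list(model.inputs[0].shape), dtype=np.float32)  # dummy input
# Results come back as a dict; index it by the output tensor
# instead of next(iter(result.values())).
result = infer_request.infer({input_tensor_name: data})[output_tensor]
```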

demos/action_recognition_demo/python/action_recognition_demo.py

Lines changed: 1 addition & 1 deletion
@@ -89,7 +89,7 @@ def main():

     if 'MYRIAD' in args.device:
         myriad_config = {'MYRIAD_ENABLE_HW_ACCELERATION': 'YES'}
-        core.set_config(myriad_config, 'MYRIAD')
+        core.set_property('MYRIAD', myriad_config)

     decoder_target_device = 'CPU'
     if args.device != 'CPU':

demos/action_recognition_demo/python/action_recognition_demo/models.py

Lines changed: 4 additions & 3 deletions
@@ -93,12 +93,13 @@ def __init__(self, model_path, core, target_device, num_requests, model_type):
             log.error("Demo supports only models with 1 output")
             sys.exit(1)

-        self.compiled_model = core.compile_model(self.model, target_device)
+        compiled_model = core.compile_model(self.model, target_device)
+        self.output_tensor = compiled_model.outputs[0]
         self.input_name = self.model.inputs[0].get_any_name()
         self.input_shape = self.model.inputs[0].shape

         self.num_requests = num_requests
-        self.infer_requests = [self.compiled_model.create_infer_request() for _ in range(self.num_requests)]
+        self.infer_requests = [compiled_model.create_infer_request() for _ in range(self.num_requests)]
         log.info('The {} model {} is loaded to {}'.format(model_type, model_path, target_device))

     def async_infer(self, frame, req_id):
@@ -107,7 +108,7 @@ def async_infer(self, frame, req_id):

     def wait_request(self, req_id):
         self.infer_requests[req_id].wait()
-        return next(iter(self.infer_requests[req_id].results.values()))
+        return self.infer_requests[req_id].results[self.output_tensor]


 class DummyDecoder:
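The same migration applies to the asynchronous path: each pooled request is submitted with `start_async`, awaited with `wait()`, and its `results` dict is indexed by the stored output tensor. A hedged sketch of the request-pool pattern above, with "model.xml" and the zero-filled frame as illustrative placeholders:

```python
import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model("model.xml")  # placeholder path
compiled_model = core.compile_model(model, "CPU")
output_tensor = compiled_model.outputs[0]
input_name = model.inputs[0].get_any_name()

# A pool of infer requests allows pipelined, overlapping inference.
infer_requests = [compiled_model.create_infer_request() for _ in range(2)]

frame = np.zeros(list(model.inputs[0].shape), dtype=np.float32)  # dummy frame
infer_requests[0].start_async({input_name: frame})  # submit without blocking
infer_requests[0].wait()                            # block until completion
result = infer_requests[0].results[output_tensor]   # results keyed by output tensor
```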

demos/background_subtraction_demo/cpp_gapi/main.cpp

Lines changed: 2 additions & 2 deletions
@@ -122,15 +122,15 @@ int main(int argc, char *argv[]) {
         });

         /** Configure network **/
-        auto config = ConfigFactory::getUserConfig(FLAGS_d, "", "", FLAGS_nireq,
+        auto config = ConfigFactory::getUserConfig(FLAGS_d, FLAGS_nireq,
                                                    FLAGS_nstreams, FLAGS_nthreads);
         const auto net = cv::gapi::ie::Params<cv::gapi::Generic> {
             model->getName(),
             FLAGS_m, // path to topology IR
             fileNameNoExt(FLAGS_m) + ".bin", // path to weights
             FLAGS_d // device specifier
         }.cfgNumRequests(config.maxAsyncRequests)
-            .pluginConfig(config.execNetworkConfig);
+            .pluginConfig(config.getLegacyConfig());
         slog::info << "The background matting model " << FLAGS_m << " is loaded to " << FLAGS_d << " device." << slog::endl;

         auto kernels = cv::gapi::combine(custom::kernels(),

demos/background_subtraction_demo/python/README.md

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ omz_converter --list models.lst
 * instance-segmentation-person-????
 * yolact-resnet50-fpn-pytorch
 * background-matting-mobilenetv2
-* robust-video-matting
+* robust-video-matting-mobilenetv3

 > **NOTE**: Refer to the tables [Intel's Pre-Trained Models Device Support](../../../models/intel/device_support.md) and [Public Pre-Trained Models Device Support](../../../models/public/device_support.md) for the details on models inference support at different devices.

demos/background_subtraction_demo/python/background_subtraction_demo.py

Lines changed: 6 additions & 2 deletions
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
- Copyright (C) 2021 Intel Corporation
+ Copyright (C) 2021-2022 Intel Corporation

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -68,6 +68,10 @@ def build_argparser():
                            'same shape as an input image.')
     args.add_argument('--blur_bgr', default=0, type=int,
                       help='Optional. Background blur strength (by default with value 0 is not applied).')
+    args.add_argument('--layout', type=str, default=None,
+                      help='Optional. Model inputs layouts. '
+                           'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
+                           'To define layout you should use only capital letters')

     infer_args = parser.add_argument_group('Inference options')
     infer_args.add_argument('-nireq', '--num_infer_requests', help='Optional. Number of infer requests.',
@@ -211,7 +215,7 @@ def main():
     if args.adapter == 'openvino':
         plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
         model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
-                                        max_num_requests=args.num_infer_requests)
+                                        max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout})
     elif args.adapter == 'ovms':
         model_adapter = OVMSAdapter(args.model)

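On the Model API side, the new `--layout` string is forwarded as-is to `OpenvinoAdapter` via `model_parameters={'input_layouts': ...}`. A sketch of that wiring under the same imports the demo uses; the model path and the "[NHWC]" layout string are illustrative placeholders, not from this commit:

```python
from openvino.model_zoo.model_api.adapters import OpenvinoAdapter, create_core

model_adapter = OpenvinoAdapter(
    create_core(), 'model.xml', device='CPU',  # placeholder model path
    max_num_requests=1,
    # Same format as --layout: "[NHWC]" applies to every input,
    # "input1[NHWC],input2[NCHW]" sets layouts per input.
    model_parameters={'input_layouts': '[NHWC]'},
)
```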

demos/background_subtraction_demo/python/models.lst

Lines changed: 2 additions & 2 deletions

@@ -1,5 +1,5 @@
 # This file can be used with the --list option of the model downloader.
 instance-segmentation-person-????
 yolact-resnet50-fpn-pytorch
-# TODO: background-matting-mobilenetv2
-# TODO: robust-video-matting
+background-matting-mobilenetv2
+robust-video-matting-mobilenetv3
