Skip to content

Commit c634395

Browse files
Merge branch 'master' into python_demos/remove_extensions
2 parents 04f63f7 + 89f856a commit c634395

File tree

47 files changed

+405
-213
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

47 files changed

+405
-213
lines changed

ci/dependencies.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
opencv_linux: '20220210_0636-4.5.5_043'
22
opencv_windows: '20220210_0636-4.5.5_043'
3-
openvino_linux: '2022.1.0.577'
4-
openvino_windows: '2022.1.0.577'
5-
wheel_linux: '2022.1.0.dev20220209-6562'
6-
wheel_windows: '2022.1.0.dev20220209-6562'
3+
openvino_linux: '2022.1.0.582'
4+
openvino_windows: '2022.1.0.582'
5+
wheel_linux: '2022.1.0.dev20220211-6610'
6+
wheel_windows: '2022.1.0.dev20220211-6610'

demos/3d_segmentation_demo/python/3d_segmentation_demo.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -267,6 +267,7 @@ def main():
267267
n, c, d, h, w = model.inputs[0].shape
268268

269269
compiled_model = core.compile_model(model, args.target_device)
270+
output_tensor = compiled_model.outputs[0]
270271
infer_request = compiled_model.create_infer_request()
271272
log.info('The model {} is loaded to {}'.format(args.path_to_model, args.target_device))
272273

@@ -297,8 +298,8 @@ def main():
297298
original_data = data_crop
298299
original_size = original_data.shape[-3:]
299300

300-
result = infer_request.infer({input_tensor_name: data_crop})
301-
result = next(iter(result.values()))
301+
input_data = {input_tensor_name: data_crop}
302+
result = infer_request.infer(input_data)[output_tensor]
302303
batch, channels, out_d, out_h, out_w = result.shape
303304

304305
list_img = []

demos/action_recognition_demo/python/action_recognition_demo/models.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -93,12 +93,13 @@ def __init__(self, model_path, core, target_device, num_requests, model_type):
9393
log.error("Demo supports only models with 1 output")
9494
sys.exit(1)
9595

96-
self.compiled_model = core.compile_model(self.model, target_device)
96+
compiled_model = core.compile_model(self.model, target_device)
97+
self.output_tensor = compiled_model.outputs[0]
9798
self.input_name = self.model.inputs[0].get_any_name()
9899
self.input_shape = self.model.inputs[0].shape
99100

100101
self.num_requests = num_requests
101-
self.infer_requests = [self.compiled_model.create_infer_request() for _ in range(self.num_requests)]
102+
self.infer_requests = [compiled_model.create_infer_request() for _ in range(self.num_requests)]
102103
log.info('The {} model {} is loaded to {}'.format(model_type, model_path, target_device))
103104

104105
def async_infer(self, frame, req_id):
@@ -107,7 +108,7 @@ def async_infer(self, frame, req_id):
107108

108109
def wait_request(self, req_id):
109110
self.infer_requests[req_id].wait()
110-
return next(iter(self.infer_requests[req_id].results.values()))
111+
return self.infer_requests[req_id].results[self.output_tensor]
111112

112113

113114
class DummyDecoder:

demos/background_subtraction_demo/python/background_subtraction_demo.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#!/usr/bin/env python3
22
"""
3-
Copyright (C) 2021 Intel Corporation
3+
Copyright (C) 2021-2022 Intel Corporation
44
55
Licensed under the Apache License, Version 2.0 (the "License");
66
you may not use this file except in compliance with the License.
@@ -68,6 +68,10 @@ def build_argparser():
6868
'same shape as an input image.')
6969
args.add_argument('--blur_bgr', default=0, type=int,
7070
help='Optional. Background blur strength (by default with value 0 is not applied).')
71+
args.add_argument('--layout', type=str, default=None,
72+
help='Optional. Model inputs layouts. '
73+
'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
74+
'To define layout you should use only capital letters')
7175

7276
infer_args = parser.add_argument_group('Inference options')
7377
infer_args.add_argument('-nireq', '--num_infer_requests', help='Optional. Number of infer requests.',
@@ -211,7 +215,7 @@ def main():
211215
if args.adapter == 'openvino':
212216
plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
213217
model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
214-
max_num_requests=args.num_infer_requests)
218+
max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout})
215219
elif args.adapter == 'ovms':
216220
model_adapter = OVMSAdapter(args.model)
217221

demos/bert_named_entity_recognition_demo/python/bert_named_entity_recognition_demo.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#!/usr/bin/env python3
22

33
"""
4-
Copyright (c) 2021 Intel Corporation
4+
Copyright (c) 2021-2022 Intel Corporation
55
66
Licensed under the Apache License, Version 2.0 (the "License");
77
you may not use this file except in compliance with the License.
@@ -56,6 +56,11 @@ def build_argparser():
5656
help="Optional. Inputs names for the network. "
5757
"Default values are \"input_ids,attention_mask,token_type_ids\" ",
5858
required=False, type=str, default="input_ids,attention_mask,token_type_ids")
59+
args.add_argument('--layout',
60+
help='Optional. Model inputs layouts. '
61+
'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
62+
'To define layout you should use only capital letters',
63+
type=str, default=None)
5964
args.add_argument("-d", "--device",
6065
help="Optional. Target device to perform inference on."
6166
"Default value is CPU", default="CPU", type=str)
@@ -108,7 +113,7 @@ def main():
108113
if args.adapter == 'openvino':
109114
plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
110115
model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
111-
max_num_requests=args.num_infer_requests)
116+
max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout})
112117
elif args.adapter == 'ovms':
113118
model_adapter = OVMSAdapter(args.model)
114119

demos/bert_question_answering_demo/python/bert_question_answering_demo.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#!/usr/bin/env python3
22

33
"""
4-
Copyright (c) 2020-2021 Intel Corporation
4+
Copyright (c) 2020-2022 Intel Corporation
55
66
Licensed under the Apache License, Version 2.0 (the "License");
77
you may not use this file except in compliance with the License.
@@ -55,6 +55,10 @@ def build_argparser():
5555
help="Optional. Inputs names for the network. "
5656
"Default values are \"input_ids,attention_mask,token_type_ids\" ",
5757
required=False, type=str, default="input_ids,attention_mask,token_type_ids")
58+
args.add_argument('--layout', type=str, default=None,
59+
help='Optional. Model inputs layouts. '
60+
'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
61+
'To define layout you should use only capital letters')
5862
args.add_argument("--output_names",
5963
help="Optional. Outputs names for the network. "
6064
"Default values are \"output_s,output_e\" ",
@@ -166,7 +170,7 @@ def main():
166170
if args.adapter == 'openvino':
167171
plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
168172
model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
169-
max_num_requests=args.num_infer_requests)
173+
max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout})
170174
elif args.adapter == 'ovms':
171175
model_adapter = OVMSAdapter(args.model)
172176

@@ -227,7 +231,7 @@ def questions():
227231
if pipeline.is_ready():
228232
if source.is_over():
229233
break
230-
pipeline.submit_data(source.get_data(), next_window_id, None)
234+
pipeline.submit_data(source.get_data(), next_window_id)
231235
next_window_id += 1
232236
else:
233237
pipeline.await_any()

demos/bert_question_answering_embedding_demo/python/bert_question_answering_embedding_demo.py

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#!/usr/bin/env python3
22

33
"""
4-
Copyright (c) 2020-2021 Intel Corporation
4+
Copyright (c) 2020-2022 Intel Corporation
55
66
Licensed under the Apache License, Version 2.0 (the "License");
77
you may not use this file except in compliance with the License.
@@ -60,6 +60,10 @@ def build_argparser():
6060
"For example 'input_ids,attention_mask,token_type_ids','position_ids'",
6161
default='input_ids,attention_mask,token_type_ids,position_ids',
6262
required=False, type=str)
63+
args.add_argument('--layout_emb', type=str, default=None,
64+
help='Optional. MODEL_EMB inputs layouts. '
65+
'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
66+
'To define layout you should use only capital letters')
6367
args.add_argument("-m_qa", "--model_qa",
6468
help="Optional. Path to an .xml file with a trained model to give exact answer",
6569
default = None,
@@ -75,6 +79,10 @@ def build_argparser():
7579
required=False, type=str)
7680
args.add_argument("--model_qa_squad_ver", help="Optional. SQUAD version used for QuestionAnswering model fine tuning",
7781
default="1.2", required=False, type=str)
82+
args.add_argument('--layout_qa', type=str, default=None,
83+
help='Optional. MODEL_QA inputs layouts. '
84+
'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
85+
'To define layout you should use only capital letters')
7886
args.add_argument("-a", "--max_answer_token_num",
7987
help="Optional. Maximum number of tokens in exact answer",
8088
default=15,
@@ -167,10 +175,10 @@ def main():
167175
visualizer = Visualizer(args.colors)
168176
total_latency = (perf_counter() - vocab_start_time) * 1e3
169177

170-
ie = create_core()
178+
core = create_core()
171179
plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
172-
model_emb_adapter = OpenvinoAdapter(ie, args.model_emb, device=args.device, plugin_config=plugin_config,
173-
max_num_requests=args.num_infer_requests)
180+
model_emb_adapter = OpenvinoAdapter(core, args.model_emb, device=args.device, plugin_config=plugin_config,
181+
max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout_emb})
174182
model_emb = BertEmbedding(model_emb_adapter, {'vocab': vocab, 'input_names': args.input_names_emb})
175183
model_emb.log_layers_info()
176184

@@ -181,13 +189,13 @@ def main():
181189
for new_length in [max_len_question, max_len_context]:
182190
model_emb.reshape(new_length)
183191
if new_length == max_len_question:
184-
emb_exec_net = ie.load_network(model_emb_adapter.net, args.device)
192+
emb_request = core.compile_model(model_emb_adapter.model, args.device).create_infer_request()
185193
else:
186194
emb_pipeline = AsyncPipeline(model_emb)
187195

188196
if args.model_qa:
189-
model_qa_adapter = OpenvinoAdapter(ie, args.model_qa, device=args.device, plugin_config=plugin_config,
190-
max_num_requests=args.num_infer_requests)
197+
model_qa_adapter = OpenvinoAdapter(core, args.model_qa, device=args.device, plugin_config=plugin_config,
198+
max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout_qa})
191199
config = {
192200
'vocab': vocab,
193201
'input_names': args.input_names_qa,
@@ -216,7 +224,8 @@ def main():
216224
def calc_question_embedding(tokens_id):
217225
num = min(max_len_question - 2, len(tokens_id))
218226
inputs, _ = model_emb.preprocess((tokens_id[:num], max_len_question))
219-
raw_result = emb_exec_net.infer(inputs)
227+
emb_request.infer(inputs)
228+
raw_result = model_emb_adapter.get_raw_result(emb_request)
220229
return model_emb.postprocess(raw_result, None)
221230

222231
source = ContextSource(paragraphs, vocab, c_window_len)

demos/classification_demo/python/classification_demo.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#!/usr/bin/env python3
22
"""
3-
Copyright (C) 2018-2021 Intel Corporation
3+
Copyright (C) 2018-2022 Intel Corporation
44
55
Licensed under the Apache License, Version 2.0 (the "License");
66
you may not use this file except in compliance with the License.
@@ -58,6 +58,10 @@ def build_argparser():
5858
common_model_args.add_argument('--labels', help='Optional. Labels mapping file.', default=None, type=str)
5959
common_model_args.add_argument('-topk', help='Optional. Number of top results. Default value is 5. Must be from 1 to 10.', default=5,
6060
type=int, choices=range(1, 11))
61+
common_model_args.add_argument('--layout', type=str, default=None,
62+
help='Optional. Model inputs layouts. '
63+
'Format "[<layout>]" or "<input1>[<layout1>],<input2>[<layout2>]" in case of more than one input.'
64+
'To define layout you should use only capital letters')
6165

6266
infer_args = parser.add_argument_group('Inference options')
6367
infer_args.add_argument('-nireq', '--num_infer_requests', help='Optional. Number of infer requests',
@@ -106,11 +110,11 @@ def build_argparser():
106110

107111
def draw_labels(frame, classifications, output_transform):
108112
frame = output_transform.resize(frame)
109-
сlass_label = ""
113+
class_label = ""
110114
if classifications:
111-
сlass_label = classifications[0][1]
115+
class_label = classifications[0][1]
112116
font_scale = 0.7
113-
label_height = cv2.getTextSize(сlass_label, cv2.FONT_HERSHEY_COMPLEX, font_scale, 2)[0][1]
117+
label_height = cv2.getTextSize(class_label, cv2.FONT_HERSHEY_COMPLEX, font_scale, 2)[0][1]
114118
initial_labels_pos = frame.shape[0] - label_height * (int(1.5 * len(classifications)) + 1)
115119

116120
if (initial_labels_pos < 0):
@@ -123,8 +127,8 @@ def draw_labels(frame, classifications, output_transform):
123127
put_highlighted_text(frame, header, (frame.shape[1] - label_width, offset_y),
124128
cv2.FONT_HERSHEY_COMPLEX, font_scale, (255, 0, 0), 2)
125129

126-
for idx, сlass_label, score in classifications:
127-
label = '{}. {} {:.2f}'.format(idx, сlass_label, score)
130+
for idx, class_label, score in classifications:
131+
label = '{}. {} {:.2f}'.format(idx, class_label, score)
128132
label_width = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX, font_scale, 2)[0][0]
129133
offset_y += int(label_height * 1.5)
130134
put_highlighted_text(frame, label, (frame.shape[1] - label_width, offset_y),
@@ -160,7 +164,7 @@ def main():
160164
if args.adapter == 'openvino':
161165
plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
162166
model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
163-
max_num_requests=args.num_infer_requests)
167+
max_num_requests=args.num_infer_requests, model_parameters = {'input_layouts': args.layout})
164168
elif args.adapter == 'ovms':
165169
model_adapter = OVMSAdapter(args.model)
166170

demos/colorization_demo/python/colorization_demo.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ def main(args):
8080
assert len(model.outputs) == 1, "Expected number of outputs is equal 1"
8181

8282
compiled_model = core.compile_model(model, device_name=args.device)
83+
output_tensor = compiled_model.outputs[0]
8384
infer_request = compiled_model.create_infer_request()
8485
log.info('The model {} is loaded to {}'.format(args.model, args.device))
8586

@@ -115,7 +116,7 @@ def main(args):
115116

116117
inputs[input_tensor_name] = np.expand_dims(img_l_rs, axis=[0, 1])
117118

118-
res = next(iter(infer_request.infer(inputs).values()))
119+
res = infer_request.infer(inputs)[output_tensor]
119120

120121
update_res = np.squeeze(res)
121122

demos/common/cpp/utils/include/utils/common.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ void logCompiledModelInfo(
296296
for (const auto& device : devices) {
297297
try {
298298
slog::info << "\tDevice: " << device << slog::endl;
299-
int32_t nstreams = compiledModel.get_property(ov::streams::num);
299+
int32_t nstreams = compiledModel.get_property(ov::num_streams);
300300
slog::info << "\t\tNumber of streams: " << nstreams << slog::endl;
301301
if (device == "CPU") {
302302
int32_t nthreads = compiledModel.get_property(ov::inference_num_threads);

0 commit comments

Comments (0)