Skip to content

Commit bb680b0

Browse files
authored
Merge pull request #3210 from anzhella-pankratova/python_demos/use_output_tensor_as_key
Python demos: modify output result accessing in case of single output layer
2 parents 515ab7a + a184ae9 commit bb680b0

File tree

16 files changed

+46
-33
lines changed

16 files changed

+46
-33
lines changed

demos/3d_segmentation_demo/python/3d_segmentation_demo.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -278,6 +278,7 @@ def main():
278278
n, c, d, h, w = model.inputs[0].shape
279279

280280
compiled_model = core.compile_model(model, args.target_device)
281+
output_tensor = compiled_model.outputs[0]
281282
infer_request = compiled_model.create_infer_request()
282283
log.info('The model {} is loaded to {}'.format(args.path_to_model, args.target_device))
283284

@@ -308,8 +309,8 @@ def main():
308309
original_data = data_crop
309310
original_size = original_data.shape[-3:]
310311

311-
result = infer_request.infer({input_tensor_name: data_crop})
312-
result = next(iter(result.values()))
312+
input_data = {input_tensor_name: data_crop}
313+
result = infer_request.infer(input_data)[output_tensor]
313314
batch, channels, out_d, out_h, out_w = result.shape
314315

315316
list_img = []

demos/action_recognition_demo/python/action_recognition_demo/models.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -93,12 +93,13 @@ def __init__(self, model_path, core, target_device, num_requests, model_type):
9393
log.error("Demo supports only models with 1 output")
9494
sys.exit(1)
9595

96-
self.compiled_model = core.compile_model(self.model, target_device)
96+
compiled_model = core.compile_model(self.model, target_device)
97+
self.output_tensor = compiled_model.outputs[0]
9798
self.input_name = self.model.inputs[0].get_any_name()
9899
self.input_shape = self.model.inputs[0].shape
99100

100101
self.num_requests = num_requests
101-
self.infer_requests = [self.compiled_model.create_infer_request() for _ in range(self.num_requests)]
102+
self.infer_requests = [compiled_model.create_infer_request() for _ in range(self.num_requests)]
102103
log.info('The {} model {} is loaded to {}'.format(model_type, model_path, target_device))
103104

104105
def async_infer(self, frame, req_id):
@@ -107,7 +108,7 @@ def async_infer(self, frame, req_id):
107108

108109
def wait_request(self, req_id):
109110
self.infer_requests[req_id].wait()
110-
return next(iter(self.infer_requests[req_id].results.values()))
111+
return self.infer_requests[req_id].results[self.output_tensor]
111112

112113

113114
class DummyDecoder:

demos/colorization_demo/python/colorization_demo.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ def main(args):
8080
assert len(model.outputs) == 1, "Expected number of outputs is equal 1"
8181

8282
compiled_model = core.compile_model(model, device_name=args.device)
83+
output_tensor = compiled_model.outputs[0]
8384
infer_request = compiled_model.create_infer_request()
8485
log.info('The model {} is loaded to {}'.format(args.model, args.device))
8586

@@ -115,7 +116,7 @@ def main(args):
115116

116117
inputs[input_tensor_name] = np.expand_dims(img_l_rs, axis=[0, 1])
117118

118-
res = next(iter(infer_request.infer(inputs).values()))
119+
res = infer_request.infer(inputs)[output_tensor]
119120

120121
update_res = np.squeeze(res)
121122

demos/face_recognition_demo/python/ie_module.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,12 +31,13 @@ def __init__(self, core, model_path, model_type):
3131
def deploy(self, device, plugin_config, max_requests=1):
3232
self.max_requests = max_requests
3333
compiled_model = self.core.compile_model(self.model, device, config=plugin_config)
34+
self.output_tensor = compiled_model.outputs[0]
3435
self.infer_queue = AsyncInferQueue(compiled_model, self.max_requests)
3536
self.infer_queue.set_callback(self.completion_callback)
3637
log.info('The {} model {} is loaded to {}'.format(self.model_type, self.model_path, device))
3738

3839
def completion_callback(self, infer_request, id):
39-
self.outputs[id] = next(iter(infer_request.results.values()))
40+
self.outputs[id] = infer_request.results[self.output_tensor]
4041

4142
def enqueue(self, input):
4243
self.clear()

demos/gpt2_text_prediction_demo/python/gpt2_text_prediction_demo.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,7 @@ def main():
9999

100100
# load model to the device
101101
compiled_model = ie.compile_model(model, args.device)
102+
output_tensor = compiled_model.outputs[0]
102103
infer_request = compiled_model.create_infer_request()
103104
log.info('The model {} is loaded to {}'.format(args.model, args.device))
104105

@@ -148,13 +149,12 @@ def prompts():
148149

149150
# infer by IE
150151
t_start = time.perf_counter()
151-
res = infer_request.infer(inputs)
152+
outputs = infer_request.infer(inputs)[output_tensor]
152153
t_end = time.perf_counter()
153154
t_count += 1
154155
log.info("Sequence of length {} is processed with {:0.2f} requests/sec ({:0.2} sec per request)".format(
155156
model_input.shape[1], 1 / (t_end - t_start), t_end - t_start))
156157

157-
outputs = next(iter(res.values()))
158158
next_token_logits = outputs[:, cur_input_len-1, :]
159159

160160
# pre-process distribution

demos/image_inpainting_demo/python/inpainting.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ def __init__(self, core, model_path, device='CPU'):
2626
self.image_input_layer, self.mask_input_layer = sorted([node.get_any_name() for node in model.inputs])
2727

2828
compiled_model = core.compile_model(model, device)
29+
self.output_tensor = compiled_model.outputs[0]
2930
self.infer_request = compiled_model.create_infer_request()
3031

3132
self.nchw_layout = model.input(self.image_input_layer).shape[1] == 3
@@ -44,11 +45,9 @@ def __init__(self, core, model_path, device='CPU'):
4445
self.input_height = input_height
4546
self.input_width = input_width
4647

47-
4848
def infer(self, image, mask):
49-
output = self.infer_request.infer(inputs={self.image_input_layer: image, self.mask_input_layer: mask})
50-
return next(iter(output.values()))
51-
49+
input_data = {self.image_input_layer: image, self.mask_input_layer: mask}
50+
return self.infer_request.infer(input_data)[self.output_tensor]
5251

5352
def process(self, image, mask):
5453
if self.nchw_layout:

demos/image_retrieval_demo/python/image_retrieval_demo/image_retrieval.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -40,16 +40,16 @@ def __init__(self, model_path, device, cpu_extension):
4040
self.model = core.read_model(model_path)
4141
self.input_tensor_name = "Placeholder"
4242
compiled_model = core.compile_model(self.model, device)
43+
self.output_tensor = compiled_model.outputs[0]
4344
self.infer_request = compiled_model.create_infer_request()
4445
log.info('The model {} is loaded to {}'.format(model_path, device))
4546

4647
def predict(self, image):
4748
''' Takes input image and returns L2-normalized embedding vector. '''
4849

49-
assert len(image.shape) == 4
5050
image = np.transpose(image, (0, 3, 1, 2))
51-
out = next(iter(self.infer_request.infer({self.input_tensor_name: image}).values()))
52-
return out
51+
input_data = {self.input_tensor_name: image}
52+
return self.infer_request.infer(input_data)[self.output_tensor]
5353

5454

5555
class ImageRetrieval:

demos/image_translation_demo/python/image_translation_demo/models.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ def __init__(self, core, model_path, device='CPU'):
2525
self.input_image_size = model.input(self.reference_image).shape
2626

2727
compiled_model = core.compile_model(model, device)
28+
self.output_tensor = compiled_model.outputs[0]
2829
self.infer_request = compiled_model.create_infer_request()
2930

3031
def infer(self, input_semantics, reference_image, reference_semantics):
@@ -33,8 +34,7 @@ def infer(self, input_semantics, reference_image, reference_semantics):
3334
self.reference_image: reference_image,
3435
self.reference_semantics: reference_semantics
3536
}
36-
result = self.infer_request.infer(input_data)
37-
return next(iter(result.values()))
37+
return self.infer_request.infer(input_data)[self.output_tensor]
3838

3939

4040
class SegmentationModel:
@@ -49,9 +49,9 @@ def __init__(self, core, model_path, device='CPU'):
4949
self.input_size = model.inputs[0].shape
5050

5151
compiled_model = core.compile_model(model, device)
52+
self.output_tensor = compiled_model.outputs[0]
5253
self.infer_request = compiled_model.create_infer_request()
5354

5455
def infer(self, input):
5556
input_data = {self.input_tensor_name: input}
56-
result = self.infer_request.infer(input_data)
57-
return next(iter(result.values()))
57+
return self.infer_request.infer(input_data)[self.output_tensor]

demos/place_recognition_demo/python/place_recognition_demo/place_recognition.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,17 +41,17 @@ def __init__(self, model_path, device, cpu_extension):
4141
self.input_size = self.model.input(self.input_tensor_name).shape
4242
self.nchw_layout = self.input_size[1] == 3
4343
compiled_model = core.compile_model(self.model, device)
44+
self.output_tensor = compiled_model.outputs[0]
4445
self.infer_request = compiled_model.create_infer_request()
4546
log.info('The model {} is loaded to {}'.format(model_path, device))
4647

4748
def predict(self, image):
4849
''' Takes input image and returns L2-normalized embedding vector. '''
4950

50-
assert len(image.shape) == 4
5151
if self.nchw_layout:
5252
image = np.transpose(image, (0, 3, 1, 2))
53-
out = next(iter(self.infer_request.infer({self.input_tensor_name: image}).values()))
54-
return out
53+
input_data = {self.input_tensor_name: image}
54+
return self.infer_request.infer(input_data)[self.output_tensor]
5555

5656

5757
class PlaceRecognition:

demos/single_human_pose_estimation_demo/python/detector.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ def __init__(self, core, model_path, label_class, thr=0.3, device='CPU'):
2121
raise RuntimeError("Expected model output shape with {} outputs".format(OUTPUT_SIZE))
2222

2323
compiled_model = core.compile_model(self.model, device)
24+
self.output_tensor = compiled_model.outputs[0]
2425
self.infer_request = compiled_model.create_infer_request()
2526
self.input_tensor_name = self.model.inputs[0].get_any_name()
2627
if self.nchw_layout:
@@ -38,7 +39,8 @@ def _preprocess(self, img):
3839
return img[None, ]
3940

4041
def _infer(self, prep_img):
41-
output = next(iter(self.infer_request.infer({self.input_tensor_name: prep_img}).values()))
42+
input_data = {self.input_tensor_name: prep_img}
43+
output = self.infer_request.infer(input_data)[self.output_tensor]
4244
return output[0][0]
4345

4446
def _postprocess(self, bboxes):

0 commit comments

Comments (0)