Skip to content

Commit 87f0fe2

Browse files
use model_path name everywhere
1 parent 8a47e3c commit 87f0fe2

File tree

10 files changed: +37 −39 lines changed

demos/face_recognition_demo/python/ie_module.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,12 @@
1919

2020

2121
class Module:
22-
def __init__(self, core, model, model_type):
22+
def __init__(self, core, model_path, model_type):
2323
self.core = core
2424
self.model_type = model_type
25-
log.info('Reading {} model {}'.format(model_type, model))
26-
self.model = core.read_model(model)
27-
self.model_path = model
25+
log.info('Reading {} model {}'.format(model_type, model_path))
26+
self.model = core.read_model(model_path)
27+
self.model_path = model_path
2828
self.active_requests = 0
2929
self.clear()
3030

demos/formula_recognition_demo/python/utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -120,9 +120,9 @@ def prerocess_crop(crop, tgt_shape, preprocess_type='crop'):
120120
return preprocess_image(PREPROCESSING[preprocess_type], bin_crop, tgt_shape)
121121

122122

123-
def read_net(model_xml, ie, model_type):
124-
log.info('Reading {} model {}'.format(model_type, model_xml))
125-
return ie.read_model(model_xml)
123+
def read_net(model_path, ie, model_type):
124+
log.info('Reading {} model {}'.format(model_type, model_path))
125+
return ie.read_model(model_path)
126126

127127

128128
def change_layout(model_input):

demos/gpt2_text_prediction_demo/python/gpt2_text_prediction_demo.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,9 +79,8 @@ def main():
7979
ie = Core()
8080

8181
# read IR
82-
model_path = args.model
8382
log.info('Reading model {}'.format(args.model))
84-
model = ie.read_model(model_path)
83+
model = ie.read_model(args.model)
8584

8685
# check number inputs and outputs
8786
if len(model.inputs) != 1:

demos/image_translation_demo/python/image_translation_demo/models.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@
1212
"""
1313

1414
class CocosnetModel:
15-
def __init__(self, core, model_xml, device='CPU'):
16-
model = core.read_model(model_xml)
15+
def __init__(self, core, model_path, device='CPU'):
16+
model = core.read_model(model_path)
1717
if len(model.inputs) != 3:
1818
raise RuntimeError("The CocosnetModel expects 3 input layers")
1919
if len(model.outputs) != 1:
@@ -38,8 +38,8 @@ def infer(self, input_semantics, reference_image, reference_semantics):
3838

3939

4040
class SegmentationModel:
41-
def __init__(self, core, model_xml, device='CPU'):
42-
model = core.read_model(model_xml)
41+
def __init__(self, core, model_path, device='CPU'):
42+
model = core.read_model(model_path)
4343
if len(model.inputs) != 1:
4444
raise RuntimeError("The SegmentationModel expects 1 input layer")
4545
if len(model.outputs) != 1:

demos/machine_translation_demo/python/machine_translation_demo.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -30,13 +30,12 @@ class Translator:
3030
""" Language translation.
3131
3232
Arguments:
33-
model_xml (str): path to model's .xml file.
34-
model_bin (str): path to model's .bin file.
33+
model_path (str): path to model's .xml file.
3534
tokenizer_src (str): path to src tokenizer.
3635
tokenizer_tgt (str): path to tgt tokenizer.
3736
"""
38-
def __init__(self, model_xml, device, tokenizer_src, tokenizer_tgt, output_name):
39-
self.engine = TranslationEngine(model_xml, device, output_name)
37+
def __init__(self, model_path, device, tokenizer_src, tokenizer_tgt, output_name):
38+
self.engine = TranslationEngine(model_path, device, output_name)
4039
self.max_tokens = self.engine.get_max_tokens()
4140
self.tokenizer_src = Tokenizer(tokenizer_src, self.max_tokens)
4241
log.debug('Loaded src tokenizer, max tokens: {}'.format(self.max_tokens))
@@ -65,19 +64,19 @@ class TranslationEngine:
6564
""" OpenVINO engine for machine translation.
6665
6766
Arguments:
68-
model_xml (str): path to model's .xml file.
67+
model_path (str): path to model's .xml file.
6968
output_name (str): name of output blob of model.
7069
"""
71-
def __init__(self, model_xml, device, output_name):
70+
def __init__(self, model_path, device, output_name):
7271
log.info('OpenVINO Inference Engine')
7372
log.info('\tbuild: {}'.format(get_version()))
7473
core = Core()
7574

76-
log.info('Reading model {}'.format(model_xml))
77-
self.model = core.read_model(model_xml)
75+
log.info('Reading model {}'.format(model_path))
76+
self.model = core.read_model(model_path)
7877
compiled_model = core.compile_model(self.model, args.device)
7978
self.infer_request = compiled_model.create_infer_request()
80-
log.info('The model {} is loaded to {}'.format(model_xml, device))
79+
log.info('The model {} is loaded to {}'.format(model_path, device))
8180
self.input_tensor_name = "tokens"
8281
self.output_tensor_name = output_name
8382
self.model.output(self.output_tensor_name) # ensure a tensor with the name exists
@@ -216,7 +215,7 @@ def parse_input(input):
216215

217216
def main(args):
218217
model = Translator(
219-
model_xml=args.model,
218+
model_path=args.model,
220219
device=args.device,
221220
tokenizer_src=args.tokenizer_src,
222221
tokenizer_tgt=args.tokenizer_tgt,

demos/multi_camera_multi_target_tracking_demo/python/utils/ie_tools.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -57,14 +57,14 @@ def get_input_shape(self):
5757
"""Returns an input shape of the wrapped IE model"""
5858
return self.model.inputs[0].shape
5959

60-
def load_model(self, core, model_xml, device, model_type, num_reqs=1, cpu_extension=''):
60+
def load_model(self, core, model_path, device, model_type, num_reqs=1, cpu_extension=''):
6161
"""Loads a model in the Inference Engine format"""
6262
# Plugin initialization for specified device and load extensions library if specified
6363
if cpu_extension and 'CPU' in device:
6464
core.add_extension(cpu_extension, 'CPU')
6565
# Read IR
66-
log.info('Reading {} model {}'.format(model_type, model_xml))
67-
self.model = core.read_model(model_xml)
66+
log.info('Reading {} model {}'.format(model_type, model_path))
67+
self.model = core.read_model(model_path)
6868

6969
if len(self.model.inputs) not in self.get_allowed_inputs_len():
7070
raise RuntimeError("Supports topologies with only {} inputs, but got {}"
@@ -79,4 +79,4 @@ def load_model(self, core, model_xml, device, model_type, num_reqs=1, cpu_extens
7979
compiled_model = core.compile_model(self.model, device)
8080
self.infer_queue = AsyncInferQueue(compiled_model, num_reqs)
8181
self.infer_queue.set_callback(self.completion_callback)
82-
log.info('The {} model {} is loaded to {}'.format(model_type, model_xml, device))
82+
log.info('The {} model {} is loaded to {}'.format(model_type, model_path, device))

demos/single_human_pose_estimation_demo/python/detector.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,11 @@
22

33

44
class Detector:
5-
def __init__(self, core, path_to_model_xml, label_class, thr=0.3, device='CPU'):
5+
def __init__(self, core, model_path, label_class, thr=0.3, device='CPU'):
66
self.thr = thr
77
self.label_class = label_class
88

9-
self.model = core.read_model(path_to_model_xml)
9+
self.model = core.read_model(model_path)
1010
if len(self.model.inputs) != 1:
1111
raise RuntimeError("Detector supports only models with 1 input layer")
1212
if len(self.model.outputs) != 1:

demos/single_human_pose_estimation_demo/python/estimator.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -82,8 +82,8 @@ def get_trasformation_matrix(center, scale, output_size):
8282

8383

8484
class HumanPoseEstimator:
85-
def __init__(self, core, path_to_model_xml, device='CPU'):
86-
self.model = core.read_model(path_to_model_xml)
85+
def __init__(self, core, model_path, device='CPU'):
86+
self.model = core.read_model(model_path)
8787
if len(self.model.inputs) != 1:
8888
raise RuntimeError("HumanPoseEstimator supports only models with 1 input layer")
8989
if len(self.model.outputs) != 1:

demos/time_series_forecasting_demo/python/time_series_forecasting_demo.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,20 +30,20 @@ class ForecastingEngine:
3030
""" OpenVINO engine for Time Series Forecasting.
3131
3232
Arguments:
33-
model_xml (str): path to model's .xml file.
33+
model_path (str): path to model's .xml file.
3434
input_name (str): name of input blob of model.
3535
output_name (str): name of output blob of model.
3636
"""
37-
def __init__(self, model_xml, input_name, output_name, quantiles):
37+
def __init__(self, model_path, input_name, output_name, quantiles):
3838
device = "CPU"
3939
log.info('OpenVINO Inference Engine')
4040
log.info('\tbuild: {}'.format(get_version()))
4141
core = Core()
42-
log.info('Reading model {}'.format(model_xml))
43-
model = core.read_model(model_xml)
42+
log.info('Reading model {}'.format(model_path))
43+
model = core.read_model(model_path)
4444
compiled_model = core.compile_model(model, device)
4545
self.infer_request = compiled_model.create_infer_request()
46-
log.info('The model {} is loaded to {}'.format(model_xml, device))
46+
log.info('The model {} is loaded to {}'.format(model_path, device))
4747
self.input_tensor_name = input_name
4848
self.output_tensor_name = output_name
4949
self.quantiles = quantiles
@@ -159,7 +159,7 @@ def build_argparser():
159159
def main(args):
160160
quantiles = args.quantiles.split(",")
161161
model = ForecastingEngine(
162-
model_xml=args.model,
162+
model_path=args.model,
163163
input_name=args.input_name,
164164
output_name=args.output_name,
165165
quantiles=quantiles

demos/whiteboard_inpainting_demo/python/utils/ie_tools.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,13 +53,13 @@ def get_allowed_inputs_len(self):
5353
def get_allowed_outputs_len(self):
5454
return (1, )
5555

56-
def load_model(self, core, model_xml, device, cpu_extension=''):
56+
def load_model(self, core, model_path, device, cpu_extension=''):
5757
"""Loads a model in the Inference Engine format"""
5858
# Plugin initialization for specified device and load extensions library if specified
5959
if cpu_extension and 'CPU' in device:
6060
core.add_extension(cpu_extension, 'CPU')
6161
# Read IR
62-
self.model = core.read_model(model_xml)
62+
self.model = core.read_model(model_path)
6363

6464
if len(self.model.inputs) not in self.get_allowed_inputs_len():
6565
raise RuntimeError("Supports topologies with only {} inputs, but got {}"

Comments (0)