Skip to content

Commit 8a47e3c

Browse files
Submit only path to .xml file to read_model()
1 parent 77eabec commit 8a47e3c

File tree

13 files changed

+19
-24
lines changed

13 files changed

+19
-24
lines changed

demos/3d_segmentation_demo/python/3d_segmentation_demo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ def main():
262262
raise AttributeError("Device {} do not support of 3D convolution. "
263263
"Please use CPU, GPU or HETERO:*CPU*, HETERO:*GPU*")
264264

265-
# --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------
265+
# --------------------- 2. Read IR Generated by ModelOptimizer (.xml file) ---------------------
266266
log.info('Reading model {}'.format(args.path_to_model))
267267
model = core.read_model(args.path_to_model)
268268

demos/colorization_demo/python/colorization_demo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ def main(args):
6767
core = Core()
6868

6969
log.info('Reading model {}'.format(args.model))
70-
model = core.read_model(args.model, args.model.with_suffix(".bin"))
70+
model = core.read_model(args.model)
7171

7272
input_tensor_name = 'data_l'
7373
input_shape = model.input(input_tensor_name).shape

demos/face_recognition_demo/python/ie_module.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def __init__(self, core, model, model_type):
2323
self.core = core
2424
self.model_type = model_type
2525
log.info('Reading {} model {}'.format(model_type, model))
26-
self.model = core.read_model(model, model.with_suffix('.bin'))
26+
self.model = core.read_model(model)
2727
self.model_path = model
2828
self.active_requests = 0
2929
self.clear()

demos/formula_recognition_demo/python/utils.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -121,10 +121,8 @@ def prerocess_crop(crop, tgt_shape, preprocess_type='crop'):
121121

122122

123123
def read_net(model_xml, ie, model_type):
124-
model_bin = os.path.splitext(model_xml)[0] + ".bin"
125-
126124
log.info('Reading {} model {}'.format(model_type, model_xml))
127-
return ie.read_model(model_xml, model_bin)
125+
return ie.read_model(model_xml)
128126

129127

130128
def change_layout(model_input):

demos/human_pose_estimation_3d_demo/python/modules/inference_engine.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,16 +18,16 @@
1818

1919

2020
class InferenceEngine:
21-
def __init__(self, net_model_xml_path, device, stride):
21+
def __init__(self, model_path, device, stride):
2222
self.device = device
2323
self.stride = stride
2424

2525
log.info('OpenVINO Inference Engine')
2626
log.info('\tbuild: {}'.format(get_version()))
2727
self.core = Core()
2828

29-
log.info('Reading model {}'.format(net_model_xml_path))
30-
self.model = self.core.read_model(net_model_xml_path)
29+
log.info('Reading model {}'.format(model_path))
30+
self.model = self.core.read_model(model_path)
3131

3232
required_output_keys = {'features', 'heatmaps', 'pafs'}
3333
for output_tensor_name in required_output_keys:
@@ -40,7 +40,7 @@ def __init__(self, net_model_xml_path, device, stride):
4040
self.input_tensor_name = self.model.inputs[0].get_any_name()
4141
compiled_model = self.core.compile_model(self.model, self.device)
4242
self.infer_request = compiled_model.create_infer_request()
43-
log.info('The model {} is loaded to {}'.format(net_model_xml_path, self.device))
43+
log.info('The model {} is loaded to {}'.format(model_path, self.device))
4444

4545
def infer(self, img):
4646
img = img[0:img.shape[0] - (img.shape[0] % self.stride),

demos/image_inpainting_demo/python/inpainting.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
class ImageInpainting:
1818
def __init__(self, core, model_path, device='CPU'):
19-
model = core.read_model(model_path, model_path.with_suffix('.bin'))
19+
model = core.read_model(model_path)
2020

2121
if len(model.inputs) != 2:
2222
raise RuntimeError("The model expects 2 input layers")

demos/image_retrieval_demo/python/image_retrieval_demo/image_retrieval.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,8 @@ def __init__(self, model_path, device, cpu_extension):
3636
if cpu_extension and device == 'CPU':
3737
core.add_extension(cpu_extension, 'CPU')
3838

39-
path = '.'.join(model_path.split('.')[:-1])
4039
log.info('Reading model {}'.format(model_path))
41-
self.model = core.read_model(path + '.xml', path + '.bin')
40+
self.model = core.read_model(model_path)
4241
self.input_tensor_name = "Placeholder"
4342
compiled_model = core.compile_model(self.model, device)
4443
self.infer_request = compiled_model.create_infer_request()

demos/instance_segmentation_demo/python/instance_segmentation_demo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ def main():
115115

116116
# Read IR
117117
log.info('Reading model {}'.format(args.model))
118-
model = core.read_model(args.model, args.model.with_suffix('.bin'))
118+
model = core.read_model(args.model)
119119
image_input, image_info_input, (n, c, h, w), model_type, output_names, postprocessor = check_model(model)
120120
args.no_keep_aspect_ratio = model_type == 'yolact' or args.no_keep_aspect_ratio
121121

demos/machine_translation_demo/python/machine_translation_demo.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ class Translator:
3535
tokenizer_src (str): path to src tokenizer.
3636
tokenizer_tgt (str): path to tgt tokenizer.
3737
"""
38-
def __init__(self, model_xml, model_bin, device, tokenizer_src, tokenizer_tgt, output_name):
39-
self.engine = TranslationEngine(model_xml, model_bin, device, output_name)
38+
def __init__(self, model_xml, device, tokenizer_src, tokenizer_tgt, output_name):
39+
self.engine = TranslationEngine(model_xml, device, output_name)
4040
self.max_tokens = self.engine.get_max_tokens()
4141
self.tokenizer_src = Tokenizer(tokenizer_src, self.max_tokens)
4242
log.debug('Loaded src tokenizer, max tokens: {}'.format(self.max_tokens))
@@ -66,16 +66,15 @@ class TranslationEngine:
6666
6767
Arguments:
6868
model_xml (str): path to model's .xml file.
69-
model_bin (str): path to model's .bin file.
7069
output_name (str): name of output blob of model.
7170
"""
72-
def __init__(self, model_xml, model_bin, device, output_name):
71+
def __init__(self, model_xml, device, output_name):
7372
log.info('OpenVINO Inference Engine')
7473
log.info('\tbuild: {}'.format(get_version()))
7574
core = Core()
7675

7776
log.info('Reading model {}'.format(model_xml))
78-
self.model = core.read_model(model_xml, model_bin)
77+
self.model = core.read_model(model_xml)
7978
compiled_model = core.compile_model(self.model, args.device)
8079
self.infer_request = compiled_model.create_infer_request()
8180
log.info('The model {} is loaded to {}'.format(model_xml, device))
@@ -218,7 +217,6 @@ def parse_input(input):
218217
def main(args):
219218
model = Translator(
220219
model_xml=args.model,
221-
model_bin=args.model.with_suffix(".bin"),
222220
device=args.device,
223221
tokenizer_src=args.tokenizer_src,
224222
tokenizer_tgt=args.tokenizer_tgt,

demos/place_recognition_demo/python/place_recognition_demo/place_recognition.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def __init__(self, model_path, device, cpu_extension):
3636
core.add_extension(cpu_extension, 'CPU')
3737

3838
log.info('Reading model {}'.format(model_path))
39-
self.model = core.read_model(model_path, model_path.with_suffix('.bin'))
39+
self.model = core.read_model(model_path)
4040
self.input_tensor_name = self.model.inputs[0].get_any_name()
4141
self.input_size = self.model.input(self.input_tensor_name).shape
4242
self.nchw_layout = self.input_size[1] == 3

0 commit comments

Comments (0)