Skip to content

Commit 399b70c

Browse files
Remove comments in 3d_segmentation_demo.py
1 parent aea2d2c commit 399b70c

File tree

1 file changed

+0
-7
lines changed

1 file changed

+0
-7
lines changed

demos/3d_segmentation_demo/python/3d_segmentation_demo.py

Lines changed: 0 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -245,7 +245,6 @@ def read_image(test_data_path, data_name, sizes=(128, 128, 128), is_series=True,
245245
def main():
246246
args = parse_arguments()
247247

248-
# --------------------------------- 1. Load Plugin for inference engine ---------------------------------
249248
log.info('OpenVINO Inference Engine')
250249
log.info('\tbuild: {}'.format(get_version()))
251250
core = Core()
@@ -262,7 +261,6 @@ def main():
262261
raise AttributeError("Device {} do not support of 3D convolution. "
263262
"Please use CPU, GPU or HETERO:*CPU*, HETERO:*GPU*")
264263

265-
# --------------------- 2. Read IR Generated by ModelOptimizer (.xml file) ---------------------
266264
log.info('Reading model {}'.format(args.path_to_model))
267265
model = core.read_model(args.path_to_model)
268266

@@ -279,12 +277,10 @@ def main():
279277

280278
n, c, d, h, w = model.inputs[0].shape
281279

282-
# ------------------------------------ 3. Loading model to the plugin -------------------------------------
283280
compiled_model = core.compile_model(model, args.target_device)
284281
infer_request = compiled_model.create_infer_request()
285282
log.info('The model {} is loaded to {}'.format(args.path_to_model, args.target_device))
286283

287-
# --------------------------------------- 4. Preparing input data -----------------------------------------
288284
start_time = perf_counter()
289285
if not os.path.exists(args.path_to_input_data):
290286
raise AttributeError("Path to input data: '{}' does not exist".format(args.path_to_input_data))
@@ -312,9 +308,7 @@ def main():
312308
original_data = data_crop
313309
original_size = original_data.shape[-3:]
314310

315-
# ---------------------------------------------- 5. Do inference --------------------------------------------
316311
result = infer_request.infer({input_tensor_name: data_crop})
317-
# ---------------------------- 6. Processing of the received inference results ------------------------------
318312
result = next(iter(result.values()))
319313
batch, channels, out_d, out_h, out_w = result.shape
320314

@@ -375,7 +369,6 @@ def main():
375369
total_latency = (perf_counter() - start_time) * 1e3
376370
log.info("Metrics report:")
377371
log.info("\tLatency: {:.1f} ms".format(total_latency))
378-
# --------------------------------------------- 7. Save output -----------------------------------------------
379372
tiff_output_name = os.path.join(args.path_to_output, 'output.tiff')
380373
Image.new('RGB', (original_data.shape[3], original_data.shape[2])).save(tiff_output_name,
381374
append_images=list_img, save_all=True)

0 commit comments

Comments (0)