Skip to content

Commit 51eec7a

Browse files
[OVEP] Update python sample
1 parent 0e54cee commit 51eec7a

File tree

8 files changed

+57
-48
lines changed

8 files changed

+57
-48
lines changed

python/OpenVINO_EP/tiny_yolo_v2_object_detection/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ python3 tiny_yolov2_obj_detection_sample.py --h
4545
```
4646
## Running the ONNXRuntime OpenVINO™ Execution Provider sample
4747
```bash
48-
python3 tiny_yolov2_obj_detection_sample.py --video face-demographics-walking-and-pause.mp4 --model tinyyolov2.onnx --device CPU_FP32
48+
python3 tiny_yolov2_obj_detection_sample.py --video face-demographics-walking-and-pause.mp4 --model tinyyolov2.onnx --device CPU
4949
```
5050

5151
## To stop the sample from running

python/OpenVINO_EP/tiny_yolo_v2_object_detection/notebooks/OVEP_tiny_yolov2_obj_detection_sample.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -372,12 +372,12 @@
372372
" \n",
373373
"1. Create a ONNX Runtime Session Option instance using `onnxruntime.SessionOptions()`\n",
374374
"2. Using the session options instance create a Inference Session object by passing the model and the execution provider as arguments.\n",
375-
"Execution Providers are the hardware device options e.g. CPU, Myriad, GPU, etc. on which the session will be executed.\n",
375+
"Execution Providers are the hardware device options e.g. CPU, GPU and NPU on which the session will be executed.\n",
376376
"\n",
377377
"The below `create_sess` function actually takes care of the above steps. All we need to do is pass the device argument to it. It'll return the appropriate session according to the selected device along with the input name for the model.\n",
378378
"\n",
379379
"The device option should be chosen from any one of the below options: \n",
380-
"- `cpu, CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VADM_FP16`"
380+
"- `cpu, CPU, GPU, NPU`"
381381
]
382382
},
383383
{
@@ -396,16 +396,16 @@
396396
" print(\"Device type selected is 'cpu' which is the default CPU Execution Provider (MLAS)\")\n",
397397
" #Specify the path to the ONNX model on your machine and register the CPU EP\n",
398398
" sess = rt.InferenceSession(model, so, providers=['CPUExecutionProvider'])\n",
399-
" elif (device == 'CPU_FP32' or device == 'GPU_FP32' or device == 'GPU_FP16' or device == 'MYRIAD_FP16' or device == 'VADM_FP16'):\n",
399+
" elif (device == 'CPU' or device == 'GPU' or device == 'NPU'):\n",
400400
" #Specify the path to the ONNX model on your machine and register the OpenVINO EP\n",
401401
" sess = rt.InferenceSession(model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : device}])\n",
402402
" print(\"Device type selected is: \" + device + \" using the OpenVINO Execution Provider\")\n",
403403
" '''\n",
404404
" other 'device_type' options are: (Any hardware target can be assigned if you have the access to it)\n",
405-
" 'CPU_FP32', 'GPU_FP32', 'GPU_FP16', 'MYRIAD_FP16', 'VAD-M_FP16'\n",
405+
" 'CPU', 'GPU', 'NPU'\n",
406406
" '''\n",
407407
" else:\n",
408-
" raise Exception(\"Device type selected is not [cpu, CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VADM_FP16]\")\n",
408+
" raise Exception(\"Device type selected is not [cpu, CPU, GPU, NPU]\")\n",
409409
"\n",
410410
" # Get the input name of the model\n",
411411
" input_name = sess.get_inputs()[0].name\n",
@@ -504,7 +504,7 @@
504504
"\n",
505505
"Now the `tinyyolov2-8.onnx` model will run inference on `cat.jpg` image using the below two execution providers:\n",
506506
"- `cpu`: default CPU Execution Provider (MLAS) \n",
507-
"- `CPU_FP32`: Execution on CPU with OpenVino Execution Provider\n",
507+
"- `CPU`: Execution on CPU with OpenVino Execution Provider\n",
508508
"\n",
509509
"The below code block performs the following operations:\n",
510510
"\n",
@@ -567,7 +567,7 @@
567567
},
568568
"source": [
569569
"### Run the inference with OpenVINO Execution Provider\n",
570-
"The below code block performs the same opertions as [before](#cpu_exec) with `CPU_FP32` as device, that runs on OpenVINO Execution Provider for ONNX Runtime."
570+
"The below code block performs the same operations as [before](#cpu_exec) with `CPU` as device, that runs on OpenVINO Execution Provider for ONNX Runtime."
571571
]
572572
},
573573
{
@@ -580,7 +580,7 @@
580580
"outputs": [],
581581
"source": [
582582
"### create a session with CPU using the OpenVINO Execution Provider\n",
583-
"sess, input_name = create_sess(\"CPU_FP32\")\n",
583+
"sess, input_name = create_sess(\"CPU\")\n",
584584
"\n",
585585
"#capturing one frame at a time from the video feed and performing the inference\n",
586586
"frame = cap.copy()\n",
@@ -607,7 +607,7 @@
607607
"postprocess_output(out, frame, x_scale, y_scale)\n",
608608
"\n",
609609
"#Show the output\n",
610-
"show_bbox(\"CPU_FP32\", frame, inference_time)\n",
610+
"show_bbox(\"CPU\", frame, inference_time)\n",
611611
"\n",
612612
"#Write the frame with the detection boxes\n",
613613
"cv2.imwrite(output_file, frame.astype(np.uint8))\n",
Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
certifi==2024.7.4
2-
flatbuffers==2.0
1+
certifi==2025.8.3
2+
flatbuffers==25.2.10
33
onnx
4-
opencv-python==4.8.1.78
5-
Pillow==10.2.0
6-
protobuf==3.20.2
7-
scipy==1.11.4
8-
typing-extensions==4.2.0
4+
opencv-python==4.12.0.88
5+
Pillow==11.3.0
6+
protobuf==6.32.1
7+
scipy==1.16.2
8+
typing-extensions==4.15.0

python/OpenVINO_EP/tiny_yolo_v2_object_detection/tiny_yolov2_obj_detection_sample.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929

3030
def parse_arguments():
3131
parser = argparse.ArgumentParser(description='Object Detection using YOLOv2 in OPENCV using OpenVINO Execution Provider for ONNXRuntime')
32-
parser.add_argument('--device', default='CPU_FP32', help="Device to perform inference on 'cpu (MLAS)' or on devices supported by OpenVINO-EP [CPU_FP32, CPU_FP32, GPU_FP32, GPU_FP16].")
32+
parser.add_argument('--device', default='CPU', help="Device to perform inference on 'cpu (MLAS)' or on devices supported by OpenVINO-EP [CPU, GPU, NPU].")
3333
parser.add_argument('--video', help='Path to video file.')
3434
parser.add_argument('--model', help='Path to model.')
3535
args = parser.parse_args()
@@ -150,16 +150,16 @@ def main():
150150
print("Device type selected is 'cpu' which is the default CPU Execution Provider (MLAS)")
151151
#Specify the path to the ONNX model on your machine and register the CPU EP
152152
sess = rt.InferenceSession(args.model, so, providers=['CPUExecutionProvider'])
153-
elif (args.device == 'CPU_FP32', args.device == 'CPU_FP16' or args.device == 'GPU_FP32' or args.device == 'GPU_FP16'):
153+
elif (args.device == 'CPU' or args.device == 'GPU' or args.device == 'NPU'):
154154
#Specify the path to the ONNX model on your machine and register the OpenVINO EP
155155
sess = rt.InferenceSession(args.model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : args.device}])
156156
print("Device type selected is: " + args.device + " using the OpenVINO Execution Provider")
157157
'''
158158
other 'device_type' options are: (Any hardware target can be assigned if you have the access to it)
159-
'CPU_FP32', 'CPU_FP16', 'GPU_FP32', 'GPU_FP16'
159+
'CPU', 'GPU', 'NPU'
160160
'''
161161
else:
162-
raise Exception("Device type selected is not [cpu, CPU_FP32, GPU_FP32, GPU_FP16]")
162+
raise Exception("Device type selected is not [cpu, CPU, GPU, NPU]")
163163

164164
# Get the input name of the model
165165
input_name = sess.get_inputs()[0].name

python/OpenVINO_EP/yolov4_object_detection/README.md

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -54,10 +54,10 @@ python3 yolov4.py --h
5454

5555
### Run the sample on OpenVINO™ Execution Provider
5656
```bash
57-
python3 yolov4.py --device CPU_FP32 --video classroom.mp4 --model yolov4.onnx
57+
python3 yolov4.py --device CPU --video classroom.mp4 --model yolov4.onnx
5858
```
5959
Note:
60-
* You can pick different device options to run on OpenVINO™ Execution Provider like GPU_FP32, GPU_FP16 and MYRIAD_FP16.
60+
* You can pick different device options to run on OpenVINO™ Execution Provider like GPU and NPU.
6161

6262
### Run the sample on default CPU Execution Provider (MLAS)
6363
```bash
@@ -66,17 +66,17 @@ python3 yolov4.py --device cpu --video classroom.mp4 --model yolov4.onnx
6666

6767
### Run the sample with video as Input
6868
```bash
69-
python3 yolov4.py --device CPU_FP32 --video classroom.mp4 --model yolov4.onnx
69+
python3 yolov4.py --device CPU --video classroom.mp4 --model yolov4.onnx
7070
```
7171

7272
### Run the sample with Image as Input
7373
```bash
74-
python3 yolov4.py --device CPU_FP32 --image cat.jpg --model yolov4.onnx
74+
python3 yolov4.py --device CPU --image cat.jpg --model yolov4.onnx
7575
```
7676

7777
### Run the sample with Live Input stream Like webcam
7878
```bash
79-
python3 yolov4.py --device CPU_FP32 --model yolov4.onnx
79+
python3 yolov4.py --device CPU --model yolov4.onnx
8080
```
8181

8282
## To stop the sample from running
@@ -88,9 +88,7 @@ Just press the letter 'q' or Ctrl+C if on Windows
8888
8989
[Download OpenVINO™ Execution Provider Latest pip wheels from here](https://pypi.org/project/onnxruntime-openvino/)
9090
91-
[OpenVINO™ Execution Provider](https://www.intel.com/content/www/us/en/artificial-intelligence/posts/faster-inferencing-with-one-line-of-code.html)
92-
93-
[Docker Containers](https://www.intel.com/content/www/us/en/artificial-intelligence/posts/openvino-execution-provider-docker-container.html)
91+
[OpenVINO™ Execution Provider](https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html)
9492
9593
[Python Pip Wheel Packages](https://www.intel.com/content/www/us/en/artificial-intelligence/posts/openvino-execution-provider-for-onnx-runtime.html)
9694

python/OpenVINO_EP/yolov4_object_detection/notebooks/OVEP_yolov4_obj_detection_sample.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -648,12 +648,12 @@
648648
" \n",
649649
"1. Create a ONNX Runtime Session Option instance using `onnxruntime.SessionOptions()`\n",
650650
"2. Using the session options instance create a Inference Session object by passing the model and the execution provider as arguments.\n",
651-
"Execution Providers are the hardware device options e.g. CPU, Myriad, GPU, etc. on which the session will be executed.\n",
651+
"Execution Providers are the hardware device options e.g. CPU, GPU, NPU on which the session will be executed.\n",
652652
"\n",
653653
"The below `create_sess` function actually takes care of the above steps. All we need to do is pass the device argument to it. It'll return the appropriate session according to the selected device along with the input name for the model.\n",
654654
"\n",
655655
"The device option should be chosen from any one of the below options: \n",
656-
"- `cpu, CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VADM_FP16`"
656+
"- `cpu, CPU, GPU, NPU`"
657657
]
658658
},
659659
{
@@ -672,16 +672,16 @@
672672
" print(\"Device type selected is 'cpu' which is the default CPU Execution Provider (MLAS)\")\n",
673673
" #Specify the path to the ONNX model on your machine and register the CPU EP\n",
674674
" sess = rt.InferenceSession(model, so, providers=['CPUExecutionProvider'])\n",
675-
" elif (device == 'CPU_FP32' or device == 'GPU_FP32' or device == 'GPU_FP16' or device == 'MYRIAD_FP16' or device == 'VADM_FP16'):\n",
675+
" elif (device == 'CPU' or device == 'GPU' or device == 'NPU'):\n",
676676
" #Specify the path to the ONNX model on your machine and register the OpenVINO EP\n",
677677
" sess = rt.InferenceSession(model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : device}])\n",
678678
" print(\"Device type selected is: \" + device + \" using the OpenVINO Execution Provider\")\n",
679679
" '''\n",
680680
" other 'device_type' options are: (Any hardware target can be assigned if you have the access to it)\n",
681-
" 'CPU_FP32', 'GPU_FP32', 'GPU_FP16', 'MYRIAD_FP16', 'VAD-M_FP16'\n",
681+
" 'CPU', 'GPU', 'NPU'\n",
682682
" '''\n",
683683
" else:\n",
684-
" raise Exception(\"Device type selected is not [cpu, CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VADM_FP16]\")\n",
684+
" raise Exception(\"Device type selected is not [cpu, CPU, GPU, NPU]\")\n",
685685
"\n",
686686
" # Get the input name of the model\n",
687687
" input_name = sess.get_inputs()[0].name\n",
@@ -807,7 +807,7 @@
807807
"source": [
808808
"Now the `yolov4.onnx` model will run inference on `cat.jpg` image using the below two execution providers:\n",
809809
"- `cpu`: default CPU Execution Provider (MLAS) \n",
810-
"- `CPU_FP32`: Execution on CPU with OpenVino Execution Provider\n",
810+
"- `CPU`: Execution on CPU with OpenVino Execution Provider\n",
811811
"\n",
812812
"The below code block performs the following operations:\n",
813813
"\n",
@@ -887,7 +887,7 @@
887887
"id": "8c04daa0"
888888
},
889889
"source": [
890-
"The below code block performs the same opertions as [before](#cpu_exec) with `CPU_FP32` as device, that runs on OpenVINO Execution Provider for ONNX Runtime."
890+
"The below code block performs the same operations as [before](#cpu_exec) with `CPU` as device, that runs on OpenVINO Execution Provider for ONNX Runtime."
891891
]
892892
},
893893
{
@@ -900,7 +900,7 @@
900900
"outputs": [],
901901
"source": [
902902
"### create a session with CPU using the OpenVINO Execution Provider\n",
903-
"sess, input_name = create_sess(\"CPU_FP32\")\n",
903+
"sess, input_name = create_sess(\"CPU\")\n",
904904
"\n",
905905
"input_size = 416\n",
906906
"original_image = cap.copy()\n",
@@ -931,7 +931,7 @@
931931
"bboxes = nms(bboxes, 0.213, method='nms')\n",
932932
"image_out = draw_bbox(original_image, bboxes)\n",
933933
"\n",
934-
"cv2.putText(image_out,\"CPU_FP32\",(10,20),cv2.FONT_HERSHEY_COMPLEX,0.5,(255,255,255),1)\n",
934+
"cv2.putText(image_out,\"CPU\",(10,20),cv2.FONT_HERSHEY_COMPLEX,0.5,(255,255,255),1)\n",
935935
"\n",
936936
"image_out = cv2.cvtColor(image_out, cv2.COLOR_BGR2RGB)\n",
937937
"cv2_imshow(image_out)\n",
Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
certifi==2023.7.22
2-
flatbuffers==2.0
1+
certifi==2025.8.3
2+
flatbuffers==25.2.10
33
onnx
4-
opencv-python==4.5.5.64
5-
Pillow==10.3.0
6-
protobuf==3.20.2
7-
scipy==1.10.0
8-
typing-extensions==4.2.0
4+
opencv-python==4.12.0.88
5+
Pillow==11.3.0
6+
protobuf==6.32.1
7+
scipy==1.16.2
8+
typing-extensions==4.15.0

python/OpenVINO_EP/yolov4_object_detection/yolov4.py

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
import sys
2020
import time
2121
import platform
22+
import json
2223

2324
if platform.system() == "Windows":
2425
import onnxruntime.tools.add_openvino_win_libs as utils
@@ -220,7 +221,7 @@ def get_anchors(anchors_path, tiny=False):
220221

221222
def parse_arguments():
222223
parser = argparse.ArgumentParser(description='Object Detection using YOLOv4 in OPENCV using OpenVINO Execution Provider for ONNXRuntime')
223-
parser.add_argument('--device', default='CPU_FP32', help="Device to perform inference on 'cpu (MLAS)' or on devices supported by OpenVINO-EP [CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VAD-M_FP16].")
224+
parser.add_argument('--device', default='CPU', help="Device to perform inference on 'cpu (MLAS)' or on devices supported by OpenVINO-EP [CPU, GPU, NPU].")
224225
parser.add_argument('--image', help='Path to image file.')
225226
parser.add_argument('--video', help='Path to video file.')
226227
parser.add_argument('--model', help='Path to model.')
@@ -283,11 +284,21 @@ def main():
283284
sess = rt.InferenceSession(args.model, so, providers=['CPUExecutionProvider'])
284285
else:
285286
#Specify the path to the ONNX model on your machine and register the OpenVINO EP
286-
sess = rt.InferenceSession(args.model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : device}])
287+
288+
config_dict = {
289+
"CPU": {
290+
"INFERENCE_NUM_THREADS": "5",
291+
"CACHE_DIR": "C:\\"
292+
}
293+
}
294+
295+
config_json = json.dumps(config_dict)
296+
297+
sess = rt.InferenceSession(args.model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : device, 'load_config' : config_json}])
287298
print("Device type selected is: " + device + " using the OpenVINO Execution Provider")
288299
'''
289300
other 'device_type' options are: (Any hardware target can be assigned if you have the access to it)
290-
'CPU_FP32', 'GPU_FP32', 'GPU_FP16', 'MYRIAD_FP16', 'VAD-M_FP16'
301+
'CPU', 'GPU', 'NPU'
291302
'''
292303

293304
input_name = sess.get_inputs()[0].name

0 commit comments

Comments
 (0)