
Commit 62e37ff

blaze_hailo: Update implementation with common hailo_inference for faster inference.

1 parent 9b13ee1

6 files changed: +226 -796 lines changed

blaze_detect_live.py

Lines changed: 7 additions & 5 deletions
@@ -27,9 +27,12 @@
 # Vitis-AI 3.5
 #    xir
 #    vitis_ai_library
+# Hailo
+#    hailo_platform
 # plots
 #    pyplotly
 #    kaleido
+#
 
 
 import numpy as np
@@ -105,6 +108,8 @@ def detect_dpu_architecture():
     dpu_arch = "B?"
 
 try:
+    from blaze_hailo.hailo_inference import HailoInference
+    hailo_infer = HailoInference()
     from blaze_hailo.blazedetector import BlazeDetector as BlazeDetector_hailo
     from blaze_hailo.blazelandmark import BlazeLandmark as BlazeLandmark_hailo
     print("[INFO] blaze_hailo supported ...")
@@ -175,9 +180,6 @@ def get_video_dev_by_name(src):
     { "blaze": "hand", "pipeline": "tfl_hand_v0_10_full" , "model1": "blaze_tflite/models/palm_detection_full.tflite", "model2": "blaze_tflite/models/hand_landmark_full.tflite" },
     { "blaze": "hand", "pipeline": "pyt_hand_v0_07" , "model1": "blaze_pytorch/models/blazepalm.pth", "model2": "blaze_pytorch/models/blazehand_landmark.pth" },
     { "blaze": "hand", "pipeline": "vai_hand_v0_07" , "model1": "blaze_vitisai/models/blazepalm/"+dpu_arch+"/blazepalm.xmodel","model2": "blaze_vitisai/models/blazehandlandmark/"+dpu_arch+"/blazehandlandmark.xmodel" },
-    { "blaze": "hand", "pipeline": "hybrid_palm_v0_10_lite" , "model1": "blaze_hailo/models/palm_detection_lite.hef", "model2": "blaze_tflite/models/hand_landmark_lite.tflite" },
-    { "blaze": "hand", "pipeline": "hybrid_hand_v0_10_lite" , "model1": "blaze_tflite/models/palm_detection_lite.tflite", "model2": "blaze_hailo/models/hand_landmark_lite.hef" },
-    { "blaze": "hand", "pipeline": "hybrid_hand_v0_10_full" , "model1": "blaze_tflite/models/palm_detection_full.tflite", "model2": "blaze_hailo/models/hand_landmark_full.hef" },
     { "blaze": "hand", "pipeline": "hai_hand_v0_10_lite" , "model1": "blaze_hailo/models/palm_detection_lite.hef", "model2": "blaze_hailo/models/hand_landmark_lite.hef" },
     { "blaze": "hand", "pipeline": "hai_hand_v0_10_full" , "model1": "blaze_hailo/models/palm_detection_full.hef", "model2": "blaze_hailo/models/hand_landmark_full.hef" },
     { "blaze": "face", "pipeline": "tfl_face_v0_07_front" , "model1": "blaze_tflite/models/face_detection_front_v0_07.tflite", "model2": "blaze_tflite/models/face_landmark_v0_07.tflite" },
@@ -274,7 +276,7 @@ def get_video_dev_by_name(src):
     elif target1=="blaze_vitisai":
         blaze_detector = BlazeDetector_vitisai(detector_type)
     elif target1=="blaze_hailo":
-        blaze_detector = BlazeDetector_hailo(detector_type)
+        blaze_detector = BlazeDetector_hailo(detector_type,hailo_infer)
     else:
         print("[ERROR] Invalid target : ",target1,". MUST be a valid blaze_* directory.")
     blaze_detector.set_debug(debug=args.debug)
@@ -288,7 +290,7 @@ def get_video_dev_by_name(src):
     elif target2=="blaze_vitisai":
         blaze_landmark = BlazeLandmark_vitisai(landmark_type)
     elif target2=="blaze_hailo":
-        blaze_landmark = BlazeLandmark_hailo(landmark_type)
+        blaze_landmark = BlazeLandmark_hailo(landmark_type,hailo_infer)
     else:
         print("[ERROR] Invalid target : ",target1,". MUST be a valid blaze_* directory.")
     blaze_landmark.set_debug(debug=args.debug)
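
The hunks above are the heart of this commit: a single HailoInference object is created once at startup and the same instance is handed to both the detector and landmark constructors, so every model shares one device context instead of each wrapper opening its own. A minimal sketch of the new wiring (the "blazepalm" and "blazehandlandmark" type strings and the lite model paths are illustrative, taken from the pipeline table above, not from the actual call sites):

    from blaze_hailo.hailo_inference import HailoInference
    from blaze_hailo.blazedetector import BlazeDetector
    from blaze_hailo.blazelandmark import BlazeLandmark

    hailo_infer = HailoInference()   # one shared Hailo-8 context for all models

    blaze_detector = BlazeDetector("blazepalm", hailo_infer)
    blaze_detector.load_model("blaze_hailo/models/palm_detection_lite.hef")

    blaze_landmark = BlazeLandmark("blazehandlandmark", hailo_infer)
    blaze_landmark.load_model("blaze_hailo/models/hand_landmark_lite.hef")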

blaze_hailo/blaze_detect_live.py

Lines changed: 4 additions & 2 deletions
@@ -47,6 +47,8 @@
 import plotly.graph_objects as go
 
 sys.path.append(os.path.abspath('../blaze_common/'))
+from hailo_inference import HailoInference
+hailo_infer = HailoInference()
 from blazedetector import BlazeDetector
 from blazelandmark import BlazeLandmark
 
@@ -161,12 +163,12 @@ def get_video_dev_by_name(src):
     if args.model2 == None:
         args.model2 = default_landmark_model
 
-    blaze_detector = BlazeDetector(blaze_detector_type)
+    blaze_detector = BlazeDetector(blaze_detector_type,hailo_infer)
     blaze_detector.set_debug(debug=args.debug)
     blaze_detector.display_scores(debug=False)
     blaze_detector.load_model(args.model1)
 
-    blaze_landmark = BlazeLandmark(blaze_landmark_type)
+    blaze_landmark = BlazeLandmark(blaze_landmark_type,hailo_infer)
    blaze_landmark.set_debug(debug=args.debug)
    blaze_landmark.load_model(args.model2)
 
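blaze_hailo/hailo_inference.py is one of the six changed files but its contents are not shown in this excerpt. Its shape can be inferred from the call sites in this commit: load_model() returns an index (hef_id), and the wrapper classes read per-model state out of hef_list, network_group_list, network_group_params_list, input_vstreams_params_list and output_vstreams_params_list. A hypothetical reconstruction, reusing the same HailoRT calls that the old per-wrapper code below performed on every load:

    # Hypothetical sketch of hailo_inference.py, inferred from its call sites;
    # not the actual file contents.
    from hailo_platform import (HEF, ConfigureParams, FormatType, HailoStreamInterface,
                                InputVStreamParams, OutputVStreamParams, Device, VDevice)

    class HailoInference:
        def __init__(self):
            # Scan for Hailo devices once and keep a single VDevice open,
            # instead of creating one per load_model()/predict_on_batch() call.
            self.devices = Device.scan()
            self.target = VDevice(device_ids=self.devices)
            self.hef_list = []
            self.network_group_list = []
            self.network_group_params_list = []
            self.input_vstreams_params_list = []
            self.output_vstreams_params_list = []

        def load_model(self, model_path):
            # Configure the HEF once and cache everything needed at inference time.
            hef = HEF(model_path)
            configure_params = ConfigureParams.create_from_hef(hef=hef, interface=HailoStreamInterface.PCIe)
            network_group = self.target.configure(hef, configure_params)[0]
            self.hef_list.append(hef)
            self.network_group_list.append(network_group)
            self.network_group_params_list.append(network_group.create_params())
            self.input_vstreams_params_list.append(InputVStreamParams.make(network_group))
            self.output_vstreams_params_list.append(OutputVStreamParams.make(network_group, format_type=FormatType.FLOAT32))
            return len(self.hef_list) - 1   # hef_id handed back to the caller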
blaze_hailo/blazedetector.py

Lines changed: 19 additions & 107 deletions
@@ -9,64 +9,20 @@
     InferVStreams, InputVStreamParams, InputVStreams, OutputVStreamParams, OutputVStreams,
     Device, VDevice)
 
-
-# Reference : https://github.com/hailo-ai/Hailo-Application-Code-Examples/blob/main/runtime/python/model_scheduler_inference/hailo_inference_scheduler.py
-
-import os
-import psutil
-
-# ----------------------------------------------------------- #
-# --------------- Hailo Scheduler service functions ---------- #
-
-def check_if_service_enabled(process_name):
-    '''
-    Check if there is any running process that contains the given name processName.
-    '''
-    #Iterate over the all the running process
-    for proc in psutil.process_iter():
-        try:
-            if process_name.lower() in proc.name().lower():
-                print('HailoRT Scheduler service is enabled!')
-                return
-        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
-            pass
-
-    print('HailoRT Scheduler service is disabled. Enabling service...')
-    os.system('sudo systemctl disable hailort.service --now && sudo systemctl daemon-reload && sudo systemctl enable hailort.service --now')
-
-
-def create_vdevice_params():
-    params = VDevice.create_params()
-    params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN
-    if False: #if args.use_multi_process:
-        params.group_id = "SHARED"
-    return params
-
+
 from timeit import default_timer as timer
 
 
 class BlazeDetector(BlazeDetectorBase):
-    def __init__(self,blaze_app="blazepalm"):
+
+    #def __init__(self,blaze_app="blazepalm"):
+    def __init__(self,blaze_app,hailo_infer):
         super(BlazeDetector, self).__init__()
 
         self.blaze_app = blaze_app
-        self.batch_size = 1
-
-        #check_if_service_enabled('hailort_service')
+        self.hailo_infer = hailo_infer
 
-        self.params = VDevice.create_params()
-
-        # Setting VDevice params to disable the HailoRT service feature
-        self.params.scheduling_algorithm = HailoSchedulingAlgorithm.NONE
-
-        #self.params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN
-        #self.params.group_id = "SHARED"
-        #[HailoRT] [error] multi_process_service requires service compilation with HAILO_BUILD_SERVICE
-        #Traceback (most recent call last):
-        #  File "/usr/lib/python3.9/site-packages/hailo_platform/pyhailort/pyhailort.py", line 2626, in _open_vdevice
-        #    self._vdevice = _pyhailort.VDevice.create(self._params, device_ids)
-        #hailo_platform.pyhailort._pyhailort.HailoRTStatusException: 6
+        self.batch_size = 1
 
 
 
@@ -76,44 +32,17 @@ def load_model(self, model_path):
         print("[BlazeDetector.load_model] Model File : ",model_path)
         #[BlazeDetector.load_model] Model File : blaze_hailo/models/palm_detection_lite.hef
 
-        # The target can be used as a context manager ("with" statement) to ensure it's released on time.
-        # Here it's avoided for the sake of simplicity
-        #self.target = VDevice(params=self.params)
-        self.devices = Device.scan()
+        self.hef_id = self.hailo_infer.load_model(model_path)
         if self.DEBUG:
-            print("[BlazeDetector.load_model] Hailo Devices : ",self.devices)
-            #[BlazeDetector.load_model] Hailo Devices : ['0000:01:00.0']
-
-        # Loading compiled HEFs to device:
-        self.hef = HEF(model_path)
-
-        # The target is used as a context manager ("with" statement) to ensure it's released on time.
-        with VDevice(device_ids=self.devices) as target:
-            if self.DEBUG:
-                print("[BlazeDetector.load_model] Hailo target : ",target)
-                #[BlazeDetector.load_model] Hailo target : <hailo_platform.pyhailort.pyhailort.VDevice object at 0xffff95c24700>
-
-            # Get the "network groups" (connectivity groups, aka. "different networks") information from the .hef
-            self.configure_params = ConfigureParams.create_from_hef(hef=self.hef, interface=HailoStreamInterface.PCIe)
-            if self.DEBUG:
-                print("[BlazeDetector.load_model] Hailo configure_params : ",self.configure_params)
-                #[BlazeDetector.load_model] Hailo configure_params : {'palm_detection_lite': <hailo_platform.pyhailort._pyhailort.ConfigureParams object at 0xffff962d1f70>}
-            self.network_groups = target.configure(self.hef, self.configure_params)
-            if self.DEBUG:
-                print("[BlazeDetector.load_model] Hailo network_groups : ",self.network_groups)
-                #[BlazeDetector.load_model] Hailo network_groups : [<hailo_platform.pyhailort.pyhailort.ConfiguredNetwork object at 0xffff95c62eb0>]
-
-            self.network_group = self.network_groups[0]
-            self.network_group_params = self.network_group.create_params()
-
-            # Create input and output virtual streams params
-            # Quantized argument signifies whether or not the incoming data is already quantized.
-            # Data is quantized by HailoRT if and only if quantized == False .
-            #self.input_vstreams_params = InputVStreamParams.make(self.network_group, quantized=False, format_type=FormatType.FLOAT32)
-            #self.output_vstreams_params = OutputVStreamParams.make(self.network_group, quantized=True, format_type=FormatType.UINT8)
-            self.input_vstreams_params = InputVStreamParams.make(self.network_group)
-            self.output_vstreams_params = OutputVStreamParams.make(self.network_group, format_type=FormatType.FLOAT32)
-
+            print("[BlazeDetector.load_model] HEF Id : ",self.hef_id)
+
+        self.hef = self.hailo_infer.hef_list[self.hef_id]
+        self.network_group = self.hailo_infer.network_group_list[self.hef_id]
+        self.network_group_params = self.hailo_infer.network_group_params_list[self.hef_id]
+        self.input_vstreams_params = self.hailo_infer.input_vstreams_params_list[self.hef_id]
+        self.output_vstreams_params = self.hailo_infer.output_vstreams_params_list[self.hef_id]
+
+        if True:
             # Define dataset params
             self.input_vstream_infos = self.hef.get_input_vstream_infos()
             self.output_vstream_infos = self.hef.get_output_vstream_infos()
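
With the per-model state cached in HailoInference, load_model() reduces to list lookups by hef_id; only the vstream info queries remain, and those just read metadata from the already-loaded HEF. The infos expose the layer names and shapes later used to build the input dictionary; a short sketch (the printed name and shape are illustrative assumptions, the real values come from each HEF):

    input_vstream_infos = hef.get_input_vstream_infos()
    output_vstream_infos = hef.get_output_vstream_infos()
    print(input_vstream_infos[0].name)    # e.g. 'palm_detection_lite/input_layer1'
    print(input_vstream_infos[0].shape)   # e.g. (192, 192, 3)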
@@ -262,26 +191,9 @@ def predict_on_batch(self, x):
         # 2. Run the neural network:
         start = timer()
         """ Execute model on Hailo-8 """
-        # The target is used as a context manager ("with" statement) to ensure it's released on time.
-        with VDevice(device_ids=self.devices) as target:
-
-            # Get the "network groups" (connectivity groups, aka. "different networks") information from the .hef
-            self.configure_params = ConfigureParams.create_from_hef(hef=self.hef, interface=HailoStreamInterface.PCIe)
-            self.network_groups = target.configure(self.hef, self.configure_params)
-            self.network_group = self.network_groups[0]
-            self.network_group_params = self.network_group.create_params()
-
-            # Create input and output virtual streams params
-            # Quantized argument signifies whether or not the incoming data is already quantized.
-            # Data is quantized by HailoRT if and only if quantized == False .
-            #self.input_vstreams_params = InputVStreamParams.make(self.network_group, quantized=False, format_type=FormatType.FLOAT32)
-            #self.output_vstreams_params = OutputVStreamParams.make(self.network_group, quantized=True, format_type=FormatType.UINT8)
-            self.input_vstreams_params = InputVStreamParams.make(self.network_group)
-            self.output_vstreams_params = OutputVStreamParams.make(self.network_group, format_type=FormatType.FLOAT32)
-
-            with InferVStreams(self.network_group, self.input_vstreams_params, self.output_vstreams_params) as infer_pipeline:
-                with self.network_group.activate(self.network_group_params):
-                    infer_results = infer_pipeline.infer(input_data)
+        with InferVStreams(self.network_group, self.input_vstreams_params, self.output_vstreams_params) as infer_pipeline:
+            with self.network_group.activate(self.network_group_params):
+                infer_results = infer_pipeline.infer(input_data)
         self.profile_model = timer()-start
 
         #if self.DEBUG:
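
This deleted block is where the speed-up in the commit message comes from: previously every predict_on_batch() call re-created a VDevice and re-configured the network group before running inference; now the per-frame cost is only opening the vstreams and activating the pre-configured network group. A minimal per-frame sketch under the new scheme (the input name and 1x192x192x3 shape are illustrative; input_data is keyed by input vstream name, as in the code above):

    import numpy as np

    input_name = input_vstream_infos[0].name
    input_data = {input_name: np.zeros((1, 192, 192, 3), dtype=np.float32)}

    with InferVStreams(network_group, input_vstreams_params, output_vstreams_params) as infer_pipeline:
        with network_group.activate(network_group_params):
            infer_results = infer_pipeline.infer(input_data)   # dict keyed by output vstream name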
