Skip to content

Commit 86816f9

Browse files
committed
Testing CUDA benchmark
Former-commit-id: b4e7089
1 parent 4146534 commit 86816f9

File tree

1 file changed

+57
-9
lines changed

1 file changed

+57
-9
lines changed

python/sample_face_track_from_video.py

Lines changed: 57 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,36 @@
44
from inspireface.param import *
55
import numpy as np
66

7+
8+
def generate_color(id):
    """
    Generate a bright color based on the given integer ID. Ensures 50 unique colors.

    Args:
        id (int): The ID for which to generate a color. IDs repeat colors
            modulo 50.

    Returns:
        tuple: A tuple of three ints in [0, 255] representing the color in
            BGR format (suitable for OpenCV drawing functions).
    """
    # Pure scalar HSV -> BGR conversion; the stdlib handles this without
    # round-tripping a 1x1 uint8 image through cv2.cvtColor. The original
    # uint8 path also overflowed for hues > 255 (IDs >= ~36), which made
    # colors collide instead of being unique.
    import colorsys

    max_id = 50  # Number of unique colors before IDs wrap around.
    id = id % max_id

    # Distribute hues evenly around the full color wheel so consecutive IDs
    # get visibly different colors.
    hue = (id * 360.0 / max_id) % 360.0

    # High saturation/value for bright, easily distinguishable colors.
    # NOTE: the original expression `200 + (55 * id) % 55` was always 200
    # because (55 * id) % 55 == 0 for every id; 200 is kept as the intent.
    saturation = 200 / 255.0
    value = 200 / 255.0

    r, g, b = colorsys.hsv_to_rgb(hue / 360.0, saturation, value)
    # round() avoids float truncation artifacts like 199 instead of 200.
    return (round(b * 255), round(g * 255), round(r * 255))
30+
731
@click.command()
832
@click.argument("resource_path")
933
@click.argument('source')
1034
@click.option('--show', is_flag=True, help='Display the video stream or video file in a window.')
11-
def case_face_tracker_from_video(resource_path, source, show):
35+
@click.option('--out', type=str, default=None, help='Path to save the processed video.')
36+
def case_face_tracker_from_video(resource_path, source, show, out):
1237
"""
1338
Launch a face tracking process from a video source. The 'source' can either be a webcam index (0, 1, ...)
1439
or a path to a video file. Use the --show option to display the video.
@@ -17,6 +42,7 @@ def case_face_tracker_from_video(resource_path, source, show):
1742
resource_path (str): Path to the resource directory for face tracking algorithms.
1843
source (str): Webcam index or path to the video file.
1944
show (bool): If set, the video will be displayed in a window.
45+
out (str): Path to save the processed video.
2046
"""
2147
# Initialize the face tracker or other resources.
2248
print(f"Initializing with resources from: {resource_path}")
@@ -26,8 +52,8 @@ def case_face_tracker_from_video(resource_path, source, show):
2652

2753
# Optional features, loaded during session creation based on the modules specified.
2854
opt = HF_ENABLE_NONE
29-
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_LIGHT_TRACK) # Use video mode
30-
55+
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_TRACK_BY_DETECTION, max_detect_num=25, detect_pixel_level=640) # Use video mode
56+
session.set_filter_minimum_face_pixel_size(0)
3157
# Determine if the source is a digital webcam index or a video file path.
3258
try:
3359
source_index = int(source) # Try to convert source to an integer.
@@ -42,6 +68,15 @@ def case_face_tracker_from_video(resource_path, source, show):
4268
print("Error: Could not open video source.")
4369
return
4470

71+
# VideoWriter to save the processed video if out is provided.
72+
if out:
73+
fourcc = cv2.VideoWriter_fourcc(*'XVID')
74+
fps = cap.get(cv2.CAP_PROP_FPS) if cap.get(cv2.CAP_PROP_FPS) > 0 else 30
75+
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
76+
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
77+
out_video = cv2.VideoWriter(out, fourcc, fps, (frame_width, frame_height))
78+
print(f"Saving video to: {out}")
79+
4580
# Main loop to process video frames.
4681
while True:
4782
ret, frame = cap.read()
@@ -62,31 +97,44 @@ def case_face_tracker_from_video(resource_path, source, show):
6297
# Calculate center, size, and angle
6398
center = ((x1 + x2) / 2, (y1 + y2) / 2)
6499
size = (x2 - x1, y2 - y1)
65-
angle = face.roll
66-
67-
# Get rotation matrix
68-
rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
100+
angle = face.roll
69101

70102
# Apply rotation to the bounding box corners
71103
rect = ((center[0], center[1]), (size[0], size[1]), angle)
72104
box = cv2.boxPoints(rect)
73105
box = box.astype(int)
74106

107+
color = generate_color(face.track_id)
108+
75109
# Draw the rotated bounding box
76-
cv2.drawContours(frame, [box], 0, (100, 180, 29), 2)
110+
cv2.drawContours(frame, [box], 0, color, 4)
77111

78112
# Draw landmarks
79113
lmk = session.get_face_dense_landmark(face)
80114
for x, y in lmk.astype(int):
81-
cv2.circle(frame, (x, y), 0, (220, 100, 0), 2)
115+
cv2.circle(frame, (x, y), 0, color, 4)
116+
117+
# Draw track ID at the top of the bounding box
118+
text = f"ID: {face.track_id}"
119+
text_size, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
120+
text_x = min(box[:, 0])
121+
text_y = min(box[:, 1]) - 10
122+
if text_y < 0:
123+
text_y = min(box[:, 1]) + text_size[1] + 10
124+
cv2.putText(frame, text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
82125

83126
if show:
84127
cv2.imshow("Face Tracker", frame)
85128
if cv2.waitKey(1) & 0xFF == ord('q'):
86129
break # Exit loop if 'q' is pressed.
87130

131+
if out:
132+
out_video.write(frame)
133+
88134
# Cleanup: release video capture and close any open windows.
89135
cap.release()
136+
if out:
137+
out_video.release()
90138
cv2.destroyAllWindows()
91139
print("Released all resources and closed windows.")
92140

0 commit comments

Comments
 (0)