|
15 | 15 |
|
16 | 16 | tasks = set() |
17 | 17 |
|
# Module-level MediaPipe FaceLandmarker configuration.
# You can download a face landmark model file from
# https://developers.google.com/mediapipe/solutions/vision/face_landmarker#models
model_file = "face_landmarker.task"
# Resolve the model next to this script (not the CWD) so the script works no
# matter where it is launched from. os.path.join is used instead of manual
# "/"-concatenation so the separator is handled portably.
model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), model_file)

# Short aliases for the MediaPipe Tasks API types used below.
BaseOptions = mp.tasks.BaseOptions
FaceLandmarker = mp.tasks.vision.FaceLandmarker
FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode = mp.tasks.vision.RunningMode

# VIDEO running mode: frames are fed via detect_for_video(), which requires
# monotonically increasing per-frame timestamps (supplied by the frame loop).
options = FaceLandmarkerOptions(
    base_options=BaseOptions(model_asset_path=model_path),
    running_mode=VisionRunningMode.VIDEO,
)

18 | 32 |
|
19 | 33 | async def main(room: rtc.Room) -> None: |
20 | 34 | video_stream = None |
@@ -97,11 +111,19 @@ async def frame_loop(video_stream: rtc.VideoStream) -> None: |
97 | 111 | arr = np.frombuffer(buffer.data, dtype=np.uint8) |
98 | 112 | arr = arr.reshape((buffer.height, buffer.width, 3)) |
99 | 113 |
|
| 114 | + mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=arr) |
| 115 | + detection_result = landmarker.detect_for_video( |
| 116 | + mp_image, frame_event.timestamp_us |
| 117 | + ) |
| 118 | + |
| 119 | + draw_landmarks_on_image(arr, detection_result) |
| 120 | + |
100 | 121 | arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) |
101 | 122 | cv2.imshow("livekit_video", arr) |
102 | 123 | if cv2.waitKey(1) & 0xFF == ord("q"): |
103 | 124 | break |
104 | 125 |
|
| 126 | + landmarker.close() |
105 | 127 | cv2.destroyAllWindows() |
106 | 128 |
|
107 | 129 |
|
|
0 commit comments