-# Created by Coder Shiyar | https://github.com/codershiyar | https://codershiyar.com
+# pip install opencv-python

import cv2
from ultralytics import YOLO

-# Load YOLO model
-model = YOLO('model.pt')
-
-# Initialize webcam
-webcam = cv2.VideoCapture(0)
+# Load the pretrained YOLOv8 model, print its class names, and open the default webcam
+model = YOLO('yolov8s.pt')
+print(model.names)
+webcamera = cv2.VideoCapture(0)
+# webcamera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
+# webcamera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

while True:
-    # Capture frame from webcam
-    ret, frame = webcam.read()
-
-    # Check if frame is captured successfully
-    if not ret:
-        print("Failed to capture frame")
-        break
+    success, frame = webcamera.read()
+    if not success:
+        print("Failed to capture frame")
+        break
+
+    # Track people (COCO class 0) and overlay the detection count
+    results = model.track(frame, classes=0, conf=0.8, imgsz=480)
+    cv2.putText(frame, f"Total: {len(results[0].boxes)}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
+    cv2.imshow("Live Camera", results[0].plot())

-    # Detect objects in the frame
-    results = model.track(frame, conf=0.5, imgsz=480, classes=0)
-
-    # Display total number of detected objects and bounding boxes
-    for result in results:
-        cv2.putText(frame, f"Total: {len(result.boxes)}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
-        cv2.imshow('YOLOv8 Object Detection', result.plot())
-
-    # Exit loop if ESC key is pressed
-    if cv2.waitKey(1) == 27:
+    # Exit the loop when the 'q' key is pressed
+    if cv2.waitKey(1) == ord('q'):
        break

-# Release webcam and close OpenCV windows
-webcam.release()
+webcamera.release()
cv2.destroyAllWindows()

-
-# For Realsense camera
-# def initialize_realsense():
-#     import pyrealsense2 as rs
-#     pipeline = rs.pipeline()
-#     camera_aconfig = rs.config()
-#     camera_aconfig.enable_stream(rs.stream.depth, *config.DEPTH_CAMERA_RESOLUTION, rs.format.z16, config.DEPTH_CAMERA_FPS)
-#     camera_aconfig.enable_stream(rs.stream.color, *config.COLOR_CAMERA_RESOLUTION, rs.format.bgr8, config.COLOR_CAMERA_FPS)
-#     pipeline.start(camera_aconfig)
-#     return pipeline
-# try:
-#     # Try to initialize RealSense Camera
-#     camera = initialize_realsense()
-#     get_frame = get_frame_realsense
-# except Exception as e:
-#     print("RealSense camera not found, using default webcam.")
-#     camera = initialize_webcam()
-#     get_frame = get_frame_webcam
-
-# Function to get frames from RealSense
-# def get_frame_realsense(pipeline):
-#     import pyrealsense2 as rs
-#     frames = pipeline.wait_for_frames()
-#     depth_frame = frames.get_depth_frame()
-#     color_frame = frames.get_color_frame()
-#     if not depth_frame or not color_frame:
-#         return None, None
-#     depth_image = np.asanyarray(depth_frame.get_data())
-#     color_image = np.asanyarray(color_frame.get_data())
-#     return depth_image, color_image
-
-# # Function to get frame from webcam
-# def get_frame_webcam(cap):
-#     ret, frame = cap.read()
-#     return None, frame if ret else None
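
The commented-out block removed above sketched a RealSense-first setup with a webcam fallback, but it depended on an external `config` module and an `initialize_webcam` helper that never appear in this file. For reference, here is a minimal self-contained sketch of the same idea; the 640x480 @ 30 fps stream settings stand in for the missing `config.*` values and are an assumption, not something taken from the original code.

import cv2
import numpy as np
import pyrealsense2 as rs

def initialize_realsense():
    # Start depth + color streams (fixed 640x480 @ 30 fps; assumed values)
    pipeline = rs.pipeline()
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    rs_config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    pipeline.start(rs_config)
    return pipeline

def get_frame_realsense(pipeline):
    # Returns (depth_image, color_image), or (None, None) if either frame is missing
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    if not depth_frame or not color_frame:
        return None, None
    return np.asanyarray(depth_frame.get_data()), np.asanyarray(color_frame.get_data())

def get_frame_webcam(cap):
    # The webcam has no depth stream, so the depth slot is always None
    ret, frame = cap.read()
    return (None, frame) if ret else (None, None)

try:
    camera = initialize_realsense()   # pipeline.start raises if no RealSense device is connected
    get_frame = get_frame_realsense
except Exception:
    print("RealSense camera not found, using default webcam.")
    camera = cv2.VideoCapture(0)
    get_frame = get_frame_webcam

depth, color = get_frame(camera)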
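
One note on the new loop: `model.track` is called on one frame at a time, and Ultralytics also accepts a `persist=True` argument in that call to keep tracker state between successive frames; without it, each call is treated as the start of a new sequence. A minimal sketch of that variant, reusing the same `yolov8s.pt` model and webcam settings as above:

import cv2
from ultralytics import YOLO

model = YOLO('yolov8s.pt')
webcamera = cv2.VideoCapture(0)

while True:
    success, frame = webcamera.read()
    if not success:
        break
    # persist=True tells the tracker this frame continues the previous one,
    # so track IDs (results[0].boxes.id) stay stable across frames
    results = model.track(frame, classes=0, conf=0.8, imgsz=480, persist=True)
    cv2.imshow("Live Camera", results[0].plot())
    if cv2.waitKey(1) == ord('q'):
        break

webcamera.release()
cv2.destroyAllWindows()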