|
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
| 4 | + |
# Load the YOLOv3 network (weights + config).
# Both files can be downloaded from the official YOLO website.
yolo = cv2.dnn.readNet("C:\\Users\\billa\\OneDrive\\Desktop\\Programs\\ML_DL\\yolov3.weights",
                       "C:\\Users\\billa\\OneDrive\\Desktop\\Programs\\ML_DL\\yolov3.cfg")

# Load the class names: one label per line, index-aligned with the
# network's class scores.
classes = []
with open("C:\\Users\\billa\\OneDrive\\Desktop\\Programs\\ML_DL\\coco (1).names", 'r') as f:
    classes = f.read().splitlines()

# Load the input image. cv2.imread returns None on failure instead of
# raising, so stop the script explicitly -- every later step dereferences
# `img` and would otherwise crash with an AttributeError on None.
img = cv2.imread("C:\\Users\\billa\\OneDrive\\Desktop\\Programs\\ML_DL\\ggg.jpg")
if img is None:
    sys.exit("Error loading image.")
height, width = img.shape[:2]  # pixel dimensions, used to rescale detections
| 20 | + |
# Convert the image into a normalized 416x416 blob (BGR -> RGB swap, no
# cropping) and run one forward pass through the network's output layers.
input_blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0),
                                   swapRB=True, crop=False)
yolo.setInput(input_blob)
layer_output = yolo.forward(yolo.getUnconnectedOutLayersNames())
| 28 | + |
# Accumulators for every raw detection that clears the confidence threshold
boxes = []        # [x, y, w, h] in pixels, (x, y) = top-left corner
confidences = []  # best-class score for each box
class_ids = []    # index into `classes` for each box

# Each detection row is [cx, cy, w, h, objectness, class scores...],
# with the box coordinates normalized to [0, 1].
for output in layer_output:
    for detection in output:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence <= 0.7:  # skip low-confidence detections
            continue

        # Rescale the normalized center/size back to pixel space
        cx = int(detection[0] * width)
        cy = int(detection[1] * height)
        w = int(detection[2] * width)
        h = int(detection[3] * height)

        # Convert center coordinates to the top-left corner
        boxes.append([int(cx - w / 2), int(cy - h / 2), w, h])
        confidences.append(float(confidence))
        class_ids.append(class_id)
| 54 | + |
# Non-Maximum Suppression: discard overlapping boxes
# (score threshold 0.5, IoU threshold 0.4)
kept = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

# Font used for the class labels
label_font = cv2.FONT_HERSHEY_PLAIN
# One random color per candidate box
palette = np.random.randint(0, 255, size=(len(boxes), 3), dtype='uint8')

# Draw a rectangle and class label for every box that survived NMS
if len(kept) > 0:
    for idx in kept.flatten():
        x, y, w, h = boxes[idx]
        box_color = [int(c) for c in palette[idx]]
        cv2.rectangle(img, (x, y), (x + w, y + h), box_color, 2)
        cv2.putText(img, str(classes[class_ids[idx]]), (x, y - 10),
                    label_font, 2, (255, 255, 255), 2)
| 74 | + |
# matplotlib expects RGB, while OpenCV stores images as BGR -- convert
# before displaying the annotated result.
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(rgb_img)
plt.axis('off')  # hide the axis ticks/frame
plt.show()
0 commit comments