@@ -24,8 +24,8 @@
 
 # Define sources and outputs
 monoRight = pipeline.createMonoCamera()
-nn = pipeline.createMobileNetDetectionNetwork()
 manip = pipeline.createImageManip()
+nn = pipeline.createMobileNetDetectionNetwork()
 manipOut = pipeline.createXLinkOut()
 nnOut = pipeline.createXLinkOut()
 
|
|
@@ -41,7 +41,6 @@
 # The NN model expects BGR input. By default, the ImageManip output type matches the input type (grayscale in this case).
 manip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
 
-# Define a neural network that will make predictions based on the source frames
 nn.setConfidenceThreshold(0.5)
 nn.setBlobPath(nnPath)
 nn.setNumInferenceThreads(2)
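
Creating and configuring the nodes is only half of the pipeline definition; they still have to be linked. For reference, a minimal sketch of how this example typically wires them together (the stream names `right` and `nn` and the 300x300 resize are assumptions based on the surrounding example, not part of this diff):

```python
# Sketch only: wiring assumed from the surrounding example, not part of this diff.
manipOut.setStreamName("right")          # assumed stream name
nnOut.setStreamName("nn")                # assumed stream name

manip.initialConfig.setResize(300, 300)  # MobileNet-SSD expects 300x300 input

monoRight.out.link(manip.inputImage)     # mono frames -> ImageManip
manip.out.link(nn.input)                 # converted BGR frames -> detector
manip.out.link(manipOut.input)           # same frames -> host for display
nn.out.link(nnOut.input)                 # detections -> host
```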
@@ -70,11 +69,12 @@ def frameNorm(frame, bbox): |
     return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
 
 def displayFrame(name, frame):
+    color = (255, 0, 0)
     for detection in detections:
         bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
-        cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-        cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
+        cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+        cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
     # Show the frame
     cv2.imshow(name, frame)
 
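
`displayFrame` reads the module-level `detections` list, and `frameNorm` scales the detector's normalized [0..1] coordinates to pixel coordinates for the given frame. A minimal sketch of the host-side loop that feeds both, assuming the queue and stream names from the wiring sketch above:

```python
# Sketch only: host-side loop assumed from the full example, not part of this diff.
with dai.Device(pipeline) as device:
    qRight = device.getOutputQueue("right", maxSize=4, blocking=False)
    qDet = device.getOutputQueue("nn", maxSize=4, blocking=False)

    frame = None
    detections = []

    while True:
        inRight = qRight.tryGet()
        inDet = qDet.tryGet()

        if inRight is not None:
            frame = inRight.getCvFrame()   # ImgFrame -> numpy array
        if inDet is not None:
            detections = inDet.detections  # list of detections for displayFrame

        if frame is not None:
            displayFrame("right", frame)

        if cv2.waitKey(1) == ord('q'):
            break
```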
|
|