-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathpi-object.py
More file actions
103 lines (81 loc) · 2.69 KB
/
pi-object.py
File metadata and controls
103 lines (81 loc) · 2.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
from imutils.video import VideoStream
from imutils.video import FPS
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np
import argparse
import imutils
import time
import cv2
def classify_frame(net, inputQueue, outputQueue):
    """Worker loop: pull frames off inputQueue, run the Caffe SSD, push results.

    Intended to run forever in a daemonized child process; never returns.

    Args:
        net: cv2.dnn network loaded from the Caffe prototxt/model.
        inputQueue: multiprocessing.Queue supplying BGR frames (numpy arrays).
        outputQueue: multiprocessing.Queue receiving raw `net.forward()` output.
    """
    while True:
        # Blocking get() instead of polling `if not inputQueue.empty()`:
        # the original busy-wait spins a CPU core while idle, and empty()
        # is only advisory across processes (race-prone).
        frame = inputQueue.get()
        # MobileNet-SSD expects 300x300 input; scalefactor 0.007843
        # (= 1/127.5) with mean 127.5 maps pixels to roughly [-1, 1].
        frame = cv2.resize(frame, (300, 300))
        blob = cv2.dnn.blobFromImage(frame, 0.007843,
            (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()
        outputQueue.put(detections)
# --- setup: CLI arguments, model, worker process, video stream ---------------

# Command-line arguments: the model files plus a detection-confidence floor.
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--prototxt", required=True,
                    help="path to Caffe 'deploy' prototxt file")
parser.add_argument("-m", "--model", required=True,
                    help="path to Caffe pre-trained model")
parser.add_argument("-c", "--confidence", type=float, default=0.2,
                    help="minimum probability to filter weak detections")
args = vars(parser.parse_args())

# Class labels the MobileNet-SSD was trained on (detection index -> label),
# plus one random color per class for drawing boxes.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# Single-slot queues: the worker always processes a recent frame and the
# main loop never queues up a backlog it can't keep pace with.
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
detections = None

print("[INFO] starting process...")
p = Process(target=classify_frame, args=(net, inputQueue,
    outputQueue,))
p.daemon = True  # dies with the parent; no explicit join required
p.start()

print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)  # give the camera sensor time to warm up
fps = FPS().start()
# Main display loop: feed frames to the worker, overlay the latest detections.
while True:
    frame = vs.read()
    # VideoStream.read() returns None when the camera disconnects or the
    # stream ends; exit cleanly instead of crashing inside imutils.resize().
    if frame is None:
        break
    frame = imutils.resize(frame, width=400)
    (fH, fW) = frame.shape[:2]

    # Offer the worker a new frame only when its slot is free, and pick up
    # fresh detections when available; otherwise keep drawing the last set.
    if inputQueue.empty():
        inputQueue.put(frame)
    if not outputQueue.empty():
        detections = outputQueue.get()

    if detections is not None:
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            # Skip weak detections below the user-supplied threshold.
            if confidence < args["confidence"]:
                continue
            idx = int(detections[0, 0, i, 1])
            # Box coordinates come back normalized to [0, 1]; scale to pixels.
            dims = np.array([fW, fH, fW, fH])
            box = detections[0, 0, i, 3:7] * dims
            (startX, startY, endX, endY) = box.astype("int")
            label = "{}: {:.2f}%".format(CLASSES[idx],
                confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                COLORS[idx], 2)
            # Draw the label inside the frame when the box hugs the top edge.
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
    fps.update()

# Cleanup: report throughput, close the window, release the camera thread.
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()