Skip to content

Commit 8946ad3

Browse files
committed
demo 12: flipped outputs so they have the correct orientation, fixed normalization of disparity, and changed depth -> disparity
1 parent 9e2a453 commit 8946ad3

File tree

1 file changed

+25
-15
lines changed

1 file changed

+25
-15
lines changed

examples/12_rgb_encoding_mono_mobilenet_depth.py

Lines changed: 25 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66
import depthai as dai
77
import numpy as np
88

9+
flipRectified = True
10+
911
# Get argument first
1012
nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute())
1113
if len(sys.argv) > 1:
@@ -40,13 +42,16 @@
4042
depth.setConfidenceThreshold(255)
4143
# Note: the rectified streams are horizontally mirrored by default
4244
depth.setOutputRectified(True)
45+
depth.setRectifyMirrorFrame(False)
4346
depth.setRectifyEdgeFillColor(0) # Black, to better see the cutout
4447
camLeft.out.link(depth.left)
4548
camRight.out.link(depth.right)
49+
# Disparity range is 0..95, used for normalization
50+
disparity_multiplier = 255 / 95
4651

47-
depthOut = pipeline.createXLinkOut()
48-
depthOut.setStreamName("depth")
49-
depth.disparity.link(depthOut.input)
52+
disparityOut = pipeline.createXLinkOut()
53+
disparityOut.setStreamName("disparity")
54+
depth.disparity.link(disparityOut.input)
5055

5156
nn = pipeline.createMobileNetDetectionNetwork()
5257
nn.setConfidenceThreshold(0.5)
@@ -85,14 +90,14 @@
8590

8691
queueSize = 8
8792
qRight = device.getOutputQueue("right", queueSize)
88-
qDepth = device.getOutputQueue("depth", queueSize)
93+
qDisparity = device.getOutputQueue("disparity", queueSize)
8994
qManip = device.getOutputQueue("manip", queueSize)
9095
qDet = device.getOutputQueue("nn", queueSize)
9196
qRgbEnc = device.getOutputQueue('h265', maxSize=30, blocking=True)
9297

9398
frame = None
9499
frameManip = None
95-
frameDepth = None
100+
frameDisparity = None
96101
detections = []
97102
offsetX = (camRight.getResolutionWidth() - camRight.getResolutionHeight()) // 2
98103
croppedFrame = np.zeros((camRight.getResolutionHeight(), camRight.getResolutionHeight()))
@@ -111,21 +116,26 @@ def frameNorm(frame, bbox):
111116
inRight = qRight.tryGet()
112117
inManip = qManip.tryGet()
113118
inDet = qDet.tryGet()
114-
inDepth = qDepth.tryGet()
119+
inDisparity = qDisparity.tryGet()
115120

116121
while qRgbEnc.has():
117122
qRgbEnc.get().getData().tofile(videoFile)
118123

119124
if inRight is not None:
120125
frame = cv2.flip(inRight.getCvFrame(), 1)
126+
if flipRectified:
127+
frame = cv2.flip(frame, 1)
121128

122129
if inManip is not None:
123130
frameManip = inManip.getCvFrame()
124131

125-
if inDepth is not None:
126-
frameDepth = cv2.flip(inDepth.getFrame(), 1)
127-
frameDepth = cv2.normalize(frameDepth, None, 0, 255, cv2.NORM_MINMAX)
128-
frameDepth = cv2.applyColorMap(frameDepth, cv2.COLORMAP_JET)
132+
if inDisparity is not None:
133+
# Flip disparity frame, normalize it and apply color map for better visualization
134+
frameDisparity = inDisparity.getFrame()
135+
if flipRectified:
136+
frameDisparity = cv2.flip(frameDisparity, 1)
137+
frameDisparity = (frameDisparity*disparity_multiplier).astype(np.uint8)
138+
frameDisparity = cv2.applyColorMap(frameDisparity, cv2.COLORMAP_JET)
129139

130140
if inDet is not None:
131141
detections = inDet.detections
@@ -139,14 +149,14 @@ def frameNorm(frame, bbox):
139149
cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
140150
cv2.imshow("right", frame)
141151

142-
if frameDepth is not None:
152+
if frameDisparity is not None:
143153
for detection in detections:
144154
bbox = frameNorm(croppedFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
145155
bbox[::2] += offsetX
146-
cv2.rectangle(frameDepth, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
147-
cv2.putText(frameDepth, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
148-
cv2.putText(frameDepth, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
149-
cv2.imshow("depth", frameDepth)
156+
cv2.rectangle(frameDisparity, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
157+
cv2.putText(frameDisparity, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
158+
cv2.putText(frameDisparity, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
159+
cv2.imshow("disparity", frameDisparity)
150160

151161
if frameManip is not None:
152162
for detection in detections:

0 commit comments

Comments (0)