Commit de5117e ("Synch")
1 parent 6930810 commit de5117e

24 files changed: +127 / -113 lines

docs/source/samples/rgb_rotate_warp.rst
Lines changed: 8 additions & 0 deletions

@@ -31,4 +31,12 @@ Source code
          :language: python
          :linenos:
 
+   .. tab:: C++
+
+      Also `available on GitHub <https://github.com/luxonis/depthai-core/blob/main/examples/src/rgb_rotate_warp.cpp>`__
+
+      .. literalinclude:: ../../../depthai-core/examples/src/rgb_rotate_warp.cpp
+         :language: cpp
+         :linenos:
+
 .. include:: /includes/footer-short.rst

examples/depth_preview.py
Lines changed: 1 addition & 0 deletions

@@ -46,6 +46,7 @@
 
 # Output queue will be used to get the disparity frames from the outputs defined above
 q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
+
 while True:
     inDepth = q.get() # blocking call, will wait until a new data has arrived
     frame = inDepth.getFrame()
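
Note: the queue above is created with blocking=False, so once maxSize frames pile up the oldest ones are dropped, while q.get() itself still waits for data. A minimal sketch (not part of this commit; it assumes the pipeline is built exactly as in the example) of the alternative non-blocking polling pattern with tryGet():

    import cv2
    import depthai as dai

    with dai.Device(pipeline) as device:
        # Same queue settings as in the example above
        q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
        while True:
            inDepth = q.tryGet()  # non-blocking; returns None if no frame has arrived yet
            if inDepth is not None:
                cv2.imshow("disparity", inDepth.getFrame())
            if cv2.waitKey(1) == ord('q'):
                break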

examples/encoding_max_limit.py
Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@
 # Connect to device and start pipeline
 with dai.Device(pipeline) as dev:
 
-    # Prepare data queues
+    # Output queues will be used to get the encoded data from the output defined above
     outQ1 = dev.getOutputQueue('ve1Out', maxSize=30, blocking=True)
     outQ2 = dev.getOutputQueue('ve2Out', maxSize=30, blocking=True)
     outQ3 = dev.getOutputQueue('ve3Out', maxSize=30, blocking=True)
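
The three queues hold encoded bitstream packets rather than raw frames. A hedged sketch (not part of this diff; the file names are illustrative) of how such queues are typically drained into raw .h264/.h265 files until interrupted:

    # Append each encoded packet's bytes to a file; getData() returns a numpy byte array.
    with open('mono1.h264', 'wb') as f1, open('mono2.h264', 'wb') as f2, open('color.h265', 'wb') as f3:
        try:
            while True:
                while outQ1.has():
                    outQ1.get().getData().tofile(f1)
                while outQ2.has():
                    outQ2.get().getData().tofile(f2)
                while outQ3.has():
                    outQ3.get().getData().tofile(f3)
        except KeyboardInterrupt:
            pass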

examples/mono_camera_control.py
Lines changed: 5 additions & 7 deletions

@@ -22,6 +22,8 @@
 def clamp(num, v0, v1):
     return max(v0, min(num, v1))
 
+sendCamConfig = False
+
 # Create pipeline
 pipeline = dai.Pipeline()
 
@@ -73,11 +75,6 @@ def clamp(num, v0, v1):
 configQueue = device.getInputQueue(configIn.getStreamName())
 controlQueue = device.getInputQueue(controlIn.getStreamName())
 
-def displayFrame(name, frame):
-    cv2.imshow(name, frame)
-
-sendCamConfig = False
-
 # Defaults and limits for manual focus/exposure controls
 expTime = 20000
 expMin = 1
@@ -90,8 +87,8 @@ def displayFrame(name, frame):
 while True:
     inRight = qRight.get()
     inLeft = qLeft.get()
-    displayFrame("right", inRight.getCvFrame())
-    displayFrame("left", inLeft.getCvFrame())
+    cv2.imshow("right", inRight.getCvFrame())
+    cv2.imshow("left", inLeft.getCvFrame())
 
     # Update screen (1ms pooling rate)
     key = cv2.waitKey(1)
@@ -134,6 +131,7 @@ def displayFrame(name, frame):
 bottomRight.x += stepSize
 sendCamConfig = True
 
+# Send new config to camera
 if sendCamConfig:
     cfg = dai.ImageManipConfig()
     cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
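
The hunk ends before the config is actually pushed to the device. As a hedged sketch (not part of the diff, reusing the configQueue and crop points defined earlier in the example), the usual follow-up is:

    if sendCamConfig:
        cfg = dai.ImageManipConfig()
        cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
        configQueue.send(cfg)   # deliver the new crop rectangle to the ImageManip node
        sendCamConfig = False   # only send again after the next key press changes the crop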

examples/mono_depth_mobilenetssd.py
Lines changed: 5 additions & 4 deletions

@@ -6,8 +6,6 @@
 import depthai as dai
 import numpy as np
 
-flipRectified = True
-
 # Get argument first
 nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute())
 if len(sys.argv) > 1:
@@ -21,6 +19,8 @@
 labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
             "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
 
+flipRectified = True
+
 # Create pipeline
 pipeline = dai.Pipeline()
 
@@ -77,6 +77,7 @@
 qDet = device.getOutputQueue("nn", maxSize=4, blocking=False)
 
 rightFrame = None
+disparityFrame = None
 detections = []
 
 # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
@@ -90,8 +91,8 @@ def show(name, frame):
 color = (255, 0, 0)
 for detection in detections:
     bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
-    cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-    cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+    cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+    cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
     cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
 # Show the frame
 cv2.imshow(name, frame)
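
The frameNorm() helper referenced by that comment maps the network's <0..1> coordinates to pixel coordinates. A short sketch of the normalization it describes (matching the return expression visible in the mono_mobilenet.py hunk below; treat it as illustrative rather than the literal file contents):

    import numpy as np

    def frameNorm(frame, bbox):
        # Scale y values by frame height and x values by frame width
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)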

examples/mono_full_resolution_saver.py
Lines changed: 1 addition & 2 deletions

@@ -27,15 +27,14 @@
 # Output queue will be used to get the grayscale frames from the output defined above
 qRight = device.getOutputQueue(name="right", maxSize=4, blocking=False)
 
-# Make sure the destination path is present before starting to store the examples
 dirName = "mono_data"
 Path(dirName).mkdir(parents=True, exist_ok=True)
 
 while True:
     inRight = qRight.get() # Blocking call, will wait until a new data has arrived
     # Data is originally represented as a flat 1D array, it needs to be converted into HxW form
     # Frame is transformed and ready to be shown
-    cv2.imshow("right", inRight.getFrame())
+    cv2.imshow("right", inRight.getCvFrame())
 
     # After showing the frame, it's being stored inside a target directory as a PNG image
     cv2.imwrite(f"{dirName}/{int(time.time() * 1000)}.png", inRight.getFrame())
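
The display call switches from getFrame() to getCvFrame(). Roughly (hedged, based on the depthai ImgFrame API): getFrame() returns the raw frame data as a numpy array whose layout depends on the frame type, while getCvFrame() returns an image already converted for OpenCV, which is the safer choice for display:

    raw = inRight.getFrame()     # raw data; layout depends on the frame type
    img = inRight.getCvFrame()   # OpenCV-ready image (grayscale here), safe to display directly
    cv2.imshow("right", img)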

examples/mono_mobilenet.py
Lines changed: 5 additions & 5 deletions

@@ -24,8 +24,8 @@
 
 # Define sources and outputs
 monoRight = pipeline.createMonoCamera()
-nn = pipeline.createMobileNetDetectionNetwork()
 manip = pipeline.createImageManip()
+nn = pipeline.createMobileNetDetectionNetwork()
 manipOut = pipeline.createXLinkOut()
 nnOut = pipeline.createXLinkOut()
 
@@ -41,7 +41,6 @@
 # The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
 manip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
 
-# Define a neural network that will make predictions based on the source frames
 nn.setConfidenceThreshold(0.5)
 nn.setBlobPath(nnPath)
 nn.setNumInferenceThreads(2)
@@ -70,11 +69,12 @@ def frameNorm(frame, bbox):
     return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
 
 def displayFrame(name, frame):
+    color = (255, 0, 0)
     for detection in detections:
         bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
-        cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-        cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
+        cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+        cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
     # Show the frame
     cv2.imshow(name, frame)
 
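
For context, displayFrame() is driven from the example's main loop. A hedged sketch of that loop (the qManip/qDet names and the loop body are assumptions, not part of this diff):

    while True:
        inManip = qManip.get()          # frames from the ImageManip XLinkOut
        inDet = qDet.get()              # detections from the MobileNet network
        frame = inManip.getCvFrame()
        detections = inDet.detections
        displayFrame("mono", frame)
        if cv2.waitKey(1) == ord('q'):
            break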

examples/object_tracker.py
Lines changed: 3 additions & 3 deletions

@@ -101,9 +101,9 @@
 except:
     label = t.label
 
-cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
-cv2.putText(frame, f"ID: {[t.id]}", (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
-cv2.putText(frame, t.status.name, (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+cv2.putText(frame, f"ID: {[t.id]}", (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+cv2.putText(frame, t.status.name, (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
 cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
 
 cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)

examples/object_tracker_video.py
Lines changed: 3 additions & 3 deletions

@@ -158,9 +158,9 @@ def displayFrame(name, frame):
 except:
     label = t.label
 
-cv2.putText(trackerFrame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
-cv2.putText(trackerFrame, f"ID: {[t.id]}", (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
-cv2.putText(trackerFrame, t.status.name, (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
+cv2.putText(trackerFrame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+cv2.putText(trackerFrame, f"ID: {[t.id]}", (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+cv2.putText(trackerFrame, t.status.name, (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
 cv2.rectangle(trackerFrame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
 
 cv2.putText(trackerFrame, "Fps: {:.2f}".format(fps), (2, trackerFrame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)
