
Commit af8e15b
Merge branch 'release_2.20.0' into main
2 parents: 2025ce1 + 8e5b547

26 files changed: +787 / -15 lines

.github/workflows/main.yml

Lines changed: 12 additions & 0 deletions

@@ -538,3 +538,15 @@ jobs:
           repository: luxonis/robothub-apps
           event-type: depthai-python-release
           client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
+
+  notify_hil_workflow_linux_x86_64:
+    needs: [build-linux-x86_64]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Repository Dispatch
+        uses: peter-evans/repository-dispatch@v2
+        with:
+          token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }}
+          repository: luxonis/depthai-core-hil-tests
+          event-type: python-hil-event
+          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
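
This new job notifies a hardware-in-the-loop (HIL) test repository once the Linux x86_64 build finishes. The repository-dispatch action is a thin wrapper over GitHub's repository_dispatch REST endpoint; a minimal Python sketch of the equivalent call (the requests usage and environment-variable lookup are illustrative, not part of this commit):

import json
import os

import requests  # illustrative dependency, not part of this commit

# POST the same repository_dispatch event the workflow step above sends.
token = os.environ["HIL_CORE_DISPATCH_TOKEN"]  # assumed: a token with repo scope
resp = requests.post(
    "https://api.github.com/repos/luxonis/depthai-core-hil-tests/dispatches",
    headers={
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github+json",
    },
    data=json.dumps({
        "event_type": "python-hil-event",
        "client_payload": {"ref": "refs/heads/main", "sha": "<commit sha>"},
    }),
)
resp.raise_for_status()  # GitHub answers 204 No Content on success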

.gitignore

Lines changed: 1 addition & 0 deletions

@@ -38,6 +38,7 @@ wheelhouse/
 .venv
 env/
 venv/
+venv_*/
 ENV/
 env.bak/
 venv.bak/

CMakeLists.txt

Lines changed: 1 addition & 0 deletions

@@ -107,6 +107,7 @@ pybind11_add_module(${TARGET_NAME}
     src/pipeline/node/XLinkInBindings.cpp
     src/pipeline/node/XLinkOutBindings.cpp
     src/pipeline/node/ColorCameraBindings.cpp
+    src/pipeline/node/CameraBindings.cpp
     src/pipeline/node/MonoCameraBindings.cpp
     src/pipeline/node/StereoDepthBindings.cpp
     src/pipeline/node/NeuralNetworkBindings.cpp

depthai-core

Submodule depthai-core updated 39 files

examples/Camera/camera_isp.py

Lines changed: 62 additions & 0 deletions (new file)

#!/usr/bin/env python3

import cv2
import depthai as dai
import time

# Connect to device and start pipeline
with dai.Device() as device:
    # Device name
    print('Device name:', device.getDeviceName())
    # Bootloader version
    if device.getBootloaderVersion() is not None:
        print('Bootloader version:', device.getBootloaderVersion())
    # Print out usb speed
    print('Usb speed:', device.getUsbSpeed().name)
    # Connected cameras
    print('Connected cameras:', device.getConnectedCameraFeatures())

    # Create pipeline
    pipeline = dai.Pipeline()
    cams = device.getConnectedCameraFeatures()
    streams = []
    for cam in cams:
        print(str(cam), str(cam.socket), cam.socket)
        c = pipeline.create(dai.node.Camera)
        x = pipeline.create(dai.node.XLinkOut)
        c.isp.link(x.input)
        c.setBoardSocket(cam.socket)
        stream = str(cam.socket)
        if cam.name:
            stream = f'{cam.name} ({stream})'
        x.setStreamName(stream)
        streams.append(stream)

    # Start pipeline
    device.startPipeline(pipeline)
    fpsCounter = {}
    lastFpsCount = {}
    tfps = time.time()
    while not device.isClosed():
        queueNames = device.getQueueEvents(streams)
        for stream in queueNames:
            messages = device.getOutputQueue(stream).tryGetAll()
            fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages)
            for message in messages:
                # Display arrived frames
                if type(message) == dai.ImgFrame:
                    # render fps
                    fps = lastFpsCount.get(stream, 0)
                    frame = message.getCvFrame()
                    cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255))
                    cv2.imshow(stream, frame)

        if time.time() - tfps >= 1.0:
            scale = time.time() - tfps
            for stream in fpsCounter.keys():
                lastFpsCount[stream] = fpsCounter[stream] / scale
            fpsCounter = {}
            tfps = time.time()

        if cv2.waitKey(1) == ord('q'):
            break
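
The FPS overlay in this example accumulates message counts per stream and converts them to rates roughly once per second. That bookkeeping (fpsCounter, lastFpsCount, tfps) can be read as a small reusable helper; a minimal sketch of the same logic (class name hypothetical, not part of this commit):

import time

class StreamFps:
    # Mirrors the fpsCounter/lastFpsCount/tfps pattern above: count messages
    # per stream, then fold counts into rates once per interval.
    def __init__(self, interval=1.0):
        self.interval = interval
        self.counts = {}
        self.rates = {}
        self.t0 = time.time()

    def add(self, stream, n=1):
        self.counts[stream] = self.counts.get(stream, 0) + n
        elapsed = time.time() - self.t0
        if elapsed >= self.interval:
            for s, c in self.counts.items():
                self.rates[s] = c / elapsed
            self.counts = {}
            self.t0 = time.time()

    def get(self, stream):
        return self.rates.get(stream, 0.0)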

examples/Camera/camera_preview.py

Lines changed: 62 additions & 0 deletions (new file)

#!/usr/bin/env python3

import cv2
import depthai as dai
import time

# Connect to device and start pipeline
with dai.Device(dai.OpenVINO.DEFAULT_VERSION, dai.UsbSpeed.SUPER_PLUS) as device:
    # Device name
    print('Device name:', device.getDeviceName())
    # Bootloader version
    if device.getBootloaderVersion() is not None:
        print('Bootloader version:', device.getBootloaderVersion())
    # Print out usb speed
    print('Usb speed:', device.getUsbSpeed().name)
    # Connected cameras
    print('Connected cameras:', device.getConnectedCameraFeatures())

    # Create pipeline
    pipeline = dai.Pipeline()
    cams = device.getConnectedCameraFeatures()
    streams = []
    for cam in cams:
        print(str(cam), str(cam.socket), cam.socket)
        c = pipeline.create(dai.node.Camera)
        x = pipeline.create(dai.node.XLinkOut)
        c.preview.link(x.input)
        c.setBoardSocket(cam.socket)
        stream = str(cam.socket)
        if cam.name:
            stream = f'{cam.name} ({stream})'
        x.setStreamName(stream)
        streams.append(stream)

    # Start pipeline
    device.startPipeline(pipeline)
    fpsCounter = {}
    lastFpsCount = {}
    tfps = time.time()
    while not device.isClosed():
        queueNames = device.getQueueEvents(streams)
        for stream in queueNames:
            messages = device.getOutputQueue(stream).tryGetAll()
            fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages)
            for message in messages:
                # Display arrived frames
                if type(message) == dai.ImgFrame:
                    # render fps
                    fps = lastFpsCount.get(stream, 0)
                    frame = message.getCvFrame()
                    cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255))
                    cv2.imshow(stream, frame)

        if time.time() - tfps >= 1.0:
            scale = time.time() - tfps
            for stream in fpsCounter.keys():
                lastFpsCount[stream] = fpsCounter[stream] / scale
            fpsCounter = {}
            tfps = time.time()

        if cv2.waitKey(1) == ord('q'):
            break
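
Unlike camera_isp.py, this example links the preview output and constructs the Device with an explicit OpenVINO version and a SUPER_PLUS (USB 3.1 Gen 2) link. If a host or cable cannot sustain that link, capping the requested speed is a common fallback; a hypothetical variant of the constructor call:

import depthai as dai

# Cap the link at USB2 HIGH speed instead of SUPER_PLUS; more robust on
# marginal cables/hubs, at the cost of bandwidth. Illustrative only.
with dai.Device(dai.OpenVINO.DEFAULT_VERSION, dai.UsbSpeed.HIGH) as device:
    print('Usb speed:', device.getUsbSpeed().name)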

examples/ColorCamera/rgb_preview.py

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@
 # Connect to device and start pipeline
 with dai.Device(pipeline) as device:

-    print('Connected cameras:', device.getConnectedCameras())
+    print('Connected cameras:', device.getConnectedCameraFeatures())
     # Print out usb speed
     print('Usb speed:', device.getUsbSpeed().name)
     # Bootloader version
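
getConnectedCameras() returns only board sockets, while getConnectedCameraFeatures() returns richer per-camera objects; the new Camera examples in this commit rely on their socket and name fields. A small sketch of how those fields are typically used (other attributes vary by depthai version):

import depthai as dai

with dai.Device() as device:
    for cam in device.getConnectedCameraFeatures():
        label = str(cam.socket)
        if cam.name:  # same labeling scheme as the new examples above
            label = f'{cam.name} ({label})'
        print(label)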

examples/ImageManip/image_manip_warp_mesh.py

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
 maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3

 # Warp preview frame 1
-manip1 = pipeline.create(dai.node.Warp)
+manip1 = pipeline.create(dai.node.ImageManip)
 # Create a custom warp mesh
 tl = dai.Point2f(20, 20)
 tr = dai.Point2f(460, 20)
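
The warp mesh in this example is a sparse grid of dai.Point2f control points (tl, tr, ...) that the node interpolates into a per-pixel mapping. A host-side OpenCV sketch of the same idea, built from four corner points (sizes and the bottom corners are illustrative, not from this file):

import cv2
import numpy as np

# Map inset corners (tl=(20,20), tr=(460,20), plus assumed bottom corners)
# onto the full output, which is what a 2x2 warp mesh amounts to.
h, w = 480, 480
src = np.float32([[20, 20], [460, 20], [20, 460], [460, 460]])
dst = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
M = cv2.getPerspectiveTransform(src, dst)

img = np.zeros((h, w, 3), np.uint8)  # stand-in for a preview frame
warped = cv2.warpPerspective(img, M, (w, h))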

Lines changed: 62 additions & 0 deletions (new file)

#!/usr/bin/env python3

import cv2
import depthai as dai
import numpy as np

# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = False
# Better handling for occlusions:
lr_check = True

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
depth = pipeline.create(dai.node.StereoDepth)
xout = pipeline.create(dai.node.XLinkOut)

xout.setStreamName("disparity")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
depth.setLeftRightCheck(lr_check)
depth.setExtendedDisparity(extended_disparity)
depth.setSubpixel(subpixel)

# Create a colormap
colormap = pipeline.create(dai.node.ImageManip)
colormap.initialConfig.setColormap(dai.Colormap.STEREO_TURBO, depth.initialConfig.getMaxDisparity())
colormap.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)

# Linking
monoLeft.out.link(depth.left)
monoRight.out.link(depth.right)
depth.disparity.link(colormap.inputImage)
colormap.out.link(xout.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queue will be used to get the disparity frames from the outputs defined above
    q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)

    while True:
        inDisparity = q.get()  # blocking call, waits until new data arrives
        frame = inDisparity.getCvFrame()
        cv2.imshow("disparity", frame)

        if cv2.waitKey(1) == ord('q'):
            break
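
For interpreting these frames: disparity converts to metric depth as depth = focal_length_px * baseline / disparity, which is why the doubled range of extended mode (95 -> 190 px) halves the minimum measurable depth. A small numeric sketch (focal length and baseline are placeholder values, not from this commit):

focal_px = 441.25   # placeholder: focal length of a 400P mono sensor, in pixels
baseline_m = 0.075  # placeholder: 7.5 cm stereo baseline

def depth_m(disparity_px):
    # Zero disparity means infinitely far (or invalid), so guard it.
    return focal_px * baseline_m / disparity_px if disparity_px else float('inf')

print(depth_m(95))   # nearest measurable depth with the standard 95 px range
print(depth_m(190))  # extended disparity doubles the range -> half the minimum depth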
Lines changed: 72 additions & 0 deletions (new file)

#!/usr/bin/env python3

import cv2
import depthai as dai
import numpy as np

# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = True
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = True
# Better handling for occlusions:
lr_check = True

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
left = pipeline.create(dai.node.ColorCamera)
right = pipeline.create(dai.node.ColorCamera)
depth = pipeline.create(dai.node.StereoDepth)
xout = pipeline.create(dai.node.XLinkOut)
xoutl = pipeline.create(dai.node.XLinkOut)
xoutr = pipeline.create(dai.node.XLinkOut)

xout.setStreamName("disparity")
xoutl.setStreamName("rectifiedLeft")
xoutr.setStreamName("rectifiedRight")

# Properties
left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setIspScale(2, 3)
left.setIspScale(2, 3)

# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
depth.setInputResolution(1280, 800)
depth.setLeftRightCheck(lr_check)
depth.setExtendedDisparity(extended_disparity)
depth.setSubpixel(subpixel)

# Linking
left.isp.link(depth.left)
right.isp.link(depth.right)
depth.disparity.link(xout.input)
depth.rectifiedLeft.link(xoutl.input)
depth.rectifiedRight.link(xoutr.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    while not device.isClosed():
        queueNames = device.getQueueEvents()
        for q in queueNames:
            message = device.getOutputQueue(q).get()
            # Display arrived frames
            if type(message) == dai.ImgFrame:
                frame = message.getCvFrame()
                if 'disparity' in q:
                    maxDisp = depth.initialConfig.getMaxDisparity()
                    disp = (frame * (255.0 / maxDisp)).astype(np.uint8)
                    disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
                    cv2.imshow(q, disp)
                else:
                    cv2.imshow(q, frame)
        if cv2.waitKey(1) == ord('q'):
            break
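
A note on the normalization above: getMaxDisparity() reflects the flags set at the top of the file, so scaling by 255.0 / maxDisp keeps the 8-bit visualization in range. A rough sketch of how that maximum is assumed to scale (the API call is authoritative; the 2**3 subpixel factor is an assumption about the default 3 fractional bits):

extended_disparity = True
subpixel = True

base = 95               # standard disparity search range
if extended_disparity:  # range doubles: 95 -> 190
    base *= 2
if subpixel:            # assumed: 3 fractional bits by default
    base *= 2 ** 3
print(base)             # compare against depth.initialConfig.getMaxDisparity()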
