Skip to content

Commit a001a2a

Browse files
Merge pull request #1043 from luxonis/develop
DepthAI SDK 1.11.0
2 parents 251296f + 492501e commit a001a2a

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+1128
-342
lines changed

.github/workflows/python-publish.yml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,3 +43,11 @@ jobs:
4343
user: __token__
4444
password: ${{ secrets.PYPI_API_TOKEN }}
4545
packages_dir: depthai_sdk/dist/
46+
47+
- name: Repository Dispatch
48+
uses: peter-evans/repository-dispatch@v2
49+
with:
50+
token: ${{ secrets.GITHUB_TOKEN }}
51+
event-type: robothub-oak-release
52+
repository: luxonis/robothub-images
53+
client-payload: '{"ref": "${{ github.ref }}"}'

.gitmodules

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,3 @@
1-
[submodule "depthai_sdk/src/depthai_sdk/integrations/depthai_pipeline_graph"]
2-
path = depthai_sdk/src/depthai_sdk/integrations/depthai_pipeline_graph
3-
url = https://github.com/luxonis/depthai_pipeline_graph
41
[submodule "resources/depthai_boards"]
52
path = resources/depthai_boards
63
url = https://github.com/luxonis/depthai-boards

depthai_sdk/docs/source/conf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
author = 'Luxonis'
2323

2424
# The full version, including alpha/beta/rc tags
25-
release = '1.10.1'
25+
release = '1.11.0'
2626

2727

2828
# -- General configuration ---------------------------------------------------
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
Automatic IR power control
2+
==========================
3+
4+
.. note:: This feature is only available on OAK devices with IR lights.
5+
6+
.. note:: This feature is experimental, please report any issues you encounter
7+
to the Luxonis team.
8+
9+
**Automatic IR power control** is a feature that allows the device to automatically
10+
adjust the IR power based on the scene. This is useful for applications where
11+
the scene is not always the same, for example when the camera is used in an
12+
outdoor environment.
13+
14+
To enable automatic IR power control, you need to use the :meth:`set_auto_ir <depthai_sdk.StereoComponent.set_auto_ir>` method
15+
that accepts two parameters:
16+
17+
- ``auto_mode`` - ``True`` to enable automatic IR power control, ``False`` to disable it.
18+
- ``continuous_mode`` - ``True`` to enable continuous mode, ``False`` otherwise. Requires ``auto_mode`` to be enabled.
19+
20+
When **automatic mode** is enabled, the device will automatically adjust the IR power after the startup.
21+
The disparity map will be analyzed with different dot projector and illumination settings,
22+
and once the best settings are found, the device will use them for the rest of the session.
23+
The whole process takes around **25 seconds**.
24+
25+
If **continuous mode** is enabled, the device will continue to search for better settings.
26+
In case the scene changes and disparity map quality drops below a certain threshold,
27+
the device will automatically adjust the IR power again.
28+
29+
Usage
30+
-----
31+
32+
The following example shows how to enable automatic IR power control in continuous mode:
33+
34+
.. literalinclude:: ../../../examples/StereoComponent/stereo_auto_ir.py
35+
:language: python

depthai_sdk/docs/source/features/replaying.rst

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,11 @@ The following table lists all available recordings:
110110
- ``color.mp4``
111111
- 5.3 MB
112112
- Top-down view at an angle, source video `here <https://pixabay.com/videos/people-commerce-shop-busy-mall-6387/>`__
113+
* - ``people-tracking-above-05``
114+
- ``CAM_B.mp4``, ``CAM_C.mp4``, ``calib.json``
115+
- 12 MB (35sec)
116+
- Top-down view, left+right stereo cameras, `demo usage at replay.py <https://github.com/luxonis/depthai-experiments/tree/master/gen2-record-replay>`__
117+
113118

114119

115120
..
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
from depthai_sdk import OakCamera
from depthai_sdk.classes.packets import IMUPacket
import rerun as rr
import subprocess
import depthai as dai


def callback(packet: IMUPacket):
    """Log accelerometer readings from every IMU report to the Rerun viewer.

    Each entry in ``packet.data`` also carries gyroscope (``d.gyroscope``),
    magnetometer (``d.magneticField``) and rotation-vector (``d.rotationVector``)
    reports, should you want to visualize those as well.
    """
    for d in packet.data:
        accel: dai.IMUReportAccelerometer = d.acceleroMeter
        print(accel.x, accel.y, accel.z)
        # One scalar time series per axis, colored R/G/B respectively.
        rr.log_scalar('world/accel_x', accel.x, color=(255, 0, 0))
        rr.log_scalar('world/accel_y', accel.y, color=(0, 255, 0))
        rr.log_scalar('world/accel_z', accel.z, color=(0, 0, 255))


with OakCamera() as oak:
    # Launch the Rerun viewer as a separate process, then connect to it
    # (spawn=False because we started it ourselves above).
    subprocess.Popen(["rerun", "--memory-limit", "200MB"])
    rr.init("Rerun ", spawn=False)
    rr.connect()

    imu = oak.create_imu()
    imu.config_imu(report_rate=10, batch_report_threshold=2)
    print(oak.device.getConnectedIMU())
    oak.callback(imu, callback=callback)
    oak.start(blocking=True)
30+
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
import cv2
from depthai_sdk import OakCamera
from depthai_sdk.classes.packets import PointcloudPacket, FramePacket
import rerun as rr
import subprocess

# Start the Rerun viewer in its own process and connect to it
# (spawn=False because we launched it ourselves).
subprocess.Popen(["rerun", "--memory-limit", "200MB"])
rr.init("Rerun ", spawn=False)
rr.connect()


def callback(packet: PointcloudPacket):
    """Forward one pointcloud packet (color image + XYZ points) to Rerun."""
    rgb = packet.color_frame.getCvFrame()[..., ::-1]  # BGR to RGB
    rr.log_image('Color Image', rgb)
    xyz = packet.points.reshape(-1, 3)
    # Color every point with the matching pixel of the RGB frame.
    rr.log_points("Pointcloud", xyz, colors=rgb.reshape(-1, 3))


with OakCamera() as oak:
    pcl = oak.create_pointcloud()
    oak.callback(pcl, callback=callback)
    oak.start(blocking=True)

depthai_sdk/examples/StereoComponent/stereo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,5 +11,5 @@
1111
stereo.config_postprocessing(colorize=StereoColor.RGBD, colormap=cv2.COLORMAP_MAGMA)
1212
stereo.config_wls(wls_level=WLSLevel.MEDIUM) # WLS filtering, use for smoother results
1313

14-
oak.visualize(stereo.out.disparity)
14+
oak.visualize(stereo.out.depth)
1515
oak.start(blocking=True)
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
from depthai_sdk import OakCamera

with OakCamera() as oak:
    # Stereo pair: left + right mono cameras feeding the depth node.
    cam_left = oak.create_camera('left')
    cam_right = oak.create_camera('right')
    stereo = oak.create_stereo(left=cam_left, right=cam_right)

    # Let the device estimate the best IR brightness on startup and
    # keep re-tuning it while the app runs (continuous mode).
    stereo.set_auto_ir(auto_mode=True, continuous_mode=True)

    oak.visualize([stereo.out.disparity, cam_left])
    oak.start(blocking=True)
Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
from depthai_sdk import OakCamera
2+
from depthai_sdk.visualize.configs import StereoColor
3+
from depthai_sdk.classes.packets import DepthPacket
4+
import math
5+
import depthai as dai
6+
import cv2
7+
8+
# User-defined distance thresholds (millimeters).
WARNING = 1000  # 1m, orange
CRITICAL = 500  # 50cm, red

# Latest SpatialLocationCalculator results; written by the main loop,
# read by cb() when drawing the overlay.
slc_data = []
13+
14+
def cb(packet: DepthPacket):
    """Overlay per-ROI distance readings on the visualizer-drawn depth frame.

    Reads the module-level ``slc_data`` (latest SpatialLocationCalculator
    results) and highlights any ROI closer than WARNING (orange) or
    CRITICAL (red), labeling it with the distance in meters.
    """
    font = cv2.FONT_HERSHEY_TRIPLEX
    frame = packet.visualizer.draw(packet.frame)

    for reading in slc_data:
        # Map the normalized ROI onto pixel coordinates of the frame.
        roi = reading.config.roi.denormalize(width=frame.shape[1], height=frame.shape[0])
        x1, y1 = int(roi.topLeft().x), int(roi.topLeft().y)
        x2, y2 = int(roi.bottomRight().x), int(roi.bottomRight().y)

        c = reading.spatialCoordinates
        distance = math.sqrt(c.x ** 2 + c.y ** 2 + c.z ** 2)

        if distance == 0:  # Invalid measurement
            continue

        # Pick the severity styling; ROIs beyond WARNING are not drawn.
        if distance < CRITICAL:
            color, thickness = (0, 0, 255), 4
        elif distance < WARNING:
            color, thickness = (0, 140, 255), 2
        else:
            continue

        cv2.rectangle(frame, (x1, y1), (x2, y2), color, thickness=thickness)
        cv2.putText(frame, "{:.1f}m".format(distance/1000), (x1 + 10, y1 + 20), font, 0.5, color)

    cv2.imshow('0_depth', frame)
45+
46+
with OakCamera() as oak:
    stereo = oak.create_stereo('720p')
    # We don't need high fill rate, just very accurate depth, that's why we enable some filters, and
    # set the confidence threshold to 50
    config = stereo.node.initialConfig.get()
    config.postProcessing.brightnessFilter.minBrightness = 0
    config.postProcessing.brightnessFilter.maxBrightness = 255
    stereo.node.initialConfig.set(config)
    stereo.config_postprocessing(colorize=StereoColor.RGBD, colormap=cv2.COLORMAP_BONE)
    stereo.config_stereo(confidence=50, lr_check=True, extended=True)

    # Register the drawing callback; must happen before build()/start().
    oak.visualize([stereo], fps=True, callback=cb)

    # Build the pipeline now so we can append raw depthai nodes below.
    oak.build()

    # Cover the frame with a 15x9 grid of SpatialLocationCalculator ROIs.
    slc = oak.pipeline.create(dai.node.SpatialLocationCalculator)
    for x in range(15):
        for y in range(9):
            config = dai.SpatialLocationCalculatorConfigData()
            # Only consider depth between 0.2m and 10m.
            config.depthThresholds.lowerThreshold = 200
            config.depthThresholds.upperThreshold = 10000
            config.roi = dai.Rect(dai.Point2f((x+0.5)*0.0625, (y+0.5)*0.1), dai.Point2f((x+1.5)*0.0625, (y+1.5)*0.1))
            # TODO: change from median to 10th percentile once supported
            config.calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN
            slc.initialConfig.addROI(config)

    stereo.depth.link(slc.inputDepth)

    # Stream the calculator output back to the host over XLink.
    slc_out = oak.pipeline.create(dai.node.XLinkOut)
    slc_out.setStreamName('slc')
    slc.out.link(slc_out.input)

    oak.start() # Start the pipeline (upload it to the OAK)

    q = oak.device.getOutputQueue('slc') # Create output queue after calling start()
    while oak.running():
        if q.has():
            # Update the module-level slc_data that cb() reads when drawing.
            slc_data = q.get().getSpatialLocations()
        oak.poll()

0 commit comments

Comments
 (0)