
Commit d5f9473

Merge commit 'f90eed9a3af09a0c43c36e0383c095ef4786fe4e' into develop
2 parents: 7ff599b + f90eed9

17 files changed: +631 / -10 lines

.gitignore

Lines changed: 1 addition & 0 deletions

@@ -38,6 +38,7 @@ wheelhouse/
 .venv
 env/
 venv/
+venv_*/
 ENV/
 env.bak/
 venv.bak/

CMakeLists.txt

Lines changed: 1 addition & 0 deletions

@@ -107,6 +107,7 @@ pybind11_add_module(${TARGET_NAME}
     src/pipeline/node/XLinkInBindings.cpp
     src/pipeline/node/XLinkOutBindings.cpp
     src/pipeline/node/ColorCameraBindings.cpp
+    src/pipeline/node/CameraBindings.cpp
     src/pipeline/node/MonoCameraBindings.cpp
     src/pipeline/node/StereoDepthBindings.cpp
     src/pipeline/node/NeuralNetworkBindings.cpp
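
This entry is what compiles the new Camera node bindings into the pybind11 module; the new examples below depend on it. A minimal import-time smoke test, assuming a build of this branch is on the Python path (the check itself is illustrative, not part of the commit):

import depthai as dai

# dai.node.Camera should now be registered alongside ColorCamera and MonoCamera.
# This only verifies the binding was compiled in; no device is required.
assert hasattr(dai.node, "Camera"), "Camera bindings missing from this build"
print(dai.node.Camera)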

examples/Camera/camera_isp.py

Lines changed: 62 additions & 0 deletions

#!/usr/bin/env python3

import cv2
import depthai as dai
import time

# Connect to device and start pipeline
with dai.Device() as device:
    # Device name
    print('Device name:', device.getDeviceName())
    # Bootloader version
    if device.getBootloaderVersion() is not None:
        print('Bootloader version:', device.getBootloaderVersion())
    # Print out usb speed
    print('Usb speed:', device.getUsbSpeed().name)
    # Connected cameras
    print('Connected cameras:', device.getConnectedCameraFeatures())

    # Create pipeline
    pipeline = dai.Pipeline()
    cams = device.getConnectedCameraFeatures()
    streams = []
    for cam in cams:
        print(str(cam), str(cam.socket), cam.socket)
        c = pipeline.create(dai.node.Camera)
        x = pipeline.create(dai.node.XLinkOut)
        c.isp.link(x.input)
        c.setBoardSocket(cam.socket)
        stream = str(cam.socket)
        if cam.name:
            stream = f'{cam.name} ({stream})'
        x.setStreamName(stream)
        streams.append(stream)

    # Start pipeline
    device.startPipeline(pipeline)
    fpsCounter = {}
    lastFpsCount = {}
    tfps = time.time()
    while not device.isClosed():
        queueNames = device.getQueueEvents(streams)
        for stream in queueNames:
            messages = device.getOutputQueue(stream).tryGetAll()
            fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages)
            for message in messages:
                # Display arrived frames
                if type(message) == dai.ImgFrame:
                    # render fps
                    fps = lastFpsCount.get(stream, 0)
                    frame = message.getCvFrame()
                    cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255))
                    cv2.imshow(stream, frame)

        if time.time() - tfps >= 1.0:
            scale = time.time() - tfps
            for stream in fpsCounter.keys():
                lastFpsCount[stream] = fpsCounter[stream] / scale
            fpsCounter = {}
            tfps = time.time()

        if cv2.waitKey(1) == ord('q'):
            break
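
The example above opens one window per connected camera and overlays a rolling FPS estimate. A stripped-down variant that streams the ISP output of a single socket, assuming the same API surface as the example (the RGB socket choice is illustrative):

import cv2
import depthai as dai

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.Camera)
cam.setBoardSocket(dai.CameraBoardSocket.RGB)  # illustrative socket choice
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("isp")
cam.isp.link(xout.input)

with dai.Device(pipeline) as device:
    q = device.getOutputQueue("isp", maxSize=4, blocking=False)
    while True:
        cv2.imshow("isp", q.get().getCvFrame())
        if cv2.waitKey(1) == ord('q'):
            break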

examples/Camera/camera_preview.py

Lines changed: 62 additions & 0 deletions

#!/usr/bin/env python3

import cv2
import depthai as dai
import time

# Connect to device and start pipeline
with dai.Device(dai.OpenVINO.DEFAULT_VERSION, dai.UsbSpeed.SUPER_PLUS) as device:
    # Device name
    print('Device name:', device.getDeviceName())
    # Bootloader version
    if device.getBootloaderVersion() is not None:
        print('Bootloader version:', device.getBootloaderVersion())
    # Print out usb speed
    print('Usb speed:', device.getUsbSpeed().name)
    # Connected cameras
    print('Connected cameras:', device.getConnectedCameraFeatures())

    # Create pipeline
    pipeline = dai.Pipeline()
    cams = device.getConnectedCameraFeatures()
    streams = []
    for cam in cams:
        print(str(cam), str(cam.socket), cam.socket)
        c = pipeline.create(dai.node.Camera)
        x = pipeline.create(dai.node.XLinkOut)
        c.preview.link(x.input)
        c.setBoardSocket(cam.socket)
        stream = str(cam.socket)
        if cam.name:
            stream = f'{cam.name} ({stream})'
        x.setStreamName(stream)
        streams.append(stream)

    # Start pipeline
    device.startPipeline(pipeline)
    fpsCounter = {}
    lastFpsCount = {}
    tfps = time.time()
    while not device.isClosed():
        queueNames = device.getQueueEvents(streams)
        for stream in queueNames:
            messages = device.getOutputQueue(stream).tryGetAll()
            fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages)
            for message in messages:
                # Display arrived frames
                if type(message) == dai.ImgFrame:
                    # render fps
                    fps = lastFpsCount.get(stream, 0)
                    frame = message.getCvFrame()
                    cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255))
                    cv2.imshow(stream, frame)

        if time.time() - tfps >= 1.0:
            scale = time.time() - tfps
            for stream in fpsCounter.keys():
                lastFpsCount[stream] = fpsCounter[stream] / scale
            fpsCounter = {}
            tfps = time.time()

        if cv2.waitKey(1) == ord('q'):
            break
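
The only functional differences from camera_isp.py are the Device constructor (pinning the OpenVINO version and requesting SUPER_PLUS USB speed) and the linked output (preview instead of isp). A sketch of how the shared per-camera setup could be factored out, assuming the same API surface; the helper name and its output parameter are illustrative:

import depthai as dai

def build_pipeline(device, output="preview"):
    """Create one Camera + XLinkOut pair per connected camera.

    output selects which Camera output to link: "preview" or "isp".
    """
    pipeline = dai.Pipeline()
    streams = []
    for cam in device.getConnectedCameraFeatures():
        c = pipeline.create(dai.node.Camera)
        x = pipeline.create(dai.node.XLinkOut)
        getattr(c, output).link(x.input)  # c.preview or c.isp
        c.setBoardSocket(cam.socket)
        stream = f'{cam.name} ({cam.socket})' if cam.name else str(cam.socket)
        x.setStreamName(stream)
        streams.append(stream)
    return pipeline, streams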

examples/ImageManip/image_manip_warp_mesh.py

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
 maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3

 # Warp preview frame 1
-manip1 = pipeline.create(dai.node.Warp)
+manip1 = pipeline.create(dai.node.ImageManip)
 # Create a custom warp mesh
 tl = dai.Point2f(20, 20)
 tr = dai.Point2f(460, 20)
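
For intuition, a warp mesh maps a grid of source points onto a regular output grid. A host-side illustration with plain OpenCV, unrelated to the device API, reusing the tl/tr corner values from the example's mesh:

import cv2
import numpy as np

img = np.zeros((480, 480, 3), np.uint8)
cv2.putText(img, "warp me", (140, 240), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (255, 255, 255))

# Pull the quad spanned by the mesh corners out to the full frame,
# mirroring what the on-device warp does with its mesh points
src = np.float32([[20, 20], [460, 20], [20, 460], [460, 460]])
dst = np.float32([[0, 0], [480, 0], [0, 480], [480, 480]])
warped = cv2.warpPerspective(img, cv2.getPerspectiveTransform(src, dst), (480, 480))
cv2.imshow("warped", warped)
cv2.waitKey(0)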
Lines changed: 62 additions & 0 deletions

#!/usr/bin/env python3

import cv2
import depthai as dai
import numpy as np

# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = False
# Better handling for occlusions:
lr_check = True

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
depth = pipeline.create(dai.node.StereoDepth)
xout = pipeline.create(dai.node.XLinkOut)

xout.setStreamName("disparity")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
depth.setLeftRightCheck(lr_check)
depth.setExtendedDisparity(extended_disparity)
depth.setSubpixel(subpixel)

# Create a colormap
colormap = pipeline.create(dai.node.ImageManip)
colormap.initialConfig.setColormap(dai.Colormap.STEREO_TURBO, depth.initialConfig.getMaxDisparity())
colormap.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)

# Linking
monoLeft.out.link(depth.left)
monoRight.out.link(depth.right)
depth.disparity.link(colormap.inputImage)
colormap.out.link(xout.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queue will be used to get the disparity frames from the outputs defined above
    q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)

    while True:
        inDisparity = q.get()  # blocking call, will wait until new data has arrived
        frame = inDisparity.getCvFrame()
        cv2.imshow("disparity", frame)

        if cv2.waitKey(1) == ord('q'):
            break
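
Here the colorization happens on-device: the ImageManip config's setColormap call turns the 8-bit disparity into an NV12 color frame before it ever reaches the host. For comparison, a host-side equivalent, assuming depth.disparity were linked straight to the XLinkOut instead of through the colormap node (COLORMAP_TURBO requires OpenCV >= 4.1):

# Inside the example's while loop, in place of the cv2.imshow line:
raw = inDisparity.getFrame()
# Normalize to the full 0..255 range using the pipeline's max disparity, then colorize
vis = (raw * (255.0 / depth.initialConfig.getMaxDisparity())).astype('uint8')
vis = cv2.applyColorMap(vis, cv2.COLORMAP_TURBO)
cv2.imshow("disparity (host colorized)", vis)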
Lines changed: 62 additions & 0 deletions

#!/usr/bin/env python3

import depthai as dai

# Create pipeline
pipeline = dai.Pipeline()

# Create left/right mono cameras for Stereo depth
monoLeft = pipeline.create(dai.node.MonoCamera)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)

monoRight = pipeline.create(dai.node.MonoCamera)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Create a node that will produce the depth map
depth = pipeline.create(dai.node.StereoDepth)
depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
depth.setLeftRightCheck(False)
depth.setExtendedDisparity(False)
# Subpixel disparity is of UINT16 format, which is unsupported by VideoEncoder
depth.setSubpixel(False)
monoLeft.out.link(depth.left)
monoRight.out.link(depth.right)

# Colormap
colormap = pipeline.create(dai.node.ImageManip)
colormap.initialConfig.setColormap(dai.Colormap.TURBO, depth.initialConfig.getMaxDisparity())
colormap.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)

videoEnc = pipeline.create(dai.node.VideoEncoder)
# Depth resolution/FPS will be the same as mono resolution/FPS
videoEnc.setDefaultProfilePreset(monoLeft.getFps(), dai.VideoEncoderProperties.Profile.H264_HIGH)

# Link
depth.disparity.link(colormap.inputImage)
colormap.out.link(videoEnc.input)

xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("enc")
videoEnc.bitstream.link(xout.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queue will be used to get the encoded data from the output defined above
    q = device.getOutputQueue(name="enc")

    # The .h264 file is a raw stream file (not playable yet)
    with open('disparity.h264', 'wb') as videoFile:
        print("Press Ctrl+C to stop encoding...")
        try:
            while True:
                videoFile.write(q.get().getData())
        except KeyboardInterrupt:
            # Keyboard interrupt (Ctrl + C) detected
            pass

    print("To view the encoded data, convert the stream file (.h264) into a video file (.mp4) using the command below:")
    print("ffmpeg -framerate 30 -i disparity.h264 -c copy video.mp4")

src/DeviceBindings.cpp

Lines changed: 1 addition & 1 deletion

@@ -555,7 +555,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){
     .def("flashFactoryEepromClear", [](DeviceBase& d) { py::gil_scoped_release release; d.flashFactoryEepromClear(); }, DOC(dai, DeviceBase, flashFactoryEepromClear))
     .def("setTimesync", [](DeviceBase& d, std::chrono::milliseconds p, int s, bool r) { py::gil_scoped_release release; return d.setTimesync(p,s,r); }, DOC(dai, DeviceBase, setTimesync))
     .def("setTimesync", [](DeviceBase& d, bool e) { py::gil_scoped_release release; return d.setTimesync(e); }, py::arg("enable"), DOC(dai, DeviceBase, setTimesync, 2))
-    .def("getDeviceName", [](DeviceBase& d) { py::gil_scoped_release release; return d.getDeviceName(); }, DOC(dai, DeviceBase, getDeviceName))
+    .def("getDeviceName", [](DeviceBase& d) { std::string name; { py::gil_scoped_release release; name = d.getDeviceName(); } return py::bytes(name).attr("decode")("utf-8", "replace"); }, DOC(dai, DeviceBase, getDeviceName))
     ;

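The rewritten lambda copies the name out while the GIL is released, then decodes it on the Python side with errors="replace", so a device name containing invalid UTF-8 yields replacement characters instead of raising at the pybind11 boundary. The decode semantics are standard Python:

raw = b"OAK-D \xff Pro"  # hypothetical device name with an invalid UTF-8 byte

# Strict decoding (the old binding's implicit behavior) would raise:
# raw.decode("utf-8")  -> UnicodeDecodeError

# The new binding's behavior: invalid bytes become U+FFFD
print(raw.decode("utf-8", "replace"))  # OAK-D � Pro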
src/pipeline/CommonBindings.cpp

Lines changed: 31 additions & 0 deletions

@@ -19,6 +19,7 @@
 #include "depthai-shared/common/DetectionParserOptions.hpp"
 #include "depthai-shared/common/RotatedRect.hpp"
 #include "depthai-shared/common/Rect.hpp"
+#include "depthai-shared/common/Colormap.hpp"

 // depthai
 #include "depthai/common/CameraFeatures.hpp"
@@ -52,6 +53,7 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){
     py::class_<RotatedRect> rotatedRect(m, "RotatedRect", DOC(dai, RotatedRect));
     py::class_<Rect> rect(m, "Rect", DOC(dai, Rect));
     py::enum_<CameraExposureOffset> cameraExposureOffset(m, "CameraExposureOffset");
+    py::enum_<Colormap> colormap(m, "Colormap", DOC(dai, Colormap));

     ///////////////////////////////////////////////////////////////////////
     ///////////////////////////////////////////////////////////////////////
@@ -297,4 +299,33 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){
         .value("MIDDLE", CameraExposureOffset::MIDDLE)
         .value("END", CameraExposureOffset::END)
     ;
+
+    colormap
+        .value("NONE", Colormap::NONE)
+        .value("JET", Colormap::JET)
+        .value("TURBO", Colormap::TURBO)
+        .value("STEREO_JET", Colormap::STEREO_JET)
+        .value("STEREO_TURBO", Colormap::STEREO_TURBO)
+        // .value("AUTUMN", Colormap::AUTUMN)
+        // .value("BONE", Colormap::BONE)
+        // .value("WINTER", Colormap::WINTER)
+        // .value("RAINBOW", Colormap::RAINBOW)
+        // .value("OCEAN", Colormap::OCEAN)
+        // .value("SUMMER", Colormap::SUMMER)
+        // .value("SPRING", Colormap::SPRING)
+        // .value("COOL", Colormap::COOL)
+        // .value("HSV", Colormap::HSV)
+        // .value("PINK", Colormap::PINK)
+        // .value("HOT", Colormap::HOT)
+        // .value("PARULA", Colormap::PARULA)
+        // .value("MAGMA", Colormap::MAGMA)
+        // .value("INFERNO", Colormap::INFERNO)
+        // .value("PLASMA", Colormap::PLASMA)
+        // .value("VIRIDIS", Colormap::VIRIDIS)
+        // .value("CIVIDIS", Colormap::CIVIDIS)
+        // .value("TWILIGHT", Colormap::TWILIGHT)
+        // .value("TWILIGHT_SHIFTED", Colormap::TWILIGHT_SHIFTED)
+        // .value("DEEPGREEN", Colormap::DEEPGREEN)
+    ;
+
 }
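
Only the five colormaps the new examples use are bound for now; the remaining OpenCV-style entries are left commented out. A quick check of what this exposes, assuming a build of this branch:

import depthai as dai

# Values bound by this commit; the commented-out ones are not yet exposed
for name in ("NONE", "JET", "TURBO", "STEREO_JET", "STEREO_TURBO"):
    print(getattr(dai.Colormap, name))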
