Skip to content

Commit 11a9a34

Browse files
Merge pull request #352 from luxonis/develop
Release v2.10.0.0
2 parents de1692a + 9b24e44 commit 11a9a34

File tree

9 files changed

+581
-104
lines changed

9 files changed

+581
-104
lines changed

examples/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,3 +127,5 @@ add_python_example(imu_rotation_vector imu_rotation_vector.py)
127127
add_python_example(rgb_depth_aligned rgb_depth_aligned.py)
128128
add_python_example(edge_detector edge_detector.py)
129129
add_python_example(script_camera_control script_camera_control.py)
130+
add_python_example(feature_tracker feature_tracker.py)
131+
add_python_example(corner_detector corner_detector.py)

examples/corner_detector.py

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
#!/usr/bin/env python3

import cv2
import depthai as dai

# Build the device pipeline: each mono camera feeds a FeatureTracker,
# and XLink streams carry passthrough frames, tracked features and config.
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

# Name every XLink stream so the host-side queues can refer to them.
for xlinkNode, streamName in (
    (xoutPassthroughFrameLeft, "passthroughFrameLeft"),
    (xoutTrackedFeaturesLeft, "trackedFeaturesLeft"),
    (xoutPassthroughFrameRight, "passthroughFrameRight"),
    (xoutTrackedFeaturesRight, "trackedFeaturesRight"),
    (xinTrackedFeaturesConfig, "trackedFeaturesConfig"),
):
    xlinkNode.setStreamName(streamName)

# Camera properties: 720p on both board sockets.
for camera, socket in ((monoLeft, dai.CameraBoardSocket.LEFT),
                       (monoRight, dai.CameraBoardSocket.RIGHT)):
    camera.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
    camera.setBoardSocket(socket)

# Corner detection only: disable optical flow on both trackers.
featureTrackerLeft.initialConfig.setMotionEstimator(False)
featureTrackerRight.initialConfig.setMotionEstimator(False)

# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)

monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)

# Host-side copy of the tracker config; toggled at runtime via 's'.
featureTrackerConfig = featureTrackerRight.initialConfig.get()

print("Press 's' to switch between Harris and Shi-Thomasi corner detector!")

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    rightWindowName = "right"

    def drawFeatures(frame, features):
        """Draw each detected feature as a small filled circle onto frame (in place)."""
        for feature in features:
            center = (int(feature.position.x), int(feature.position.y))
            cv2.circle(frame, center, 2, (0, 0, 255), -1, cv2.LINE_AA, 0)

    while True:
        # Grayscale passthrough frames converted to BGR so overlays are colored.
        leftFrame = cv2.cvtColor(passthroughImageLeftQueue.get().getFrame(), cv2.COLOR_GRAY2BGR)
        rightFrame = cv2.cvtColor(passthroughImageRightQueue.get().getFrame(), cv2.COLOR_GRAY2BGR)

        drawFeatures(leftFrame, outputFeaturesLeftQueue.get().trackedFeatures)
        drawFeatures(rightFrame, outputFeaturesRightQueue.get().trackedFeatures)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        if key == ord('s'):
            # Toggle the detector type and push the new config to both trackers.
            cornerType = dai.FeatureTrackerConfig.CornerDetector.Type
            if featureTrackerConfig.cornerDetector.type == cornerType.HARRIS:
                featureTrackerConfig.cornerDetector.type = cornerType.SHI_THOMASI
                print("Switching to Shi-Thomasi")
            else:
                featureTrackerConfig.cornerDetector.type = cornerType.HARRIS
                print("Switching to Harris")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)

examples/feature_tracker.py

Lines changed: 173 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,173 @@
1+
#!/usr/bin/env python3
2+
3+
import cv2
4+
import depthai as dai
5+
from collections import deque
6+
class FeatureTrackerDrawer:
    """Accumulates per-feature motion paths and renders them with OpenCV.

    Keeps a deque of recent positions for every tracked feature id and
    draws each path as a polyline ending in a dot at the latest position.
    """

    lineColor = (200, 0, 200)
    pointColor = (0, 0, 255)
    circleRadius = 2
    # Upper bound selectable on the trackbar.
    maxTrackedFeaturesPathLength = 30
    # For how many frames the feature is tracked (trackbar-controlled, class-wide).
    trackedFeaturesPathLength = 10

    trackedIDs = None
    trackedFeaturesPath = None

    def onTrackBar(self, val):
        # Trackbar callback: update the class-wide path length.
        # (Dead `pass` statement removed.)
        FeatureTrackerDrawer.trackedFeaturesPathLength = val

    def trackFeaturePath(self, features):
        """Fold a new batch of tracked features into the stored paths.

        Appends each feature's position to its path (trimmed to the
        configured length) and drops paths whose id is no longer tracked.
        """
        newTrackedIDs = set()
        # Never trim below one point; re-read the length each call since the
        # trackbar may have changed it.
        maxLength = max(1, FeatureTrackerDrawer.trackedFeaturesPathLength)

        for currentFeature in features:
            currentID = currentFeature.id
            newTrackedIDs.add(currentID)

            # deque is mutated in place; no reassignment into the dict needed.
            path = self.trackedFeaturesPath.setdefault(currentID, deque())
            path.append(currentFeature.position)
            while len(path) > maxLength:
                path.popleft()

        # Drop paths of features that disappeared this frame.
        # (Loop variable renamed: `id` shadowed the builtin.)
        for staleID in self.trackedIDs - newTrackedIDs:
            self.trackedFeaturesPath.pop(staleID)

        self.trackedIDs = newTrackedIDs

    def drawFeatures(self, img):
        """Draw every stored feature path onto img (modified in place)."""
        # Keep the trackbar position in sync with the current length.
        cv2.setTrackbarPos(self.trackbarName, self.windowName, FeatureTrackerDrawer.trackedFeaturesPathLength)

        for path in self.trackedFeaturesPath.values():
            if not path:
                # Defensive: paths are created non-empty, but an empty deque
                # would make path[-1] below raise.
                continue
            # Polyline through the historical positions...
            for j in range(len(path) - 1):
                src = (int(path[j].x), int(path[j].y))
                dst = (int(path[j + 1].x), int(path[j + 1].y))
                cv2.line(img, src, dst, self.lineColor, 1, cv2.LINE_AA, 0)
            # ...and a dot at the most recent one.
            newest = path[-1]
            cv2.circle(img, (int(newest.x), int(newest.y)), self.circleRadius, self.pointColor, -1, cv2.LINE_AA, 0)

    def __init__(self, trackbarName, windowName):
        """Create the OpenCV window plus the path-length trackbar."""
        self.trackbarName = trackbarName
        self.windowName = windowName
        cv2.namedWindow(windowName)
        cv2.createTrackbar(trackbarName, windowName, FeatureTrackerDrawer.trackedFeaturesPathLength, FeatureTrackerDrawer.maxTrackedFeaturesPathLength, self.onTrackBar)
        self.trackedIDs = set()
        self.trackedFeaturesPath = dict()
# Build the pipeline: a FeatureTracker per mono camera, with XLink
# streams for passthrough frames, tracked features and runtime config.
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

# Name every XLink stream so the host-side queues can refer to them.
for xlinkNode, streamName in (
    (xoutPassthroughFrameLeft, "passthroughFrameLeft"),
    (xoutTrackedFeaturesLeft, "trackedFeaturesLeft"),
    (xoutPassthroughFrameRight, "passthroughFrameRight"),
    (xoutTrackedFeaturesRight, "trackedFeaturesRight"),
    (xinTrackedFeaturesConfig, "trackedFeaturesConfig"),
):
    xlinkNode.setStreamName(streamName)

# Camera properties: 720p on both board sockets.
for camera, socket in ((monoLeft, dai.CameraBoardSocket.LEFT),
                       (monoRight, dai.CameraBoardSocket.RIGHT)):
    camera.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
    camera.setBoardSocket(socket)

# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)

monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)

# By default the least amount of resources are allocated;
# increasing it improves performance
numShaves = 2
numMemorySlices = 2
featureTrackerLeft.setHardwareResources(numShaves, numMemorySlices)
featureTrackerRight.setHardwareResources(numShaves, numMemorySlices)

# Host-side copy of the tracker config; toggled at runtime via 's'.
featureTrackerConfig = featureTrackerRight.initialConfig.get()
print("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!")

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)

    rightWindowName = "right"
    rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName)

    while True:
        # Grayscale passthrough frames converted to BGR so overlays are colored.
        leftFrame = cv2.cvtColor(passthroughImageLeftQueue.get().getFrame(), cv2.COLOR_GRAY2BGR)
        rightFrame = cv2.cvtColor(passthroughImageRightQueue.get().getFrame(), cv2.COLOR_GRAY2BGR)

        leftFeatureDrawer.trackFeaturePath(outputFeaturesLeftQueue.get().trackedFeatures)
        leftFeatureDrawer.drawFeatures(leftFrame)

        rightFeatureDrawer.trackFeaturePath(outputFeaturesRightQueue.get().trackedFeatures)
        rightFeatureDrawer.drawFeatures(rightFrame)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        if key == ord('s'):
            # Toggle the motion estimator and push the new config to both trackers.
            motionType = dai.FeatureTrackerConfig.MotionEstimator.Type
            if featureTrackerConfig.motionEstimator.type == motionType.LUCAS_KANADE_OPTICAL_FLOW:
                featureTrackerConfig.motionEstimator.type = motionType.HW_MOTION_ESTIMATION
                print("Switching to hardware accelerated motion estimation")
            else:
                featureTrackerConfig.motionEstimator.type = motionType.LUCAS_KANADE_OPTICAL_FLOW
                print("Switching to Lucas-Kanade optical flow")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)

examples/install_requirements.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
#!/usr/bin/env python3
2+
import platform
23
import sys, os, subprocess
34
import argparse
45
import re
@@ -45,7 +46,29 @@ def hasWhitespace(string):
# Check if in virtual environment
in_venv = getattr(sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix)) != sys.prefix
pip_call = [sys.executable, "-m", "pip"]
pip_installed = True
pip_install = pip_call + ["install"]

# Probe "python -m pip --version"; a non-zero exit means pip is broken
# or missing. (Unused `as ex` binding removed.)
try:
    subprocess.check_call(pip_call + ["--version"])
except subprocess.CalledProcessError:
    pip_installed = False

if not pip_installed:
    err_str = "Issues with \"pip\" package detected! Follow the official instructions to install - https://pip.pypa.io/en/stable/installation/"
    raise RuntimeError(err_str)

if sys.version_info[0] != 3:
    raise RuntimeError("Examples require Python 3 to run (detected: Python {})".format(sys.version_info[0]))

# Apple Silicon has no prebuilt wheels; point users at the documented workaround.
if platform.machine() == "arm64" and platform.system() == "Darwin":
    err_str = "There are no prebuilt wheels for M1 processors. Please open the following link for a solution - https://discuss.luxonis.com/d/69-running-depthai-on-apple-m1-based-macs"
    raise RuntimeError(err_str)

# On ARM boards OpenCV may have to build from source for these Python minors.
is_pi = platform.machine().startswith("arm") or platform.machine().startswith("aarch")
if is_pi and sys.version_info[1] in (7, 9):
    print("[WARNING] There are no prebuilt wheels for Python 3.{} for OpenCV, building process on this device may be long and unstable".format(sys.version_info[1]))

if not in_venv:
    pip_install.append("--user")
5174

0 commit comments

Comments
 (0)