|
| 1 | +#!/usr/bin/env python3 |
| 2 | + |
| 3 | +import cv2 |
| 4 | +import depthai as dai |
| 5 | +from collections import deque |
| 6 | + |
class FeatureTrackerDrawer:
    """Draws tracked feature points and their motion trails onto frames.

    Keeps one deque of recent positions per feature id; the trail length is
    adjustable at runtime through an OpenCV trackbar.
    """

    # BGR color of the motion-trail lines
    lineColor = (200, 0, 200)
    # BGR color of the marker at a feature's newest position
    pointColor = (0, 0, 255)
    circleRadius = 2
    # Upper bound of the trackbar (maximum trail length in frames)
    maxTrackedFeaturesPathLength = 30
    # for how many frames the feature is tracked (current trail length,
    # shared across instances because the trackbar writes the class attribute)
    trackedFeaturesPathLength = 10

    # Placeholders; real containers are created per instance in __init__
    trackedIDs = None
    trackedFeaturesPath = None

    def onTrackBar(self, val):
        """Trackbar callback: update the shared trail length."""
        FeatureTrackerDrawer.trackedFeaturesPathLength = val

    def trackFeaturePath(self, features):
        """Append the newest position of each feature and prune lost features.

        `features` is an iterable of objects exposing `.id` and `.position`
        (e.g. dai.TrackedFeature).
        """
        newTrackedIDs = set()
        for currentFeature in features:
            currentID = currentFeature.id
            newTrackedIDs.add(currentID)

            # The deque is mutated in place, so no write-back is needed.
            path = self.trackedFeaturesPath.setdefault(currentID, deque())
            path.append(currentFeature.position)

            # Trim to the configured trail length, always keeping >= 1 point.
            while len(path) > max(1, FeatureTrackerDrawer.trackedFeaturesPathLength):
                path.popleft()

        # Drop the paths of features the tracker no longer reports.
        for lostID in self.trackedIDs - newTrackedIDs:
            self.trackedFeaturesPath.pop(lostID)

        self.trackedIDs = newTrackedIDs

    def drawFeatures(self, img):
        """Draw every feature's trail and current position onto `img`."""
        # Keep the trackbar in sync with the (possibly programmatic) value.
        cv2.setTrackbarPos(self.trackbarName, self.windowName,
                           FeatureTrackerDrawer.trackedFeaturesPathLength)

        for path in self.trackedFeaturesPath.values():
            # Trail: connect consecutive historical positions.
            for j in range(len(path) - 1):
                src = (int(path[j].x), int(path[j].y))
                dst = (int(path[j + 1].x), int(path[j + 1].y))
                cv2.line(img, src, dst, self.lineColor, 1, cv2.LINE_AA, 0)
            # Newest position: filled circle.
            j = len(path) - 1
            cv2.circle(img, (int(path[j].x), int(path[j].y)),
                       self.circleRadius, self.pointColor, -1, cv2.LINE_AA, 0)

    def __init__(self, trackbarName, windowName):
        """Create the display window and the trail-length trackbar."""
        self.trackbarName = trackbarName
        self.windowName = windowName
        cv2.namedWindow(windowName)
        cv2.createTrackbar(trackbarName, windowName,
                           FeatureTrackerDrawer.trackedFeaturesPathLength,
                           FeatureTrackerDrawer.maxTrackedFeaturesPathLength,
                           self.onTrackBar)
        self.trackedIDs = set()            # ids seen in the latest frame
        self.trackedFeaturesPath = dict()  # feature id -> deque of positions
| 72 | + |
| 73 | + |
# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
colorCam = pipeline.create(dai.node.ColorCamera)
featureTrackerColor = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameColor = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesColor = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

xoutPassthroughFrameColor.setStreamName("passthroughFrameColor")
xoutTrackedFeaturesColor.setStreamName("trackedFeaturesColor")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")

# Properties
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)

# Downscale the 1080p stream before feature tracking to reduce load;
# set to False to feed the full-resolution ISP output instead.
downscaleColor = True
if downscaleColor:
    colorCam.setIspScale(2, 3)
    colorCam.video.link(featureTrackerColor.inputImage)
else:
    colorCam.isp.link(featureTrackerColor.inputImage)

# Linking
featureTrackerColor.passthroughInputImage.link(xoutPassthroughFrameColor.input)
featureTrackerColor.outputFeatures.link(xoutTrackedFeaturesColor.input)
xinTrackedFeaturesConfig.out.link(featureTrackerColor.inputConfig)

# By default the least amount of resources are allocated;
# increasing it improves performance
numShaves = 2
numMemorySlices = 2
featureTrackerColor.setHardwareResources(numShaves, numMemorySlices)
# Snapshot of the tracker config; mutated and re-sent at runtime on 's'.
featureTrackerConfig = featureTrackerColor.initialConfig.get()

print("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!")
| 111 | + |
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Queues for frames/features coming from the device, and for config
    # updates going to it.
    passthroughQueue = device.getOutputQueue("passthroughFrameColor", 8, False)
    featuresQueue = device.getOutputQueue("trackedFeaturesColor", 8, False)
    configQueue = device.getInputQueue("trackedFeaturesConfig")

    windowName = "color"
    drawer = FeatureTrackerDrawer("Feature tracking duration (frames)", windowName)

    while True:
        # Latest passthrough frame, converted to an OpenCV image.
        frame = passthroughQueue.get().getCvFrame()

        # Update trails with the features tracked on this frame, then draw.
        drawer.trackFeaturePath(featuresQueue.get().trackedFeatures)
        drawer.drawFeatures(frame)

        # Show the frame
        cv2.imshow(windowName, frame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        if key == ord('s'):
            # Toggle between the two motion-estimator types.
            lkType = dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW
            hwType = dai.FeatureTrackerConfig.MotionEstimator.Type.HW_MOTION_ESTIMATION
            if featureTrackerConfig.motionEstimator.type == lkType:
                featureTrackerConfig.motionEstimator.type = hwType
                print("Switching to hardware accelerated motion estimation")
            else:
                featureTrackerConfig.motionEstimator.type = lkType
                print("Switching to Lucas-Kanade optical flow")

            # Send the updated config down to the tracker node.
            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            configQueue.send(cfg)
0 commit comments