Skip to content

Commit b010e8b

Browse files
author
Matevz Morato
committed
Remove Color/Mono camera nodes from the examples
1 parent 2630df4 commit b010e8b

File tree

12 files changed

+36
-465
lines changed

12 files changed

+36
-465
lines changed

examples/python/RVC2/NNArchive/nn_archive.py

Lines changed: 0 additions & 106 deletions
This file was deleted.

examples/python/RVC2/NNArchive/nn_archive_superblob.py

Lines changed: 0 additions & 69 deletions
This file was deleted.

examples/python/RVC2/ObjectTracker/object_tracker.py

Lines changed: 18 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,83 +1,54 @@
11
#!/usr/bin/env python3
22

3-
from pathlib import Path
43
import cv2
54
import depthai as dai
6-
import numpy as np
75
import time
8-
import argparse
96

10-
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
11-
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
127

13-
nnPathDefault = str((Path(__file__).parent / Path('../../models/mobilenet-ssd_openvino_2021.4_5shave.blob')).resolve().absolute())
14-
parser = argparse.ArgumentParser()
15-
parser.add_argument('nnPath', nargs='?', help="Path to mobilenet detection network blob", default=nnPathDefault)
16-
parser.add_argument('-ff', '--full_frame', action="store_true", help="Perform tracking on full RGB frame", default=False)
17-
18-
args = parser.parse_args()
19-
20-
fullFrameTracking = args.full_frame
8+
fullFrameTracking = False
219

2210
# Create pipeline
2311
with dai.Pipeline() as pipeline:
2412
# Define sources and outputs
25-
camRgb = pipeline.create(dai.node.ColorCamera)
26-
spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
27-
monoLeft = pipeline.create(dai.node.MonoCamera)
28-
monoRight = pipeline.create(dai.node.MonoCamera)
29-
stereo = pipeline.create(dai.node.StereoDepth)
30-
objectTracker = pipeline.create(dai.node.ObjectTracker)
31-
32-
# Properties
33-
camRgb.setPreviewSize(300, 300)
34-
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
35-
camRgb.setInterleaved(False)
36-
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
13+
camRgb = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
14+
monoLeft = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
15+
monoRight = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)
3716

38-
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
39-
monoLeft.setCamera("left")
40-
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
41-
monoRight.setCamera("right")
17+
stereo = pipeline.create(dai.node.StereoDepth)
18+
leftOutput = monoLeft.requestOutput((640, 400))
19+
rightOutput = monoRight.requestOutput((640, 400))
20+
leftOutput.link(stereo.left)
21+
rightOutput.link(stereo.right)
4222

43-
# setting node configs
44-
# Align depth map to the perspective of RGB camera, on which inference is done
45-
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
46-
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
23+
spatialDetectionNetwork = pipeline.create(dai.node.SpatialDetectionNetwork).build(camRgb, stereo, "yolov6-nano")
24+
objectTracker = pipeline.create(dai.node.ObjectTracker)
4725

48-
spatialDetectionNetwork.setBlobPath(args.nnPath)
4926
spatialDetectionNetwork.setConfidenceThreshold(0.5)
5027
spatialDetectionNetwork.input.setBlocking(False)
5128
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
5229
spatialDetectionNetwork.setDepthLowerThreshold(100)
5330
spatialDetectionNetwork.setDepthUpperThreshold(5000)
31+
labelMap = spatialDetectionNetwork.getClasses()
5432

55-
objectTracker.setDetectionLabelsToTrack([15]) # track only person
33+
objectTracker.setDetectionLabelsToTrack([0]) # track only person
5634
# possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF
5735
objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
5836
# take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID
5937
objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.SMALLEST_ID)
6038

61-
# Linking
62-
monoLeft.out.link(stereo.left)
63-
monoRight.out.link(stereo.right)
64-
65-
camRgb.preview.link(spatialDetectionNetwork.input)
6639
preview = objectTracker.passthroughTrackerFrame.createOutputQueue()
6740
tracklets = objectTracker.out.createOutputQueue()
6841

6942
if fullFrameTracking:
70-
camRgb.setPreviewKeepAspectRatio(False)
71-
camRgb.video.link(objectTracker.inputTrackerFrame)
72-
objectTracker.inputTrackerFrame.setBlocking(False)
43+
camRgb.requestFullResolutionOutput().link(objectTracker.inputTrackerFrame)
7344
# do not block the pipeline if it's too slow on full frame
74-
objectTracker.inputTrackerFrame.setQueueSize(2)
45+
objectTracker.inputTrackerFrame.setBlocking(False)
46+
objectTracker.inputTrackerFrame.setMaxSize(1)
7547
else:
7648
spatialDetectionNetwork.passthrough.link(objectTracker.inputTrackerFrame)
7749

7850
spatialDetectionNetwork.passthrough.link(objectTracker.inputDetectionFrame)
7951
spatialDetectionNetwork.out.link(objectTracker.inputDetections)
80-
stereo.depth.link(spatialDetectionNetwork.inputDepth)
8152

8253
startTime = time.monotonic()
8354
counter = 0
@@ -87,6 +58,8 @@
8758
while(pipeline.isRunning()):
8859
imgFrame = preview.get()
8960
track = tracklets.get()
61+
assert isinstance(imgFrame, dai.ImgFrame), "Expected ImgFrame"
62+
assert isinstance(track, dai.Tracklets), "Expected Tracklets"
9063

9164
counter+=1
9265
current_time = time.monotonic()

examples/python/RVC2/StereoDepth/stereo.py

Lines changed: 0 additions & 81 deletions
This file was deleted.

0 commit comments

Comments (0)