#include <chrono>
#include <iostream>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

static constexpr auto FPS = 15;
int main() {
    dai::Pipeline pipeline;

    // Define a source - color camera
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    camRgb->setInterleaved(true);
    camRgb->setPreviewSize(640, 360);
    camRgb->setFps(FPS);

    auto left = pipeline.create<dai::node::MonoCamera>();
    left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    left->setBoardSocket(dai::CameraBoardSocket::LEFT);
    left->setFps(FPS);

    auto right = pipeline.create<dai::node::MonoCamera>();
    right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    right->setBoardSocket(dai::CameraBoardSocket::RIGHT);
    right->setFps(FPS);

    auto stereo = pipeline.create<dai::node::StereoDepth>();
    stereo->initialConfig.setMedianFilter(dai::MedianFilter::KERNEL_7x7);
    stereo->setLeftRightCheck(true);
    stereo->setExtendedDisparity(false);
    stereo->setSubpixel(false);
    stereo->setDepthAlign(dai::CameraBoardSocket::RGB);
    left->out.link(stereo->left);
    right->out.link(stereo->right);
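    // With extended disparity and subpixel both disabled, the disparity output
    // stays in the default 0..95 range (initialConfig.getMaxDisparity()), which
    // is the value a host-side visualizer would normalize against.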

    // Script node will sync the two streams on-device
    auto script = pipeline.create<dai::node::Script>();

    // Send all streams to the Script node so we can sync them
    stereo->disparity.link(script->inputs["disp_in"]);
    camRgb->preview.link(script->inputs["rgb_in"]);

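    // The script below buffers incoming frames per stream and releases one
    // frame from each stream only when their timestamps fall within
    // MS_THRESHOLD of each other, i.e. software syncing done on the device
    // rather than on the host.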
    script->setScript(R"(
        FPS=15 # Must match the FPS set on the pipeline nodes on the host side
        import time
        from datetime import timedelta
        import math

        # Timestamp threshold (in milliseconds) under which frames will be considered synced.
        # ceil(500 / FPS) ms is half the frame period, so at 15 FPS (~66.7 ms period) the
        # threshold is 34 ms and a frame can match at most one frame from each other stream.
        MS_THRESHOLD = math.ceil(500 / FPS)

        def check_sync(queues, timestamp):
            matching_frames = []
            for name, msgs in queues.items(): # Go through each available stream
                # node.warn(f"Stream {name}, len {len(msgs)}")
                for i, msg in enumerate(msgs): # Go through each frame of this stream
                    time_diff = abs(msg.getTimestamp() - timestamp)
                    if time_diff <= timedelta(milliseconds=MS_THRESHOLD): # If time diff is below threshold, this frame is considered in-sync
                        matching_frames.append(i) # Save the position of the synced frame, so we can later remove all older frames
                        break

            if len(matching_frames) == len(queues):
                # We have all frames synced. Remove the excess (older) ones
                for i, name in enumerate(queues):
                    queues[name] = queues[name][matching_frames[i]:]
                return True
            else:
                return False # We don't have synced frames yet

        names = ['disp', 'rgb']
        frames = dict() # Dict where we store all received frames
        for name in names:
            frames[name] = []

        while True:
            for name in names:
                f = node.io[name+"_in"].tryGet()
                if f is not None:
                    frames[name].append(f) # Save received frame

                    if check_sync(frames, f.getTimestamp()): # Check if we have a full set of synced frames
                        # Frames synced!
                        node.info("Synced frame!")
                        for stream, msgs in frames.items():
                            syncedF = msgs.pop(0) # Older (excess) frames were already removed, so position 0 holds the synced frame
                            node.info(f"{stream}, ts: {syncedF.getTimestamp()}, seq {syncedF.getSequenceNum()}")
                            node.io[stream+'_out'].send(syncedF) # Send synced frames to the host

            time.sleep(0.001) # Avoid lazy looping
    )");

    std::vector<std::string> scriptOut{"disp", "rgb"};
    for(auto& name : scriptOut) {
        auto xout = pipeline.create<dai::node::XLinkOut>();
        xout->setStreamName(name);
        script->outputs[name + "_out"].link(xout->input);
    }

    dai::Device device(pipeline);
    // Frames are already synced on the device, so the host can simply block on
    // get() for each stream. Note that the loop below iterates over a std::map,
    // which is ordered alphabetically ("disp" before "rgb"), not by this vector.
    std::vector<std::string> names{"rgb", "disp"};
    std::map<std::string, std::shared_ptr<dai::DataOutputQueue>> streams;
    for(auto& name : names) {
        streams[name] = device.getOutputQueue(name);
    }
    while(true) {
        for(auto& iter : streams) {
            auto name = iter.first;
            auto queue = iter.second;
            auto img = queue->get<dai::ImgFrame>();
            std::cout << "Stream " << name << ", timestamp: " << img->getTimestamp().time_since_epoch().count()
                      << ", sequence number: " << img->getSequenceNum() << std::endl;
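            // Optional: display the synced frames instead of only printing
            // metadata. A minimal sketch, assuming depthai was built with
            // OpenCV support (getCvFrame() is available only then) and the
            // OpenCV headers are included:
            //     cv::imshow(name, img->getCvFrame());
            //     if(cv::waitKey(1) == 'q') return 0;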
        }
    }
}
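// Build sketch (an assumption, not part of this example): with a depthai-core
// installation discoverable by CMake, linking typically looks like
//     find_package(depthai CONFIG REQUIRED)
//     target_link_libraries(myapp PRIVATE depthai::core)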