Feature/snaps events v2 integration #1492
base: develop
Changes from 26 commits
C++ example (diff):

```diff
@@ -1,44 +1,100 @@
 #include <chrono>
 #include <iostream>
+#include <opencv2/opencv.hpp>
+#include <sstream>
 #include <string>
 
 #include "depthai/depthai.hpp"
 #include "depthai/utility/EventsManager.hpp"
 
-int main(int argc, char* argv[]) {
+// Helper function to normalize frame coordinates
+cv::Rect frameNorm(const cv::Mat& frame, const dai::Point2f& topLeft, const dai::Point2f& bottomRight) {
+    float width = frame.cols, height = frame.rows;
+    return cv::Rect(cv::Point(topLeft.x * width, topLeft.y * height), cv::Point(bottomRight.x * width, bottomRight.y * height));
+}
+
+int main() {
     dai::Pipeline pipeline(true);
 
+    // Enter your hub team's api-key
     auto eventsManager = std::make_shared<dai::utility::EventsManager>();
-    eventsManager->setLogResponse(true);
-    // Color camera node
+    eventsManager->setToken("");
+    eventsManager->setLogResponse(false);
+    auto fileGroup = std::make_shared<dai::utility::FileGroup>();
 
     auto camRgb = pipeline.create<dai::node::Camera>()->build();
-    auto* preview = camRgb->requestOutput(std::make_pair(256, 256));
-
-    auto previewQ = preview->createOutputQueue();
+    auto detectionNetwork = pipeline.create<dai::node::DetectionNetwork>();
+    dai::NNModelDescription modelDescription;
+    modelDescription.model = "yolov6-nano";
+    detectionNetwork->build(camRgb, modelDescription);
+    auto labelMap = detectionNetwork->getClasses();
 
-    pipeline.start();
-    bool sent = false;
-    eventsManager->sendEvent("test", nullptr, {}, {"tag1", "tag2"}, {{"key1", "value1"}});
+    // Create output queues
+    auto qRgb = detectionNetwork->passthrough.createOutputQueue();
+    auto qDet = detectionNetwork->out.createOutputQueue();
 
-    std::this_thread::sleep_for(std::chrono::milliseconds(7000));
+    pipeline.start();
 
-    auto fileData = std::make_shared<dai::utility::EventData>("abc", "test_bin.txt", "text/plain");
-    std::vector<std::shared_ptr<dai::utility::EventData>> data;
-    data.emplace_back(fileData);
-    eventsManager->sendEvent("testdata", nullptr, data, {"tag3", "tag4"}, {{"key8", "value8"}});
+    int counter = 0;
     while(pipeline.isRunning()) {
-        auto rgb = previewQ->get<dai::ImgFrame>();
+        auto inRgb = qRgb->get<dai::ImgFrame>();
+        auto inDet = qDet->get<dai::ImgDetections>();
+        if(inRgb == nullptr || inDet == nullptr) {
+            continue;
+        }
 
-        // Do something with the data
-        // ...
+        // Display the video stream and detections
+        cv::Mat frame = inRgb->getCvFrame();
+        if(!frame.empty()) {
+            // Display detections
+            for(const auto& detection : inDet->detections) {
+                auto bbox = frameNorm(frame, dai::Point2f(detection.xmin, detection.ymin), dai::Point2f(detection.xmax, detection.ymax));
+
+                // Draw label
+                cv::putText(
+                    frame, labelMap.value()[detection.label], cv::Point(bbox.x + 10, bbox.y + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, cv::Scalar(255, 255, 255));
+
+                // Draw confidence
+                cv::putText(frame,
+                            std::to_string(static_cast<int>(detection.confidence * 100)) + "%",
+                            cv::Point(bbox.x + 10, bbox.y + 40),
+                            cv::FONT_HERSHEY_TRIPLEX,
+                            0.5,
+                            cv::Scalar(255, 255, 255));
+
+                // Draw rectangle
+                cv::rectangle(frame, bbox, cv::Scalar(255, 0, 0), 2);
+            }
 
-        if(!sent) {
-            eventsManager->sendSnap("rgb", rgb, {}, {"tag11", "tag12"}, {{"key", "value"}});
-            sent = true;
+            // Show the frame
+            cv::imshow("rgb", frame);
         }
 
+        // Suppose we are only interested in the detections with confidence between 50% and 60%
+        auto borderDetections = std::make_shared<dai::ImgDetections>();
+        for(const auto& detection : inDet->detections) {
+            if(detection.confidence > 0.5f && detection.confidence < 0.6f) {
+                borderDetections->detections.emplace_back(detection);
+            }
+        }
+
+        // Are there any border detections
+        if(borderDetections->detections.size() > 0) {
+            std::string fileName = "ImageDetection_";
+            std::stringstream ss;
+            ss << fileName << counter;
+
+            fileGroup->clearFiles();
+            fileGroup->addImageDetectionsPair(ss.str(), inRgb, borderDetections);
+            eventsManager->sendSnap("ImageDetection", fileGroup, {"EventsExample", "C++"}, {{"key_0", "value_0"}, {"key_1", "value_1"}}, "");
+
+            counter++;
+        }
+
+        if(cv::waitKey(1) == 'q') {
+            break;
+        }
         std::this_thread::sleep_for(std::chrono::milliseconds(200));
    }
 
     return EXIT_SUCCESS;
 }
```
Python example (diff):

```diff
@@ -8,37 +8,84 @@
 # Create pipeline
 with dai.Pipeline() as pipeline:
-    # Define sources and outputs
-    camRgb = pipeline.create(dai.node.Camera).build()
-    # Properties
-
-    qRgb = camRgb.requestOutput((256,256)).createOutputQueue()
-
-    eventMan = dai.EventsManager()
-    eventMan.setLogResponse(True)
+    # Enter your hub team's api-key
+    eventMan = dai.EventsManager()
+    eventMan.setToken("")
+    eventMan.setLogResponse(False)
 
-    eventMan.sendEvent("test1", None, [], ["tag1", "tag2"], {"key1": "value1"})
-    time.sleep(2)
-    fileData = dai.EventData(b'Hello, world!', "hello.txt", "text/plain")
-    eventMan.sendEvent("test2", None, [fileData], ["tag1", "tag2"], {"key1": "value1"})
+    fileGroup = dai.FileGroup()
+
+    cameraNode = pipeline.create(dai.node.Camera).build()
+    detectionNetwork = pipeline.create(dai.node.DetectionNetwork).build(cameraNode, dai.NNModelDescription("yolov6-nano"))
+    labelMap = detectionNetwork.getClasses()
+
+    # Create output queues
+    qRgb = detectionNetwork.passthrough.createOutputQueue()
+    qDet = detectionNetwork.out.createOutputQueue()
+
     pipeline.start()
 
-    frame = None
+    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
+    def frameNorm(frame, bbox):
+        normVals = np.full(len(bbox), frame.shape[0])
+        normVals[::2] = frame.shape[1]
+        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
 
-    eventSent = False
+    counter = 0
     while pipeline.isRunning():
         inRgb: dai.ImgFrame = qRgb.get()
-        if inRgb is not None:
-            frame = inRgb.getCvFrame()
-            if not eventSent:
-                eventMan.sendSnap("rgb", inRgb, [], ["tag1", "tag2"], {"key1": "value1"})
-                eventSent = True
+        inDet: dai.ImgDetections = qDet.get()
+        if inRgb is None or inDet is None:
+            continue
 
+        # Display the video stream and detections
+        color = (255, 0, 0)
+        frame = inRgb.getCvFrame()
         if frame is not None:
+            for detection in inDet.detections:
+                bbox = frameNorm(
+                    frame,
+                    (detection.xmin, detection.ymin, detection.xmax, detection.ymax),
+                )
+                cv2.putText(
+                    frame,
+                    labelMap[detection.label],
+                    (bbox[0] + 10, bbox[1] + 20),
+                    cv2.FONT_HERSHEY_TRIPLEX,
+                    0.5,
+                    255,
+                )
+                cv2.putText(
+                    frame,
+                    f"{int(detection.confidence * 100)}%",
+                    (bbox[0] + 10, bbox[1] + 40),
+                    cv2.FONT_HERSHEY_TRIPLEX,
+                    0.5,
+                    255,
+                )
+                cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
+            # Show the frame
             cv2.imshow("rgb", frame)
 
+        # Suppose we are only interested in the detections with confidence between 50% and 60%
+        borderDetectionsList = []
+        for detection in inDet.detections:
+            if detection.confidence > 0.5 and detection.confidence < 0.6:
+                borderDetectionsList.append(detection)
+
+        # Are there any border detections
+        if len(borderDetectionsList) > 0:
+            borderDetections = dai.ImgDetections()
+            borderDetections.detections = borderDetectionsList
+            fileName = f"ImageDetection_{counter}"
+
+            fileGroup.clearFiles()
+            fileGroup.addImageDetectionsPair(fileName, inRgb, borderDetections)
+            eventMan.sendSnap("ImageDetection", fileGroup, ["EventsExample", "Python"], {"key_0": "value_0", "key_1": "value_1", "key_2": "value_2"}, "")
+
+            counter += 1
+
         if cv2.waitKey(1) == ord("q"):
             pipeline.stop()
             break
```

Review thread on `eventMan.setToken("")`:

Collaborator: Needed?

Contributor (author): I left it here to "alert" the user that the token must be set - specifically in the peripheral mode. I can remove it, if this doesn't make sense.
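As the review thread notes, the token must be set for events to reach the hub, particularly in peripheral mode. Below is a minimal sketch of one way to configure it without hard-coding the key in the example; the environment-variable name `DEPTHAI_HUB_API_KEY` and the helper function are illustrative assumptions, and only `EventsManager` calls already shown in this diff are used:

```python
import os

import depthai as dai


def make_events_manager() -> dai.EventsManager:
    # Hypothetical helper: pull the hub API key from the environment so it
    # never lands in source control. The variable name is an assumption;
    # use whatever your deployment already defines.
    eventMan = dai.EventsManager()
    eventMan.setToken(os.environ.get("DEPTHAI_HUB_API_KEY", ""))
    eventMan.setLogResponse(True)  # log hub responses while debugging
    return eventMan


if __name__ == "__main__":
    eventMan = make_events_manager()
    # Same call shape as in the diff above: event name, frame (None here),
    # attached files, tags, and key/value metadata.
    fileData = dai.EventData(b"Hello, world!", "hello.txt", "text/plain")
    eventMan.sendEvent("smoke-test", None, [fileData], ["setup"], {"source": "example"})
```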