
Commit c04833a

Formatting

Author: Matevz Morato
1 parent bed8920, commit c04833a

9 files changed: +29 -29 lines changed

bindings/python/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -140,6 +140,7 @@ set(SOURCE_LIST
     src/pipeline/datatype/ImageAlignConfigBindings.cpp
     src/pipeline/datatype/ImgAnnotationsBindings.cpp
     src/remote_connection/RemoteConnectionBindings.cpp
+
     src/capabilities/CapabilityBindings.cpp
     src/capabilities/CapabilityRangeBindings.cpp
     src/capabilities/ImgFrameCapabilityBindings.cpp

examples/cpp/DetectionNetwork/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -27,4 +27,4 @@ dai_add_example(detection_and_segmentation RVC4/detection_and_segmentation.cpp O
 dai_set_example_test_labels(detection_and_segmentation rvc4)
 
 dai_add_example(detection_and_keypoints RVC4/detection_and_keypoints.cpp ON OFF)
-dai_set_example_test_labels(detection_and_keypoints rvc4)
+dai_set_example_test_labels(detection_and_keypoints rvc4)

examples/cpp/DetectionNetwork/RVC4/detection_and_segmentation.cpp

Lines changed: 1 addition & 2 deletions
@@ -65,8 +65,7 @@ int main() {
         if(inDet != nullptr) {
             counter++;
 
-            // get all labels as sorted list
-
+            // Get all labels as sorted list
             auto labels = std::set<int>();
             for(const auto& detection : inDet->detections) {
                 labels.insert(detection.label);

examples/python/DetectionNetwork/RVC4/detection_and_keypoints.py

Lines changed: 6 additions & 8 deletions
@@ -52,15 +52,15 @@ def displayFrame(name, frame):
             255,
         )
         cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
-
+
         keypoints = detection.getKeypoints()
         for keypoint in keypoints:
             keypoint_pos = frameNorm(
                 frame,
                 (keypoint.imageCoordinates.x, keypoint.imageCoordinates.y),
             )
             cv2.circle(frame, (keypoint_pos[0], keypoint_pos[1]), 3, (0, 255, 0), -1)
-
+
         for edge in detection.getEdges():
             kp1 = keypoints[edge[0]]
             kp2 = keypoints[edge[1]]
@@ -75,14 +75,12 @@ def displayFrame(name, frame):
             cv2.line(frame, (kp1_pos[0], kp1_pos[1]), (kp2_pos[0], kp2_pos[1]), (0, 255, 0), 2)
     # Show the frame
     cv2.imshow(name, frame)
-
-
+
+
 while pipeline.isRunning():
     inRgb: dai.ImgFrame = qRgb.get()
     inDet: dai.ImgDetections = qDet.get()
-    inRgb.getHeight()
-
-
+
     if inRgb is not None:
         frame = inRgb.getCvFrame()
         cv2.putText(
@@ -97,7 +95,7 @@ def displayFrame(name, frame):
     if inDet is not None:
         detections = inDet.detections
         counter += 1
-
+
     if frame is not None:
         displayFrame("rgb", frame)
     if cv2.waitKey(1) == ord("q"):
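The hunks above are whitespace cleanup plus removal of a stray inRgb.getHeight() call; the keypoint-drawing logic itself is unchanged. As a reading aid, here is a minimal standalone sketch of that drawing step, assuming the example's frameNorm helper and a detection object exposing getKeypoints() and getEdges(); the drawKeypoints wrapper name is only for illustration.

import cv2
import numpy as np

def frameNorm(frame, pts):
    # Map normalized (0..1) coordinates to pixel coordinates (helper as used in the example).
    norm = np.full(len(pts), frame.shape[0])
    norm[0::2] = frame.shape[1]
    return (np.clip(np.array(pts), 0, 1) * norm).astype(int)

def drawKeypoints(frame, detection):
    # Draw each keypoint as a small filled green circle.
    keypoints = detection.getKeypoints()
    positions = []
    for keypoint in keypoints:
        pos = frameNorm(frame, (keypoint.imageCoordinates.x, keypoint.imageCoordinates.y))
        positions.append(pos)
        cv2.circle(frame, (pos[0], pos[1]), 3, (0, 255, 0), -1)
    # Connect keypoints along the skeleton edges reported with the detection.
    for edge in detection.getEdges():
        p1, p2 = positions[edge[0]], positions[edge[1]]
        cv2.line(frame, (p1[0], p1[1]), (p2[0], p2[1]), (0, 255, 0), 2)
    return frame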

examples/python/DetectionNetwork/RVC4/detection_and_segmentation.py

Lines changed: 12 additions & 12 deletions
@@ -53,27 +53,27 @@ def displayFrame(frame):
         )
         cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
     return frame
-
+
 filtered_label = -1
 while pipeline.isRunning():
     inRgb: dai.ImgFrame = qRgb.get()
     inDet: dai.ImgDetections = qDet.get()
-
+
     key = cv2.waitKey(1) & 0xFF
     if key == ord('q'):
         pipeline.stop()
         break
-
+
     if inRgb is not None:
         frame = inRgb.getCvFrame()
-
+
         side_panel = np.ones((frame.shape[0], 400, 3), dtype=np.uint8) * 255
         if inDet is not None:
             detections = inDet.detections
             counter += 1
-
+
             labels = sorted(list(set(detection.label for detection in detections)))
-
+
             label_maps = [labelMap[l] for l in labels]
             cv2.putText(side_panel, "Press index to filter by class:", (10, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 0, 0), 1)
             for i, label in enumerate(label_maps):
@@ -95,7 +95,7 @@ def displayFrame(frame):
                     (0, 0, 0),
                     1
                 )
-
+
             if key == ord('0') :
                 print("Showing all labels")
                 filtered_label = -1
@@ -104,24 +104,24 @@ def displayFrame(frame):
                 if len(labels) > int_key:
                     print(f"Showing only: {labelMap[labels[int_key]]}")
                     filtered_label = labels[int_key]
-
+
             width = inDet.getSegmentationMaskWidth()
             height = inDet.getSegmentationMaskHeight()
-
+
             segmentation_mask = cv2.Mat(np.zeros((height, width), dtype=np.uint8))
             if filtered_label == -1:
                 segmentation_mask = inDet.getCvSegmentationMask()
             else:
                 segmentation_mask = inDet.getCvSegmentationMaskByClass(filtered_label)
                 detections = [det for det in detections if det.label == filtered_label]
-
+
             if segmentation_mask is not None:
                 scaled_mask = segmentation_mask.copy()
                 scaled_mask[segmentation_mask != 255] = segmentation_mask[segmentation_mask != 255] * 25 # scale for better visualization
                 colored_mask = cv2.applyColorMap(scaled_mask, cv2.COLORMAP_JET)
                 colored_mask[segmentation_mask == 255] = frame[segmentation_mask == 255]
                 frame = cv2.addWeighted(frame, 0.7, colored_mask, 0.3, 0)
-
+
             cv2.putText(
                 frame,
                 "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)),
@@ -130,7 +130,7 @@ def displayFrame(frame):
                 0.4,
                 color2,
             )
-
+
             if frame is not None:
                 frame = displayFrame(frame)
                 concatenated_frame = cv2.hconcat([frame, side_panel])
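Every hunk in this file strips trailing whitespace only, so the segmentation overlay logic is untouched. For readability, here is a minimal sketch of that overlay step on its own, assuming a uint8 segmentation_mask of the same size as the frame in which 255 marks "no class" (as the example's handling of 255 suggests); the overlaySegmentation wrapper name is only for illustration.

import cv2
import numpy as np

def overlaySegmentation(frame, segmentation_mask):
    # Spread the small class indices over a wider range so the colormap is visible.
    scaled_mask = segmentation_mask.copy()
    scaled_mask[segmentation_mask != 255] = segmentation_mask[segmentation_mask != 255] * 25
    # Colorize the mask, but keep the original pixels wherever no class was predicted.
    colored_mask = cv2.applyColorMap(scaled_mask, cv2.COLORMAP_JET)
    colored_mask[segmentation_mask == 255] = frame[segmentation_mask == 255]
    # Blend the colorized mask over the frame.
    return cv2.addWeighted(frame, 0.7, colored_mask, 0.3, 0)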

include/depthai/common/Keypoint.hpp

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 namespace dai {
 
 struct Keypoint {
-    Point3f imageCoordinates{};
+    Point3f imageCoordinates;
     float confidence = 0.f;
     uint32_t label = 0;
 
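The change above drops the {} initializer on imageCoordinates (presumably redundant given Point3f's own defaults); the struct otherwise still carries imageCoordinates (a Point3f), confidence, and label. A hedged sketch of reading these fields from the Python side follows (field names taken from Keypoint.hpp; whether the bindings expose confidence and label under exactly these names is an assumption, though the keypoints example above already uses imageCoordinates):

def keypointsToTuples(detection):
    # Flatten the keypoints of one detection into plain (x, y, confidence, label) tuples.
    return [
        (kp.imageCoordinates.x, kp.imageCoordinates.y, kp.confidence, kp.label)
        for kp in detection.getKeypoints()
    ]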

include/depthai/common/YoloDecodingFamily.hpp

Lines changed: 1 addition & 1 deletion
@@ -7,4 +7,4 @@ enum class YoloDecodingFamily : std::int32_t {
     R1AF, // anchor free: yolo v6r1
     v5AB, // anchor based yolo v5, v7, P
     v3AB, // anchor based yolo v3 v3-Tiny
-};
+};

include/depthai/pipeline/datatype/ImgDetections.hpp

Lines changed: 0 additions & 3 deletions
@@ -10,9 +10,7 @@
 #include "depthai/common/RotatedRect.hpp"
 #include "depthai/common/optional.hpp"
 #include "depthai/pipeline/datatype/Buffer.hpp"
-#include "depthai/pipeline/datatype/ImgAnnotations.hpp"
 #include "depthai/pipeline/datatype/ImgDetectionsT.hpp"
-#include "depthai/pipeline/datatype/ImgFrame.hpp"
 #include "depthai/utility/ProtoSerializable.hpp"
 
 #ifdef DEPTHAI_XTENSOR_SUPPORT
@@ -21,7 +19,6 @@
 #include <xtensor/core/xlayout.hpp>
 #include <xtensor/core/xmath.hpp>
 #include <xtensor/core/xtensor_forward.hpp>
-
 #endif
 
 #ifdef DEPTHAI_HAVE_OPENCV_SUPPORT

tests/CMakeLists.txt

Lines changed: 6 additions & 1 deletion
@@ -220,6 +220,7 @@ private_data(
     FILE "recording.tar"
     LOCATION recording_path
 )
+
 private_data(
     URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4"
     SHA1 "271d8d0b702e683ce02957db7c100843de5ceaec"
@@ -233,24 +234,28 @@ private_data(
     FILE "yolov6-nano_r2-coco-512x288_a26d1ee-detections.json"
     LOCATION yolo_v6_r2_coco_512x288_gt
 )
+
 private_data(
     URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/misc/parser_testing/ground_truth_files/detection_parser/yolov6-nano_r2-coco-512x384_fb1429e-detections.json"
     SHA1 "af6cea1f293622fcc2c9cb8d78fadb510a2270c9"
     FILE "yolov6-nano_r2-coco-512x384_fb1429e-detections.json"
     LOCATION yolo_v6_r2_coco_512x384_gt
 )
+
 private_data(
     URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/misc/parser_testing/ground_truth_files/detection_parser/yolov8-instance-segmentation-nano_coco-512x288_6c0402a-detections.json"
     SHA1 "bb29fb9a6bc041f7e90769df4cde3a6ca890e521"
     FILE "yolov8-instance-segmentation-nano_coco-512x288_6c0402a-detections.json"
     LOCATION yolo_v8_instance_segmentation_nano_coco_512x288_gt
 )
+
 private_data(
     URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/misc/parser_testing/ground_truth_files/detection_parser/yolov8-large-pose-estimation_coco-640x352_1868e39-detections.json"
     SHA1 "397fd5e1b80f47050cb827af92b5b13fb4f35bc6"
     FILE "yolov8-large-pose-estimation_coco-640x352_1868e39-detections.json"
     LOCATION yolo_v8_large_pose_estimation_coco_640x352_gt
 )
+
 private_data(
     URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/misc/parser_testing/ground_truth_files/detection_parser/yolov8-instance-segmentation-large_coco-640x352_701031f-detections.json"
     SHA1 "21ee42c2f560d7f977f1895387f6e6834c7dd2b5"
@@ -299,7 +304,7 @@ private_data(
     FILE "fire_hd_1920_1080_24fps.mp4"
     LOCATION fire_video
 )
-
+
 private_data(
     URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/misc/parser_testing/images/kitchen1.png"
     SHA1 "e566d4d0dce0b30a3e4cdc3b693e1c6385867822"
