
Commit fb53880

Refactor DetectionResult class and YOLO
1 parent: 80dac84

5 files changed (+227 additions, −156 deletions)

model_api/python/model_api/models/detection_model.py

Lines changed: 2 additions & 6 deletions
```diff
@@ -67,7 +67,7 @@ def parameters(cls):
 
         return parameters
 
-    def _resize_detections(self, detection_result: DetectionResult, meta: dict) -> DetectionResult:
+    def _resize_detections(self, detection_result: DetectionResult, meta: dict):
         """Resizes detection bounding boxes according to initial image shape.
 
         It implements image resizing depending on the set `resize_type`(see `ImageModel` for details).
@@ -76,9 +76,6 @@ def _resize_detections(self, detection_result: DetectionResult, meta: dict) -> DetectionResult:
         Args:
             detection_result (DetectionList): detection result with coordinates in normalized form
             meta (dict): the input metadata obtained from `preprocess` method
-
-        Returns:
-            list of detections with resized and clipped coordinates to fit the initial image
         """
         input_img_height, input_img_widht = meta["original_shape"][:2]
         inverted_scale_x = input_img_widht / self.w
@@ -99,8 +96,7 @@ def _resize_detections(self, detection_result: DetectionResult, meta: dict) -> DetectionResult:
         boxes[:, 1::2] = (boxes[:, 1::2] * self.h - pad_top) * inverted_scale_y
         boxes[:, 0::2] = np.clip(boxes[:, 0::2], 0, input_img_widht)
         boxes[:, 1::2] = np.clip(boxes[:, 1::2], 0, input_img_height)
-        detection_result.bboxes = boxes
-        return detection_result
+        np.round(boxes, out=boxes)
 
     def _filter_detections(self, detection_result: DetectionResult, box_area_threshold=0.0):
        """Filters detections by confidence threshold and box size threshold
```

model_api/python/model_api/models/result_types/detection.py

Lines changed: 81 additions & 12 deletions
```diff
@@ -45,20 +45,23 @@ def __init__(
         feature_vector: np.ndarray | None = None,
     ):
         super().__init__()
-        self.bboxes = bboxes
-        self.labels = labels
-        self.scores = scores if scores is not None else np.zeros(len(bboxes))
-        self.label_names = None if label_names is None else label_names
-        self.saliency_map = saliency_map
-        self.feature_vector = feature_vector
+        self._bboxes = bboxes
+        self._labels = labels
+        self._scores = scores if scores is not None else np.zeros(len(bboxes))
+        self._label_names = ["#"] * len(bboxes) if label_names is None else label_names
+        self._saliency_map = saliency_map
+        self._feature_vector = feature_vector
+
+    def __len__(self) -> int:
+        return len(self._bboxes)
 
     def __str__(self) -> str:
         return (
-            f"bboxes: {self.bboxes.shape}, "
-            f"labels: {len(self.labels)}, "
-            f"scores: {len(self.scores)}, "
-            f"{array_shape_to_str(self.saliency_map)}, "
-            f"{array_shape_to_str(self.feature_vector)}"
+            f"Num of boxes: {self._bboxes.shape}, "
+            f"Num of labels: {len(self._labels)}, "
+            f"Num of scores: {len(self._scores)}, "
+            f"Saliency Map: {array_shape_to_str(self._saliency_map)}, "
+            f"Feature Vec: {array_shape_to_str(self._feature_vector)}"
         )
 
     def get_obj_sizes(self) -> np.ndarray:
@@ -67,7 +70,73 @@ def get_obj_sizes(self) -> np.ndarray:
         Returns:
             np.ndarray: Object sizes in dim of (N,).
         """
-        return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1])
+        return (self._bboxes[:, 2] - self._bboxes[:, 0]) * (self._bboxes[:, 3] - self._bboxes[:, 1])
+
+    @property
+    def bboxes(self) -> np.ndarray:
+        return self._bboxes
+
+    @bboxes.setter
+    def bboxes(self, value):
+        if not isinstance(value, np.ndarray):
+            msg = "Bounding boxes must be numpy array."
+            raise ValueError(msg)
+        self._bboxes = value
+
+    @property
+    def labels(self) -> np.ndarray:
+        return self._labels
+
+    @labels.setter
+    def labels(self, value):
+        if not isinstance(value, np.ndarray):
+            msg = "Labels must be numpy array."
+            raise ValueError(msg)
+        self._labels = value
+
+    @property
+    def scores(self) -> np.ndarray:
+        return self._scores
+
+    @scores.setter
+    def scores(self, value):
+        if not isinstance(value, np.ndarray):
+            msg = "Scores must be numpy array."
+            raise ValueError(msg)
+        self._scores = value
+
+    @property
+    def label_names(self) -> list[str]:
+        return self._label_names
+
+    @label_names.setter
+    def label_names(self, value):
+        if not isinstance(value, list):
+            msg = "Label names must be list."
+            raise ValueError(msg)
+        self._label_names = value
+
+    @property
+    def saliency_map(self) -> np.ndarray:
+        return self._saliency_map
+
+    @saliency_map.setter
+    def saliency_map(self, value):
+        if not isinstance(value, np.ndarray):
+            msg = "Saliency map must be numpy array."
+            raise ValueError(msg)
+        self._saliency_map = value
+
+    @property
+    def feature_vector(self) -> np.ndarray:
+        return self._feature_vector
+
+    @feature_vector.setter
+    def feature_vector(self, value):
+        if not isinstance(value, np.ndarray):
+            msg = "Feature vector must be numpy array."
+            raise ValueError(msg)
+        self._feature_vector = value
 
 
 class SingleOutputParser:
```
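The bulk of the additions turn `DetectionResult`'s plain attributes into properties whose setters reject anything that is not the expected type, and add a `__len__` that reports the box count. A short usage sketch, assuming the import path follows the file location above and that the constructor accepts `bboxes` and `labels` as keywords, as the diff suggests:

```python
import numpy as np

from model_api.models.result_types.detection import DetectionResult

result = DetectionResult(
    bboxes=np.array([[0, 0, 50, 50], [10, 10, 90, 80]], dtype=np.float32),
    labels=np.array([1, 3]),
)

print(len(result))         # 2 -- the new __len__ counts bounding boxes
print(result.label_names)  # ['#', '#'] -- placeholder names when none given

result.scores = np.array([0.9, 0.4])  # an ndarray passes the setter's check
try:
    result.scores = [0.9, 0.4]  # a plain list is rejected by the setter
except ValueError as err:
    print(err)  # "Scores must be numpy array."
```

Validating at assignment time moves type errors from wherever the arrays are eventually consumed to the line that introduced the bad value.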

model_api/python/model_api/models/utils.py

Lines changed: 1 addition & 5 deletions
```diff
@@ -53,19 +53,15 @@ def get_contours(
     return combined_contours
 
 
-def clip_detections(detections: DetectionResult, size: tuple[int, int]) -> DetectionResult:
+def clip_detections(detections: DetectionResult, size: tuple[int, int]):
     """Clip bounding boxes to image size.
 
     Args:
         detections (DetectionResult): detection results including boxes, labels and scores.
         size (tuple[int, int]): image size of format (height, width).
-
-    Returns:
-        DetectionResult: clipped detection results.
     """
     detections.bboxes[:, 0::2] = np.clip(detections.bboxes[:, 0::2], 0, size[1])
     detections.bboxes[:, 1::2] = np.clip(detections.bboxes[:, 1::2], 0, size[0])
-    return detections
 
 
 class OutputTransform:
```
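`clip_detections` follows the same pattern: it now clips `detections.bboxes` in place and returns nothing, so the old `result = clip_detections(result, size)` form would bind `None` after this commit. A sketch of the new calling convention, with import paths inferred from the file locations above and made-up box values:

```python
import numpy as np

from model_api.models.result_types.detection import DetectionResult
from model_api.models.utils import clip_detections

result = DetectionResult(
    bboxes=np.array([[-5.0, 10.0, 700.0, 300.0]]),  # partly outside the image
    labels=np.array([0]),
)

clip_detections(result, (480, 640))  # size is (height, width); no return value
print(result.bboxes)  # [[  0.  10. 640. 300.]] -- clipped in place
```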
