Commit d921f75

Move results from utils (#222)
Parent: c5d0dbe

15 files changed, +277 -265 lines

model_api/python/model_api/models/__init__.py

Lines changed: 7 additions & 11 deletions
@@ -11,25 +11,26 @@
 from .instance_segmentation import MaskRCNNModel
 from .keypoint_detection import KeypointDetectionModel, TopDownKeypointDetectionPipeline
 from .model import Model
-from .sam_models import SAMDecoder, SAMImageEncoder
-from .segmentation import SalientObjectDetectionModel, SegmentationModel
-from .ssd import SSD
-from .utils import (
+from .result_types import (
     AnomalyResult,
     ClassificationResult,
     Contour,
     DetectedKeypoints,
     Detection,
     DetectionResult,
-    DetectionWithLandmarks,
     ImageResultWithSoftPrediction,
     InstanceSegmentationResult,
-    OutputTransform,
     PredictedMask,
     SegmentedObject,
     SegmentedObjectWithRects,
     VisualPromptingResult,
     ZSLVisualPromptingResult,
+)
+from .sam_models import SAMDecoder, SAMImageEncoder
+from .segmentation import SalientObjectDetectionModel, SegmentationModel
+from .ssd import SSD
+from .utils import (
+    OutputTransform,
     add_rotated_rects,
     get_contours,
 )
@@ -62,8 +63,6 @@
     "ClassificationModel",
     "Contour",
     "DetectionModel",
-    "DetectionWithLandmarks",
-    "ImageMattingWithBackground",
     "ImageModel",
     "ImageResultWithSoftPrediction",
     "InstanceSegmentationResult",
@@ -77,11 +76,9 @@
     "MaskRCNNModel",
     "Model",
     "OutputTransform",
-    "PortraitBackgroundMatting",
     "SalientObjectDetectionModel",
     "SegmentationModel",
     "SSD",
-    "VideoBackgroundMatting",
     "YOLO",
     "YoloV3ONNX",
     "YoloV4",
@@ -95,7 +92,6 @@
     "Prompt",
     "Detection",
     "DetectionResult",
-    "DetectionWithLandmarks",
     "DetectedKeypoints",
     "classification_models",
     "detection_models",

model_api/python/model_api/models/action_classification.py

Lines changed: 2 additions & 1 deletion
@@ -12,8 +12,9 @@
 from model_api.adapters.utils import RESIZE_TYPES, InputTransform
 
 from .model import Model
+from .result_types import ClassificationResult
 from .types import BooleanValue, ListValue, NumericalValue, StringValue
-from .utils import ClassificationResult, load_labels
+from .utils import load_labels
 
 if TYPE_CHECKING:
     from model_api.adapters.inference_adapter import InferenceAdapter

model_api/python/model_api/models/anomaly.py

Lines changed: 1 addition & 1 deletion
@@ -17,8 +17,8 @@
 from model_api.adapters.inference_adapter import InferenceAdapter
 
 from .image_model import ImageModel
+from .result_types import AnomalyResult
 from .types import ListValue, NumericalValue, StringValue
-from .utils import AnomalyResult
 
 
 class AnomalyDetection(ImageModel):

model_api/python/model_api/models/classification.py

Lines changed: 1 addition & 1 deletion
@@ -17,8 +17,8 @@
 from model_api.adapters.inference_adapter import InferenceAdapter
 
 from .image_model import ImageModel
+from .result_types import ClassificationResult
 from .types import BooleanValue, ListValue, NumericalValue, StringValue
-from .utils import ClassificationResult
 
 
 class ClassificationModel(ImageModel):

model_api/python/model_api/models/instance_segmentation.py

Lines changed: 2 additions & 1 deletion
@@ -7,8 +7,9 @@
 import numpy as np
 
 from .image_model import ImageModel
+from .result_types import InstanceSegmentationResult, SegmentedObject
 from .types import BooleanValue, ListValue, NumericalValue, StringValue
-from .utils import InstanceSegmentationResult, SegmentedObject, load_labels
+from .utils import load_labels
 
 
 class MaskRCNNModel(ImageModel):

model_api/python/model_api/models/keypoint_detection.py

Lines changed: 1 addition & 1 deletion
@@ -10,8 +10,8 @@
 import numpy as np
 
 from .image_model import ImageModel
+from .result_types import DetectedKeypoints, Detection
 from .types import ListValue
-from .utils import DetectedKeypoints, Detection
 
 
 class KeypointDetectionModel(ImageModel):
model_api/python/model_api/models/result_types.py

Lines changed: 247 additions & 0 deletions
@@ -0,0 +1,247 @@
+#
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from __future__ import annotations  # TODO: remove when Python3.9 support is dropped
+
+from typing import NamedTuple
+
+import cv2 as cv
+import numpy as np
+
+
+class AnomalyResult(NamedTuple):
+    """Results for anomaly models."""
+
+    anomaly_map: np.ndarray | None = None
+    pred_boxes: np.ndarray | None = None
+    pred_label: str | None = None
+    pred_mask: np.ndarray | None = None
+    pred_score: float | None = None
+
+    def _compute_min_max(self, tensor: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+        """Computes min and max values of the tensor."""
+        return tensor.min(), tensor.max()
+
+    def __str__(self) -> str:
+        assert self.anomaly_map is not None
+        assert self.pred_mask is not None
+        anomaly_map_min, anomaly_map_max = self._compute_min_max(self.anomaly_map)
+        pred_mask_min, pred_mask_max = self._compute_min_max(self.pred_mask)
+        return (
+            f"anomaly_map min:{anomaly_map_min} max:{anomaly_map_max};"
+            f"pred_score:{np.round(self.pred_score, 1) if self.pred_score else 0.0};"
+            f"pred_label:{self.pred_label};"
+            f"pred_mask min:{pred_mask_min} max:{pred_mask_max};"
+        )
+
+
+class ClassificationResult(NamedTuple):
+    """Results for classification models."""
+
+    top_labels: list[tuple[int, str, float]] | None = None
+    saliency_map: np.ndarray | None = None
+    feature_vector: np.ndarray | None = None
+    raw_scores: np.ndarray | None = None
+
+    def __str__(self) -> str:
+        assert self.top_labels is not None
+        labels = ", ".join(f"{idx} ({label}): {confidence:.3f}" for idx, label, confidence in self.top_labels)
+        return (
+            f"{labels}, {_array_shape_to_str(self.saliency_map)}, {_array_shape_to_str(self.feature_vector)}, "
+            f"{_array_shape_to_str(self.raw_scores)}"
+        )
+
+
+class Detection:
+    def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label=None):
+        self.xmin: int = xmin
+        self.ymin: int = ymin
+        self.xmax: int = xmax
+        self.ymax: int = ymax
+        self.score: float = score
+        self.id: int = int(id)
+        self.str_label: str | None = str_label
+
+    def __str__(self):
+        return f"{self.xmin}, {self.ymin}, {self.xmax}, {self.ymax}, {self.id} ({self.str_label}): {self.score:.3f}"
+
+
+class DetectionResult(NamedTuple):
+    """Result for detection model."""
+
+    objects: list[Detection] | None = None
+    saliency_map: np.ndarray | None = None
+    feature_vector: np.ndarray | None = None
+
+    def __str__(self):
+        assert self.objects is not None
+        obj_str = "; ".join(str(obj) for obj in self.objects)
+        if obj_str:
+            obj_str += "; "
+        return f"{obj_str}{_array_shape_to_str(self.saliency_map)}; {_array_shape_to_str(self.feature_vector)}"
+
+
+class SegmentedObject(Detection):
+    def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label, mask):
+        super().__init__(xmin, ymin, xmax, ymax, score, id, str_label)
+        self.mask = mask
+
+    def __str__(self):
+        return f"{super().__str__()}, {(self.mask > 0.5).sum()}"
+
+
+class SegmentedObjectWithRects(SegmentedObject):
+    def __init__(self, segmented_object, rotated_rect):
+        super().__init__(
+            segmented_object.xmin,
+            segmented_object.ymin,
+            segmented_object.xmax,
+            segmented_object.ymax,
+            segmented_object.score,
+            segmented_object.id,
+            segmented_object.str_label,
+            segmented_object.mask,
+        )
+        self.rotated_rect = rotated_rect
+
+    def __str__(self):
+        res = super().__str__()
+        rect = self.rotated_rect
+        res += f", RotatedRect: {rect[0][0]:.3f} {rect[0][1]:.3f} {rect[1][0]:.3f} {rect[1][1]:.3f} {rect[2]:.3f}"
+        return res
+
+
+class InstanceSegmentationResult(NamedTuple):
+    segmentedObjects: list[SegmentedObject | SegmentedObjectWithRects]
+    # Contain per class saliency_maps and "feature_vector" model output if feature_vector exists
+    saliency_map: list[np.ndarray]
+    feature_vector: np.ndarray
+
+    def __str__(self):
+        obj_str = "; ".join(str(obj) for obj in self.segmentedObjects)
+        filled = 0
+        for cls_map in self.saliency_map:
+            if cls_map.size:
+                filled += 1
+        prefix = f"{obj_str}; " if len(obj_str) else ""
+        return prefix + f"{filled}; [{','.join(str(i) for i in self.feature_vector.shape)}]"
+
+
+class VisualPromptingResult(NamedTuple):
+    upscaled_masks: list[np.ndarray] | None = None
+    processed_mask: list[np.ndarray] | None = None
+    low_res_masks: list[np.ndarray] | None = None
+    iou_predictions: list[np.ndarray] | None = None
+    scores: list[np.ndarray] | None = None
+    labels: list[np.ndarray] | None = None
+    hard_predictions: list[np.ndarray] | None = None
+    soft_predictions: list[np.ndarray] | None = None
+    best_iou: list[float] | None = None
+
+    def _compute_min_max(self, tensor: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+        return tensor.min(), tensor.max()
+
+    def __str__(self) -> str:
+        assert self.hard_predictions is not None
+        assert self.upscaled_masks is not None
+        upscaled_masks_min, upscaled_masks_max = self._compute_min_max(
+            self.upscaled_masks[0],
+        )
+
+        return (
+            f"upscaled_masks min:{upscaled_masks_min:.3f} max:{upscaled_masks_max:.3f};"
+            f"hard_predictions shape:{self.hard_predictions[0].shape};"
+        )
+
+
+class PredictedMask(NamedTuple):
+    mask: list[np.ndarray]
+    points: list[np.ndarray] | np.ndarray
+    scores: list[float] | np.ndarray
+
+    def __str__(self) -> str:
+        obj_str = ""
+        obj_str += f"mask sum: {np.sum(sum(self.mask))}; "
+
+        if isinstance(self.points, list):
+            for i, point in enumerate(self.points):
+                obj_str += "["
+                obj_str += ", ".join(str(round(c, 2)) for c in point)
+                obj_str += "] "
+                obj_str += "iou: " + f"{float(self.scores[i]):.3f} "
+        else:
+            for i in range(self.points.shape[0]):
+                point = self.points[i]
+                obj_str += "["
+                obj_str += ", ".join(str(round(c, 2)) for c in point)
+                obj_str += "] "
+                obj_str += "iou: " + f"{float(self.scores[i]):.3f} "
+
+        return obj_str.strip()
+
+
+class ZSLVisualPromptingResult(NamedTuple):
+    data: dict[int, PredictedMask]
+
+    def __str__(self) -> str:
+        return ", ".join(str(self.data[k]) for k in self.data)
+
+    def get_mask(self, label: int) -> PredictedMask:
+        """Returns a mask belonging to a given label"""
+        return self.data[label]
+
+
+class DetectedKeypoints(NamedTuple):
+    keypoints: np.ndarray
+    scores: np.ndarray
+
+    def __str__(self):
+        return (
+            f"keypoints: {self.keypoints.shape}, "
+            f"keypoints_x_sum: {np.sum(self.keypoints[:, :1]):.3f}, "
+            f"scores: {self.scores.shape}"
+        )
+
+
+class Contour(NamedTuple):
+    label: str
+    probability: float
+    shape: list[tuple[int, int]]
+
+    def __str__(self):
+        return f"{self.label}: {self.probability:.3f}, {len(self.shape)}"
+
+
+class ImageResultWithSoftPrediction(NamedTuple):
+    resultImage: np.ndarray
+    soft_prediction: np.ndarray
+    # Contain per class saliency_maps and "feature_vector" model output if feature_vector exists
+    saliency_map: np.ndarray  # Requires return_soft_prediction==True
+    feature_vector: np.ndarray
+
+    def __str__(self):
+        outHist = cv.calcHist(
+            [self.resultImage.astype(np.uint8)],
+            channels=None,
+            mask=None,
+            histSize=[256],
+            ranges=[0, 255],
+        )
+        hist = ""
+        for i, count in enumerate(outHist):
+            if count > 0:
+                hist += f"{i}: {count[0] / self.resultImage.size:.3f}, "
+        return (
+            f"{hist}{_array_shape_to_str(self.soft_prediction)}, "
+            f"{_array_shape_to_str(self.saliency_map)}, "
+            f"{_array_shape_to_str(self.feature_vector)}"
+        )
+
+
+def _array_shape_to_str(array: np.ndarray | None) -> str:
+    if array is not None:
+        return f"[{','.join(str(i) for i in array.shape)}]"
+    else:
+        return "[]"

model_api/python/model_api/models/segmentation.py

Lines changed: 2 additions & 1 deletion
@@ -11,8 +11,9 @@
 import numpy as np
 
 from .image_model import ImageModel
+from .result_types import Contour, ImageResultWithSoftPrediction
 from .types import BooleanValue, ListValue, NumericalValue, StringValue
-from .utils import Contour, ImageResultWithSoftPrediction, load_labels
+from .utils import load_labels
 
 
 def create_hard_prediction_from_soft_prediction(

model_api/python/model_api/models/ssd.py

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 import numpy as np
 
 from .detection_model import DetectionModel
-from .utils import Detection, DetectionResult
+from .result_types import Detection, DetectionResult
 
 
 class SSD(DetectionModel):
