271 changes: 183 additions & 88 deletions model_api/python/model_api/models/result_types.py
@@ -1,24 +1,41 @@
#
"""Result types."""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

from __future__ import annotations # TODO: remove when Python3.9 support is dropped
from __future__ import annotations

from abc import ABC
from typing import NamedTuple

import cv2 as cv
import cv2
import numpy as np

from model_api.visualizer.primitives import BoundingBoxes, Label, Overlay, Polygon
from model_api.visualizer.visualize_mixin import VisualizeMixin


class Result(VisualizeMixin, ABC):
"""Base result type."""

class AnomalyResult(NamedTuple):

class AnomalyResult(Result):
"""Results for anomaly models."""

anomaly_map: np.ndarray | None = None
pred_boxes: np.ndarray | None = None
pred_label: str | None = None
pred_mask: np.ndarray | None = None
pred_score: float | None = None
def __init__(
self,
anomaly_map: np.ndarray | None = None,
pred_boxes: np.ndarray | None = None,
pred_label: str | None = None,
pred_mask: np.ndarray | None = None,
pred_score: float | None = None,
) -> None:
super().__init__()
self.anomaly_map = anomaly_map
self.pred_boxes = pred_boxes
self.pred_label = pred_label
self.pred_mask = pred_mask
self.pred_score = pred_score

def _compute_min_max(self, tensor: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""Computes min and max values of the tensor."""
@@ -36,14 +53,32 @@ def __str__(self) -> str:
f"pred_mask min:{pred_mask_min} max:{pred_mask_max};"
)

def _register_primitives(self) -> None:
"""Converts the result to primitives."""
anomaly_map = cv2.applyColorMap(self.anomaly_map, cv2.COLORMAP_JET)
self._add_primitive(Overlay(anomaly_map))
for box in self.pred_boxes:
self._add_primitive(BoundingBoxes(*box))
self._add_primitive(Label(self.pred_label, bg_color="red" if self.pred_label == "Anomaly" else "green"))
self._add_primitive(Label(f"Score: {self.pred_score}"))
self._add_primitive(Polygon(mask=self.pred_mask))
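For orientation, a minimal sketch of how the refactored AnomalyResult could be constructed and printed. The arrays below are synthetic placeholders, not outputs of any real model, and it assumes VisualizeMixin.__init__ requires no arguments:

import numpy as np

anomaly_map = (np.random.rand(256, 256) * 255).astype(np.uint8)  # fake heatmap
result = AnomalyResult(
    anomaly_map=anomaly_map,
    pred_boxes=np.array([[10, 10, 60, 60]]),  # one xyxy box
    pred_label="Anomaly",
    pred_mask=(anomaly_map > 127).astype(np.uint8),
    pred_score=0.92,
)
print(result)  # formatted by the __str__ defined above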


class ClassificationResult(NamedTuple):
class ClassificationResult(Result):
"""Results for classification models."""

top_labels: list[tuple[int, str, float]] | None = None
saliency_map: np.ndarray | None = None
feature_vector: np.ndarray | None = None
raw_scores: np.ndarray | None = None
def __init__(
self,
top_labels: list[tuple[int, str, float]] | None = None,
saliency_map: np.ndarray | None = None,
feature_vector: np.ndarray | None = None,
raw_scores: np.ndarray | None = None,
) -> None:
super().__init__()
self.top_labels = top_labels
self.saliency_map = saliency_map
self.feature_vector = feature_vector
self.raw_scores = raw_scores

def __str__(self) -> str:
assert self.top_labels is not None
@@ -53,6 +88,11 @@ def __str__(self) -> str:
f"{_array_shape_to_str(self.raw_scores)}"
)

def _register_primitives(self) -> None:
# TODO add saliency map
for idx, label, confidence in self.top_labels:
self._add_primitive(Label(f"Rank: {idx}, {label}: {confidence:.3f}"))


class Detection:
def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label=None) -> None:
@@ -68,7 +108,7 @@ def __str__(self):
return f"{self.xmin}, {self.ymin}, {self.xmax}, {self.ymax}, {self.id} ({self.str_label}): {self.score:.3f}"


class DetectionResult(NamedTuple):
class DetectionResult(Detection, Result):
"""Result for detection model."""

objects: list[Detection] | None = None
@@ -83,17 +123,46 @@ def __str__(self):
return f"{obj_str}{_array_shape_to_str(self.saliency_map)}; {_array_shape_to_str(self.feature_vector)}"


class SegmentedObject(Detection):
def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label, mask):
super().__init__(xmin, ymin, xmax, ymax, score, id, str_label)
class DetectedKeypoints:
def __init__(self, keypoints: np.ndarray, scores: np.ndarray) -> None:
self.keypoints = keypoints
self.scores = scores

def __str__(self):
return (
f"keypoints: {self.keypoints.shape}, "
f"keypoints_x_sum: {np.sum(self.keypoints[:, :1]):.3f}, "
f"scores: {self.scores.shape}"
)
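DetectedKeypoints keeps plain attributes and the same __str__, so a quick smoke test is straightforward (the shapes are arbitrary):

import numpy as np

kpts = DetectedKeypoints(keypoints=np.random.rand(17, 2), scores=np.ones(17))
print(kpts)  # e.g. "keypoints: (17, 2), keypoints_x_sum: ..., scores: (17,)"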


class SegmentedObject:
def __init__(
self,
xmin: int,
ymin: int,
xmax: int,
ymax: int,
score: float,
id: int,
mask: np.ndarray,
str_label: str | None = None,
) -> None:
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.score = score
self.id = id
self.str_label = str_label
self.mask = mask

def __str__(self):
return f"{super().__str__()}, {(self.mask > 0.5).sum()}"


class SegmentedObjectWithRects(SegmentedObject):
def __init__(self, segmented_object, rotated_rect):
def __init__(self, segmented_object: SegmentedObject, rotated_rect: RotatedRect) -> None:
super().__init__(
segmented_object.xmin,
segmented_object.ymin,
@@ -113,11 +182,17 @@ def __str__(self):
return res


class InstanceSegmentationResult(NamedTuple):
segmentedObjects: list[SegmentedObject | SegmentedObjectWithRects]
# Contain per class saliency_maps and "feature_vector" model output if feature_vector exists
saliency_map: list[np.ndarray]
feature_vector: np.ndarray
class InstanceSegmentationResult:
def __init__(
self,
segmentedObjects: list[SegmentedObject | SegmentedObjectWithRects],
saliency_map: list[np.ndarray],
feature_vector: np.ndarray,
) -> None:
self.segmentedObjects = segmentedObjects
# Contain per class saliency_maps and "feature_vector" model output if feature_vector exists
self.saliency_map = saliency_map
self.feature_vector = feature_vector

def __str__(self):
obj_str = "; ".join(str(obj) for obj in self.segmentedObjects)
@@ -129,16 +204,76 @@ def __str__(self):
return prefix + f"{filled}; [{','.join(str(i) for i in self.feature_vector.shape)}]"


class VisualPromptingResult(NamedTuple):
upscaled_masks: list[np.ndarray] | None = None
processed_mask: list[np.ndarray] | None = None
low_res_masks: list[np.ndarray] | None = None
iou_predictions: list[np.ndarray] | None = None
scores: list[np.ndarray] | None = None
labels: list[np.ndarray] | None = None
hard_predictions: list[np.ndarray] | None = None
soft_predictions: list[np.ndarray] | None = None
best_iou: list[float] | None = None
class Contour:
def __init__(
self,
label: str,
probability: float,
shape: list[tuple[int, int]],
) -> None:
self.label = label
self.probability = probability
self.shape = shape

def __str__(self):
return f"{self.label}: {self.probability:.3f}, {len(self.shape)}"


class ImageResultWithSoftPrediction:
def __init__(
self,
resultImage: np.ndarray,
soft_prediction: np.ndarray,
saliency_map: np.ndarray,
feature_vector: np.ndarray,
) -> None:
self.resultImage = resultImage
self.soft_prediction = soft_prediction
# Contain per class saliency_maps and "feature_vector" model output if feature_vector exists
self.saliency_map = saliency_map # Requires return_soft_prediction==True
self.feature_vector = feature_vector

def __str__(self):
outHist = cv2.calcHist(
[self.resultImage.astype(np.uint8)],
channels=None,
mask=None,
histSize=[256],
ranges=[0, 255],
)
hist = ""
for i, count in enumerate(outHist):
if count > 0:
hist += f"{i}: {count[0] / self.resultImage.size:.3f}, "
return (
f"{hist}{_array_shape_to_str(self.soft_prediction)}, "
f"{_array_shape_to_str(self.saliency_map)}, "
f"{_array_shape_to_str(self.feature_vector)}"
)
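And a sketch for ImageResultWithSoftPrediction; the zero arrays are placeholders chosen only so the histogram summary in __str__ has something to report:

import numpy as np

seg = ImageResultWithSoftPrediction(
    resultImage=np.zeros((4, 4), dtype=np.uint8),
    soft_prediction=np.zeros((1, 4, 4), dtype=np.float32),
    saliency_map=np.zeros((1, 4, 4), dtype=np.uint8),
    feature_vector=np.zeros((256,), dtype=np.float32),
)
print(seg)  # histogram summary plus the three array shapes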


class VisualPromptingResult:
def __init__(
self,
upscaled_masks: list[np.ndarray] | None = None,
processed_mask: list[np.ndarray] | None = None,
low_res_masks: list[np.ndarray] | None = None,
iou_predictions: list[np.ndarray] | None = None,
scores: list[np.ndarray] | None = None,
labels: list[np.ndarray] | None = None,
hard_predictions: list[np.ndarray] | None = None,
soft_predictions: list[np.ndarray] | None = None,
best_iou: list[float] | None = None,
) -> None:
self.upscaled_masks = upscaled_masks
self.processed_mask = processed_mask
self.low_res_masks = low_res_masks
self.iou_predictions = iou_predictions
self.scores = scores
self.labels = labels
self.hard_predictions = hard_predictions
self.soft_predictions = soft_predictions
self.best_iou = best_iou

def _compute_min_max(self, tensor: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
return tensor.min(), tensor.max()
@@ -156,10 +291,16 @@ def __str__(self) -> str:
)


class PredictedMask(NamedTuple):
mask: list[np.ndarray]
points: list[np.ndarray] | np.ndarray
scores: list[float] | np.ndarray
class PredictedMask:
def __init__(
self,
mask: list[np.ndarray],
points: list[np.ndarray] | np.ndarray,
scores: list[float] | np.ndarray,
) -> None:
self.mask = mask
self.points = points
self.scores = scores

def __str__(self) -> str:
obj_str = ""
@@ -182,8 +323,9 @@ def __str__(self) -> str:
return obj_str.strip()


class ZSLVisualPromptingResult(NamedTuple):
data: dict[int, PredictedMask]
class ZSLVisualPromptingResult:
def __init__(self, data: dict[int, PredictedMask]) -> None:
self.data: dict[int, PredictedMask] = data

def __str__(self) -> str:
return ", ".join(str(self.data[k]) for k in self.data)
@@ -193,53 +335,6 @@ def get_mask(self, label: int) -> PredictedMask:
return self.data[label]
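With the assignment in __init__ fixed (see above), PredictedMask and ZSLVisualPromptingResult can be exercised like this; all values are synthetic:

import numpy as np

mask = PredictedMask(
    mask=[np.zeros((4, 4), dtype=np.uint8)],
    points=np.array([[1.0, 2.0]]),
    scores=np.array([0.9]),
)
zsl = ZSLVisualPromptingResult(data={0: mask})
print(zsl.get_mask(0))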


class DetectedKeypoints(NamedTuple):
keypoints: np.ndarray
scores: np.ndarray

def __str__(self):
return (
f"keypoints: {self.keypoints.shape}, "
f"keypoints_x_sum: {np.sum(self.keypoints[:, :1]):.3f}, "
f"scores: {self.scores.shape}"
)


class Contour(NamedTuple):
label: str
probability: float
shape: list[tuple[int, int]]

def __str__(self):
return f"{self.label}: {self.probability:.3f}, {len(self.shape)}"


class ImageResultWithSoftPrediction(NamedTuple):
resultImage: np.ndarray
soft_prediction: np.ndarray
# Contain per class saliency_maps and "feature_vector" model output if feature_vector exists
saliency_map: np.ndarray # Requires return_soft_prediction==True
feature_vector: np.ndarray

def __str__(self):
outHist = cv.calcHist(
[self.resultImage.astype(np.uint8)],
channels=None,
mask=None,
histSize=[256],
ranges=[0, 255],
)
hist = ""
for i, count in enumerate(outHist):
if count > 0:
hist += f"{i}: {count[0] / self.resultImage.size:.3f}, "
return (
f"{hist}{_array_shape_to_str(self.soft_prediction)}, "
f"{_array_shape_to_str(self.saliency_map)}, "
f"{_array_shape_to_str(self.feature_vector)}"
)


def _array_shape_to_str(array: np.ndarray | None) -> str:
if array is not None:
return f"[{','.join(str(i) for i in array.shape)}]"
8 changes: 8 additions & 0 deletions model_api/python/model_api/visualizer/__init__.py
@@ -0,0 +1,8 @@
"""Visualizer."""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from .visualizer import Visualizer

__all__ = ["Visualizer"]
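Since the new __init__.py re-exports the class, downstream code can import it from the package root; the rest of the Visualizer API lives in visualizer.py and is not shown in this diff:

from model_api.visualizer import Visualizer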