diff --git a/model_api/python/model_api/models/__init__.py b/model_api/python/model_api/models/__init__.py
index 364e7cac..949501dd 100644
--- a/model_api/python/model_api/models/__init__.py
+++ b/model_api/python/model_api/models/__init__.py
@@ -11,7 +11,7 @@
 from .instance_segmentation import MaskRCNNModel
 from .keypoint_detection import KeypointDetectionModel, TopDownKeypointDetectionPipeline
 from .model import Model
-from .result_types import (
+from .result import (
     AnomalyResult,
     ClassificationResult,
     Contour,
diff --git a/model_api/python/model_api/models/action_classification.py b/model_api/python/model_api/models/action_classification.py
index 52b33561..515fe509 100644
--- a/model_api/python/model_api/models/action_classification.py
+++ b/model_api/python/model_api/models/action_classification.py
@@ -12,7 +12,7 @@
 from model_api.adapters.utils import RESIZE_TYPES, InputTransform
 
 from .model import Model
-from .result_types import ClassificationResult
+from .result import ClassificationResult
 from .types import BooleanValue, ListValue, NumericalValue, StringValue
 from .utils import load_labels
 
diff --git a/model_api/python/model_api/models/anomaly.py b/model_api/python/model_api/models/anomaly.py
index 8968af30..f319092d 100644
--- a/model_api/python/model_api/models/anomaly.py
+++ b/model_api/python/model_api/models/anomaly.py
@@ -15,7 +15,7 @@
 import numpy as np
 
 from model_api.models.image_model import ImageModel
-from model_api.models.result_types import AnomalyResult
+from model_api.models.result import AnomalyResult
 from model_api.models.types import ListValue, NumericalValue, StringValue
 
 if TYPE_CHECKING:
diff --git a/model_api/python/model_api/models/classification.py b/model_api/python/model_api/models/classification.py
index 9a291840..804a8c24 100644
--- a/model_api/python/model_api/models/classification.py
+++ b/model_api/python/model_api/models/classification.py
@@ -17,7 +17,7 @@
 from openvino.runtime import opset10 as opset
 
 from model_api.models.image_model import ImageModel
-from model_api.models.result_types import ClassificationResult
+from model_api.models.result import ClassificationResult
 from model_api.models.types import BooleanValue, ListValue, NumericalValue, StringValue
 from model_api.models.utils import softmax
 
diff --git a/model_api/python/model_api/models/detection_model.py b/model_api/python/model_api/models/detection_model.py
index b56d0074..4fb47cbb 100644
--- a/model_api/python/model_api/models/detection_model.py
+++ b/model_api/python/model_api/models/detection_model.py
@@ -4,7 +4,7 @@
 #
 
 from .image_model import ImageModel
-from .result_types import Detection
+from .result import Detection
 from .types import ListValue, NumericalValue, StringValue
 from .utils import load_labels
diff --git a/model_api/python/model_api/models/instance_segmentation.py b/model_api/python/model_api/models/instance_segmentation.py
index 621c4d61..faf74a6e 100644
--- a/model_api/python/model_api/models/instance_segmentation.py
+++ b/model_api/python/model_api/models/instance_segmentation.py
@@ -9,7 +9,7 @@
 from model_api.adapters.inference_adapter import InferenceAdapter
 
 from .image_model import ImageModel
-from .result_types import InstanceSegmentationResult, SegmentedObject
+from .result import InstanceSegmentationResult, SegmentedObject
 from .types import BooleanValue, ListValue, NumericalValue, StringValue
 from .utils import load_labels
 
diff --git a/model_api/python/model_api/models/keypoint_detection.py b/model_api/python/model_api/models/keypoint_detection.py
index 5ecaa718..668d053e 100644
--- a/model_api/python/model_api/models/keypoint_detection.py
+++ b/model_api/python/model_api/models/keypoint_detection.py
@@ -10,7 +10,7 @@
 import numpy as np
 
 from .image_model import ImageModel
-from .result_types import DetectedKeypoints, Detection
+from .result import DetectedKeypoints, Detection
 from .types import ListValue
 
 
diff --git a/model_api/python/model_api/models/result/__init__.py b/model_api/python/model_api/models/result/__init__.py
new file mode 100644
index 00000000..e1b99bbe
--- /dev/null
+++ b/model_api/python/model_api/models/result/__init__.py
@@ -0,0 +1,36 @@
+"""Model results."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .types import (
+    AnomalyResult,
+    ClassificationResult,
+    Contour,
+    DetectedKeypoints,
+    Detection,
+    DetectionResult,
+    ImageResultWithSoftPrediction,
+    InstanceSegmentationResult,
+    PredictedMask,
+    SegmentedObject,
+    SegmentedObjectWithRects,
+    VisualPromptingResult,
+    ZSLVisualPromptingResult,
+)
+
+__all__ = [
+    "AnomalyResult",
+    "ClassificationResult",
+    "Contour",
+    "Detection",
+    "DetectionResult",
+    "DetectedKeypoints",
+    "SegmentedObject",
+    "SegmentedObjectWithRects",
+    "ImageResultWithSoftPrediction",
+    "InstanceSegmentationResult",
+    "PredictedMask",
+    "VisualPromptingResult",
+    "ZSLVisualPromptingResult",
+]
diff --git a/model_api/python/model_api/models/result/media/__init__.py b/model_api/python/model_api/models/result/media/__init__.py
new file mode 100644
index 00000000..0e7de761
--- /dev/null
+++ b/model_api/python/model_api/models/result/media/__init__.py
@@ -0,0 +1,10 @@
+"""Result visualization media."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .anomaly import AnomalyMedia
+
+__all__ = [
+    "AnomalyMedia",
+]
diff --git a/model_api/python/model_api/models/result/media/anomaly.py b/model_api/python/model_api/models/result/media/anomaly.py
new file mode 100644
index 00000000..6e07c5d5
--- /dev/null
+++ b/model_api/python/model_api/models/result/media/anomaly.py
@@ -0,0 +1,23 @@
+"""Anomaly result media."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import cv2
+
+from model_api.models.result.types import AnomalyResult
+from model_api.visualizer.layout import Flatten, Layout
+from model_api.visualizer.media import Media
+from model_api.visualizer.primitives import Overlay
+
+
+class AnomalyMedia(Media):
+    """Anomaly result media."""
+
+    def __init__(self, result: AnomalyResult) -> None:
+        anomaly_map = cv2.applyColorMap(result.anomaly_map, cv2.COLORMAP_JET)
+        super().__init__(Overlay(anomaly_map))
+
+    @property
+    def default_layout(self) -> Layout:
+        return Flatten(Overlay)
diff --git a/model_api/python/model_api/models/result_types/__init__.py b/model_api/python/model_api/models/result/types/__init__.py
similarity index 97%
rename from model_api/python/model_api/models/result_types/__init__.py
rename to model_api/python/model_api/models/result/types/__init__.py
index 54447d79..9499e814 100644
--- a/model_api/python/model_api/models/result_types/__init__.py
+++ b/model_api/python/model_api/models/result/types/__init__.py
@@ -1,4 +1,4 @@
-"""Result types."""
+"""Result containers."""
 
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
diff --git a/model_api/python/model_api/models/result_types/anomaly.py b/model_api/python/model_api/models/result/types/anomaly.py
similarity index 90%
rename from model_api/python/model_api/models/result_types/anomaly.py
rename to model_api/python/model_api/models/result/types/anomaly.py
index 61f5384c..eab9b24b 100644
--- a/model_api/python/model_api/models/result_types/anomaly.py
+++ b/model_api/python/model_api/models/result/types/anomaly.py
@@ -5,10 +5,15 @@
 
 from __future__ import annotations
 
+import cv2
 import numpy as np
 
+from model_api.visualizer.layout import Flatten, Layout
 
-class AnomalyResult:
+from .base import Result
+
+
+class AnomalyResult(Result):
     """Results for anomaly models."""
 
     def __init__(
@@ -19,6 +24,7 @@ def __init__(
         pred_mask: np.ndarray | None = None,
         pred_score: float | None = None,
     ) -> None:
+        super().__init__()
         self.anomaly_map = anomaly_map
         self.pred_boxes = pred_boxes
         self.pred_label = pred_label
diff --git a/model_api/python/model_api/models/result/types/base.py b/model_api/python/model_api/models/result/types/base.py
new file mode 100644
index 00000000..1d9624cd
--- /dev/null
+++ b/model_api/python/model_api/models/result/types/base.py
@@ -0,0 +1,10 @@
+"""Base result type."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import ABC
+
+
+class Result(ABC):
+    """Base result type."""
diff --git a/model_api/python/model_api/models/result_types/classification.py b/model_api/python/model_api/models/result/types/classification.py
similarity index 94%
rename from model_api/python/model_api/models/result_types/classification.py
rename to model_api/python/model_api/models/result/types/classification.py
index 37fa3c33..37cdb241 100644
--- a/model_api/python/model_api/models/result_types/classification.py
+++ b/model_api/python/model_api/models/result/types/classification.py
@@ -7,13 +7,14 @@
 
 from typing import TYPE_CHECKING
 
+from .base import Result
 from .utils import array_shape_to_str
 
 if TYPE_CHECKING:
     import numpy as np
 
 
-class ClassificationResult:
+class ClassificationResult(Result):
     """Results for classification models."""
 
     def __init__(
diff --git a/model_api/python/model_api/models/result_types/detection.py b/model_api/python/model_api/models/result/types/detection.py
similarity index 100%
rename from model_api/python/model_api/models/result_types/detection.py
rename to model_api/python/model_api/models/result/types/detection.py
diff --git a/model_api/python/model_api/models/result_types/keypoint.py b/model_api/python/model_api/models/result/types/keypoint.py
similarity index 100%
rename from model_api/python/model_api/models/result_types/keypoint.py
rename to model_api/python/model_api/models/result/types/keypoint.py
diff --git a/model_api/python/model_api/models/result_types/segmentation.py b/model_api/python/model_api/models/result/types/segmentation.py
similarity index 100%
rename from model_api/python/model_api/models/result_types/segmentation.py
rename to model_api/python/model_api/models/result/types/segmentation.py
diff --git a/model_api/python/model_api/models/result_types/utils.py b/model_api/python/model_api/models/result/types/utils.py
similarity index 100%
rename from model_api/python/model_api/models/result_types/utils.py
rename to model_api/python/model_api/models/result/types/utils.py
diff --git a/model_api/python/model_api/models/result_types/visual_prompting.py b/model_api/python/model_api/models/result/types/visual_prompting.py
similarity index 100%
rename from model_api/python/model_api/models/result_types/visual_prompting.py
rename to model_api/python/model_api/models/result/types/visual_prompting.py
diff --git a/model_api/python/model_api/models/segmentation.py b/model_api/python/model_api/models/segmentation.py
index cbb5f7c8..dc530e11 100644
--- a/model_api/python/model_api/models/segmentation.py
+++ b/model_api/python/model_api/models/segmentation.py
@@ -11,7 +11,7 @@
 import numpy as np
 
 from model_api.models.image_model import ImageModel
-from model_api.models.result_types import Contour, ImageResultWithSoftPrediction
+from model_api.models.result import Contour, ImageResultWithSoftPrediction
 from model_api.models.types import BooleanValue, ListValue, NumericalValue, StringValue
 from model_api.models.utils import load_labels
 
diff --git a/model_api/python/model_api/models/ssd.py b/model_api/python/model_api/models/ssd.py
index ee6776c9..80e0d8e0 100644
--- a/model_api/python/model_api/models/ssd.py
+++ b/model_api/python/model_api/models/ssd.py
@@ -6,7 +6,7 @@
 import numpy as np
 
 from .detection_model import DetectionModel
-from .result_types import Detection, DetectionResult
+from .result import Detection, DetectionResult
 
 
 class SSD(DetectionModel):
diff --git a/model_api/python/model_api/models/utils.py b/model_api/python/model_api/models/utils.py
index 339e232f..028958c2 100644
--- a/model_api/python/model_api/models/utils.py
+++ b/model_api/python/model_api/models/utils.py
@@ -10,7 +10,7 @@
 import cv2
 import numpy as np
 
-from model_api.models.result_types import Contour, Detection, SegmentedObject, SegmentedObjectWithRects
+from model_api.models.result import Contour, Detection, SegmentedObject, SegmentedObjectWithRects
 
 
 def add_rotated_rects(segmented_objects: list[SegmentedObject]) -> list[SegmentedObjectWithRects]:
diff --git a/model_api/python/model_api/models/yolo.py b/model_api/python/model_api/models/yolo.py
index 2d950072..bf7b4a33 100644
--- a/model_api/python/model_api/models/yolo.py
+++ b/model_api/python/model_api/models/yolo.py
@@ -11,7 +11,7 @@
 from model_api.adapters.utils import INTERPOLATION_TYPES, resize_image_ocv
 
 from .detection_model import DetectionModel
-from .result_types import Detection, DetectionResult
+from .result import Detection, DetectionResult
 from .types import BooleanValue, ListValue, NumericalValue
 from .utils import clip_detections, multiclass_nms, nms
 
@@ -744,8 +744,7 @@ def parameters(cls):
             {
                 "agnostic_nms": BooleanValue(
                     description=(
-                        "If True, the model is agnostic to the number of classes, "
-                        "and all classes are considered as one"
+                        "If True, the model is agnostic to the number of classes, and all classes are considered as one"
                     ),
                     default_value=False,
                 ),
diff --git a/model_api/python/model_api/visualizer/__init__.py b/model_api/python/model_api/visualizer/__init__.py
new file mode 100644
index 00000000..0a29dc3b
--- /dev/null
+++ b/model_api/python/model_api/visualizer/__init__.py
@@ -0,0 +1,10 @@
+"""Visualizer."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .media import Media
+from .primitives import Overlay
+from .visualizer import Visualizer
+
+__all__ = ["Media", "Overlay", "Visualizer"]
diff --git a/model_api/python/model_api/visualizer/layout.py b/model_api/python/model_api/visualizer/layout.py
new file mode 100644
index 00000000..c881898c
--- /dev/null
+++ b/model_api/python/model_api/visualizer/layout.py
@@ -0,0 +1,40 @@
+"""Visualization Layout."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from abc import ABC
+from typing import TYPE_CHECKING, Type
+
+if TYPE_CHECKING:
+    from PIL import Image
+
+    from model_api.visualizer.primitives import Primitive
+
+    from .media import Media
+
+
+class Layout(ABC):
+    """Base class for layouts."""
+
+    def _compute_on_primitive(self, primitive: Type[Primitive], image: Image, media: Media) -> Image | None:
+        if media.has_primitive(primitive):
+            primitives = media.get_primitive(primitive)
+            for prim in primitives:
+                image = prim.compute(image)
+            return image
+        return None
+
+
+class Flatten(Layout):
+    """Put all primitives on top of each other."""
+
+    def __init__(self, *args: Type[Primitive]) -> None:
+        self.children = args
+
+    def __call__(self, image: Image, media: Media) -> Image:
+        _image: Image = image.copy()
+        for child in self.children:
+            _image = self._compute_on_primitive(child, _image, media) or _image  # skip missing primitives
+        return _image
diff --git a/model_api/python/model_api/visualizer/media.py b/model_api/python/model_api/visualizer/media.py
new file mode 100644
index 00000000..2a843e31
--- /dev/null
+++ b/model_api/python/model_api/visualizer/media.py
@@ -0,0 +1,58 @@
+"""Media object."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import abstractmethod
+from typing import Type
+
+from .layout import Layout
+from .primitives import Overlay, Primitive
+
+
+class Media:
+    """Media object.
+
+    Media object that is used by the visualizer to render the prediction.
+
+    Args:
+        *args: Primitives to be added to the prediction.
+
+    Example:
+        >>> media = Media(Overlay(anomaly_map))
+    """
+
+    def __init__(self, *args: Primitive) -> None:
+        self._overlays: list[Overlay] = []
+        self._add_primitives(args)
+
+    def _add_primitives(self, primitives: tuple[Primitive, ...]) -> None:
+        """Add primitives to the prediction."""
+        for primitive in primitives:
+            self._add_primitive(primitive)
+
+    def _add_primitive(self, primitive: Primitive) -> None:
+        """Add primitive."""
+        if isinstance(primitive, Overlay):
+            self._overlays.append(primitive)
+        else:
+            msg = f"Primitive {primitive} not supported"
+            raise ValueError(msg)
+
+    def has_primitive(self, primitive: Type[Primitive]) -> bool:
+        """Check if the primitive type is registered."""
+        if primitive == Overlay:
+            return bool(self._overlays)
+        return False
+
+    def get_primitive(self, primitive: Type[Primitive]) -> list[Primitive]:
+        """Get primitive."""
+        if primitive == Overlay:
+            return self._overlays
+        msg = f"Primitive {primitive} not found"
+        raise ValueError(msg)
+
+    @property
+    @abstractmethod
+    def default_layout(self) -> Layout:
+        """Default layout for the media."""
diff --git a/model_api/python/model_api/visualizer/primitives.py b/model_api/python/model_api/visualizer/primitives.py
new file mode 100644
index 00000000..13116414
--- /dev/null
+++ b/model_api/python/model_api/visualizer/primitives.py
@@ -0,0 +1,44 @@
+"""Base class for primitives."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from io import BytesIO
+from typing import TYPE_CHECKING
+
+import cv2
+from PIL import Image, ImageDraw, ImageFont
+
+if TYPE_CHECKING:
+    import numpy as np
+
+
+class Primitive(ABC):
+    """Primitive class."""
+
+    @abstractmethod
+    def compute(self, **kwargs) -> Image:
+        pass
+
+
+class Overlay(Primitive):
+    """Overlay an image.
+
+    Useful for XAI and Anomaly Maps.
+ """ + + def __init__(self, image: Image | np.ndarray, opacity: float = 0.4) -> None: + self.image = self._to_image(image) + self.opacity = opacity + + def _to_image(self, image: Image | np.ndarray) -> Image: + if isinstance(image, Image.Image): + return image + return Image.fromarray(image) + + def compute(self, image: Image) -> Image: + _image = self.image.resize(image.size) + return Image.blend(image, _image, self.opacity) diff --git a/model_api/python/model_api/visualizer/visualizer.py b/model_api/python/model_api/visualizer/visualizer.py new file mode 100644 index 00000000..8f67395b --- /dev/null +++ b/model_api/python/model_api/visualizer/visualizer.py @@ -0,0 +1,42 @@ +"""Visualizer.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from PIL import Image + + from .layout import Layout + from .media import Media + + +class Visualizer: + def __init__(self, layout: Layout | None = None) -> None: + self.layout = layout + + def show( + self, + image: Image, + result: Media, + ) -> None: + result: Image = self._generate(image, result) + result.show() + + def save( + self, + image: Image, + result: Media, + path: str, + ) -> None: + result: Image = self._generate(image, result) + result.save(path) + + def _generate(self, image: Image, result: Media) -> Image: + if self.layout is not None: + return self.layout(image, result) + return result.default_layout(image, result) diff --git a/model_api/python/pyproject.toml b/model_api/python/pyproject.toml index c011e57f..ed2ba565 100644 --- a/model_api/python/pyproject.toml +++ b/model_api/python/pyproject.toml @@ -30,6 +30,7 @@ dependencies = [ "openvino>=2024.0", "openvino-dev>=2024.0", "omz_tools @ git+https://github.com/openvinotoolkit/open_model_zoo.git@master#egg=omz_tools&subdirectory=tools/model_tools", + "pillow", ] [project.optional-dependencies]