diff --git a/src/python/model_api/visualizer/scene/segmentation/instance_segmentation.py b/src/python/model_api/visualizer/scene/segmentation/instance_segmentation.py
index 9a2f8033..8c89ca01 100644
--- a/src/python/model_api/visualizer/scene/segmentation/instance_segmentation.py
+++ b/src/python/model_api/visualizer/scene/segmentation/instance_segmentation.py
@@ -20,7 +20,8 @@ class InstanceSegmentationScene(Scene):
 
     def __init__(self, image: Image, result: InstanceSegmentationResult, layout: Union[Layout, None] = None) -> None:
         # nosec as random is used for color generation
-        self.color_per_label = {label: f"#{random.randint(0, 0xFFFFFF):06x}" for label in set(result.label_names)}  # noqa: S311 # nosec B311
+        g = random.Random(0)  # noqa: S311 # nosec B311
+        self.color_per_label = {label: f"#{g.randint(0, 0xFFFFFF):06x}" for label in set(result.label_names)}  # nosec B311
         super().__init__(
             base=image,
             label=self._get_labels(result),
@@ -54,7 +55,7 @@ def _get_bounding_boxes(self, result: InstanceSegmentationResult) -> list[Boundi
 
     def _get_overlays(self, result: InstanceSegmentationResult) -> list[Overlay]:
         overlays = []
-        if len(result.saliency_map) > 0:
+        if result.saliency_map is not None and len(result.saliency_map) > 0:
             labels_label_names_mapping = dict(zip(result.labels, result.label_names))
             for label, label_name in labels_label_names_mapping.items():
                 saliency_map = result.saliency_map[label - 1]
diff --git a/src/python/model_api/visualizer/scene/segmentation/segmentation.py b/src/python/model_api/visualizer/scene/segmentation/segmentation.py
index 5129b3f9..12bbaa49 100644
--- a/src/python/model_api/visualizer/scene/segmentation/segmentation.py
+++ b/src/python/model_api/visualizer/scene/segmentation/segmentation.py
@@ -37,7 +37,7 @@ def _get_overlays(self, result: ImageResultWithSoftPrediction) -> list[Overlay]:
             overlays.append(Overlay(class_map, label=f"Class {i}"))
 
         # Add saliency map
-        if result.saliency_map.size > 0:
+        if result.saliency_map is not None and result.saliency_map.size > 0:
             saliency_map = cv2.cvtColor(result.saliency_map, cv2.COLOR_BGR2RGB)
             overlays.append(Overlay(saliency_map, label="Saliency Map"))
 
diff --git a/src/python/model_api/visualizer/visualizer.py b/src/python/model_api/visualizer/visualizer.py
index b2f269be..492481c4 100644
--- a/src/python/model_api/visualizer/visualizer.py
+++ b/src/python/model_api/visualizer/visualizer.py
@@ -1,6 +1,6 @@
 """Visualizer for modelAPI."""
 
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
 from __future__ import annotations  # TODO: remove when Python3.9 support is dropped
@@ -42,18 +42,32 @@ class Visualizer:
     def __init__(self, layout: Layout | None = None) -> None:
         self.layout = layout
 
-    def show(self, image: Image | np.ndarray, result: Result) -> None:
+    def show(self, image: Image.Image | np.ndarray, result: Result) -> None:
         if isinstance(image, np.ndarray):
             image = Image.fromarray(image)
         scene = self._scene_from_result(image, result)
         return scene.show()
 
-    def save(self, image: Image | np.ndarray, result: Result, path: Path) -> None:
+    def save(self, image: Image.Image | np.ndarray, result: Result, path: Path) -> None:
         if isinstance(image, np.ndarray):
             image = Image.fromarray(image)
         scene = self._scene_from_result(image, result)
         scene.save(path)
 
+    def render(self, image: Image.Image | np.ndarray, result: Result) -> Image.Image | np.ndarray:
+        is_numpy = isinstance(image, np.ndarray)
+
+        if is_numpy:
+            image = Image.fromarray(image)
+
+        scene = self._scene_from_result(image, result)
+        result_img: Image = scene.render()
+
+        if is_numpy:
+            return np.array(result_img)
+
+        return result_img
+
     def _scene_from_result(self, image: Image, result: Result) -> Scene:
         scene: Scene
         if isinstance(result, AnomalyResult):
diff --git a/tests/python/unit/visualizer/test_scene.py b/tests/python/unit/visualizer/test_scene.py
index dd9351a5..c15ea62a 100644
--- a/tests/python/unit/visualizer/test_scene.py
+++ b/tests/python/unit/visualizer/test_scene.py
@@ -7,6 +7,7 @@
 
 import numpy as np
 from PIL import Image
+import pytest
 
 from model_api.models.result import (
     AnomalyResult,
@@ -71,7 +72,8 @@ def test_detection_scene(mock_image: Image, tmpdir: Path):
     assert Path(tmpdir / "detection_scene.jpg").exists()
 
 
-def test_segmentation_scene(mock_image: Image, tmpdir: Path):
+@pytest.mark.parametrize("with_saliency_map", [True, False])
+def test_segmentation_scene(mock_image: Image, tmpdir: Path, with_saliency_map: bool):
     """Test if the segmentation scene is created."""
     visualizer = Visualizer()
 
@@ -85,7 +87,9 @@ def test_segmentation_scene(mock_image: Image, tmpdir: Path):
         ),
         scores=np.array([0.85, 0.75]),
         label_names=["person", "car"],
-        saliency_map=[np.ones((128, 128), dtype=np.uint8) * 255],
+        saliency_map=[np.ones((128, 128), dtype=np.uint8) * 255]
+        if with_saliency_map
+        else None,
         feature_vector=np.array([1, 2, 3, 4]),
     )
 
@@ -104,7 +108,9 @@ def test_segmentation_scene(mock_image: Image, tmpdir: Path):
         soft_prediction=np.ones(
             (3, 3, 3), dtype=np.float32
         ),  # 3 classes, 3x3 prediction
-        saliency_map=np.ones((3, 3), dtype=np.uint8) * 255,
+        saliency_map=np.ones((3, 3), dtype=np.uint8) * 255
+        if with_saliency_map
+        else None,
         feature_vector=np.array([1, 2, 3, 4]),
     )
 
diff --git a/tests/python/unit/visualizer/test_visualizer.py b/tests/python/unit/visualizer/test_visualizer.py
new file mode 100644
index 00000000..9d000a49
--- /dev/null
+++ b/tests/python/unit/visualizer/test_visualizer.py
@@ -0,0 +1,43 @@
+"""Tests for visualizer."""
+
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from pathlib import Path
+
+import numpy as np
+from PIL import Image
+
+from model_api.models.result import (
+    AnomalyResult,
+)
+from model_api.visualizer import Visualizer
+
+
+def test_render(mock_image: Image, tmpdir: Path):
+    """Test Visualizer.render()."""
+    heatmap = np.ones(mock_image.size, dtype=np.uint8)
+    heatmap *= 255
+
+    mask = np.zeros(mock_image.size, dtype=np.uint8)
+    mask[32:96, 32:96] = 255
+    mask[40:80, 0:128] = 255
+
+    anomaly_result = AnomalyResult(
+        anomaly_map=heatmap,
+        pred_boxes=np.array([[0, 0, 128, 128], [32, 32, 96, 96]]),
+        pred_label="Anomaly",
+        pred_mask=mask,
+        pred_score=0.85,
+    )
+
+    visualizer = Visualizer()
+    rendered_img = visualizer.render(mock_image, anomaly_result)
+
+    assert isinstance(rendered_img, Image.Image)
+    assert np.array(rendered_img).shape == np.array(mock_image).shape
+
+    rendered_img_np = visualizer.render(np.array(mock_image), anomaly_result)
+
+    assert isinstance(rendered_img_np, np.ndarray)
+    assert rendered_img_np.shape == np.array(mock_image).shape
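Usage sketch for the new Visualizer.render() API introduced above: unlike show() and save(), it returns the rendered frame and preserves the input container type (PIL image in, PIL image out; numpy array in, numpy array out). The 128x128 synthetic image and the AnomalyResult field values below are illustrative placeholders modelled on test_visualizer.py, not outputs of a real model.

import numpy as np
from PIL import Image

from model_api.models.result import AnomalyResult
from model_api.visualizer import Visualizer

# Placeholder input image and result; in practice these come from a model.
image = Image.fromarray(np.zeros((128, 128, 3), dtype=np.uint8))
result = AnomalyResult(
    anomaly_map=np.full((128, 128), 255, dtype=np.uint8),
    pred_boxes=np.array([[32, 32, 96, 96]]),
    pred_label="Anomaly",
    pred_mask=np.full((128, 128), 255, dtype=np.uint8),
    pred_score=0.85,
)

visualizer = Visualizer()

# PIL image in -> PIL image out.
pil_out = visualizer.render(image, result)
assert isinstance(pil_out, Image.Image)

# numpy array in -> numpy array out.
np_out = visualizer.render(np.array(image), result)
assert isinstance(np_out, np.ndarray)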