From 1ccec81208a191786fc6cdf2297faeb745ae30ed Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Thu, 31 Oct 2024 10:58:11 +0100 Subject: [PATCH 1/6] add mypy Signed-off-by: Ashwin Vaidya --- .pre-commit-config.yaml | 7 ++++ docs/source/conf.py | 2 +- examples/python/__init__.py | 2 + .../model_api/adapters/inference_adapter.py | 40 ++++++++++--------- model_api/python/model_api/adapters/utils.py | 4 +- .../model_api/models/action_classification.py | 11 ++++- model_api/python/model_api/models/anomaly.py | 6 ++- .../python/model_api/models/classification.py | 23 ++++++++--- model_api/python/model_api/models/model.py | 37 ++++++----------- .../python/model_api/models/sam_models.py | 6 +++ model_api/python/model_api/models/types.py | 2 +- .../model_api/models/visual_prompting.py | 21 +++++----- model_api/python/model_api/tilers/tiler.py | 4 +- model_api/python/pyproject.toml | 12 ++++++ tests/python/accuracy/__init__.py | 2 + tests/python/accuracy/test_accuracy.py | 27 ++----------- tests/python/precommit/__init__.py | 2 + 17 files changed, 118 insertions(+), 90 deletions(-) create mode 100644 examples/python/__init__.py create mode 100644 tests/python/accuracy/__init__.py create mode 100644 tests/python/precommit/__init__.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 073226ab..94815e97 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,6 +13,13 @@ repos: - id: debug-statements - id: detect-private-key + # python static type checking + - repo: https://github.com/pre-commit/mirrors-mypy + rev: "v1.11.2" + hooks: + - id: mypy + additional_dependencies: [types-PyYAML, types-setuptools] + - repo: https://github.com/pre-commit/mirrors-prettier rev: v4.0.0-alpha.8 hooks: diff --git a/docs/source/conf.py b/docs/source/conf.py index c5d8d49f..e3cb504a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -42,7 +42,7 @@ ] templates_path = ['_templates'] -exclude_patterns = [] +exclude_patterns: list[str] = [] # Automatic exclusion of prompts from the copies # https://sphinx-copybutton.readthedocs.io/en/latest/use.html#automatic-exclusion-of-prompts-from-the-copies diff --git a/examples/python/__init__.py b/examples/python/__init__.py new file mode 100644 index 00000000..916f3a44 --- /dev/null +++ b/examples/python/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/model_api/python/model_api/adapters/inference_adapter.py b/model_api/python/model_api/adapters/inference_adapter.py index 40112a0c..e0e90d93 100644 --- a/model_api/python/model_api/adapters/inference_adapter.py +++ b/model_api/python/model_api/adapters/inference_adapter.py @@ -14,9 +14,9 @@ limitations under the License. """ -import abc +from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import Dict, List, Set, Tuple +from typing import Any, Dict, List, Set, Tuple @dataclass @@ -29,7 +29,7 @@ class Metadata: meta: Dict = field(default_factory=dict) -class InferenceAdapter(metaclass=abc.ABCMeta): +class InferenceAdapter(ABC): """ An abstract Model Adapter with the following interface: @@ -43,20 +43,25 @@ class InferenceAdapter(metaclass=abc.ABCMeta): precisions = ("FP32", "I32", "FP16", "I16", "I8", "U8") - @abc.abstractmethod - def __init__(self): + @abstractmethod + def __init__(self) -> None: """ An abstract Model Adapter constructor. Reads the model from disk or other place. 
""" + self.model: Any - @abc.abstractmethod + @abstractmethod def load_model(self): """ Loads the model on the device. """ - @abc.abstractmethod + @abstractmethod + def get_model(self): + """Get the model.""" + + @abstractmethod def get_input_layers(self): """ Gets the names of model inputs and for each one creates the Metadata structure, @@ -67,7 +72,7 @@ def get_input_layers(self): - the dict containing Metadata for all inputs """ - @abc.abstractmethod + @abstractmethod def get_output_layers(self): """ Gets the names of model outputs and for each one creates the Metadata structure, @@ -78,7 +83,7 @@ def get_output_layers(self): - the dict containing Metadata for all outputs """ - @abc.abstractmethod + @abstractmethod def reshape_model(self, new_shape): """ Reshapes the model inputs to fit the new input shape. @@ -93,7 +98,7 @@ def reshape_model(self, new_shape): } """ - @abc.abstractmethod + @abstractmethod def infer_sync(self, dict_data): """ Performs the synchronous model inference. The infer is a blocking method. @@ -115,8 +120,8 @@ def infer_sync(self, dict_data): } """ - @abc.abstractmethod - def infer_async(self, dict_data, callback_fn, callback_data): + @abstractmethod + def infer_async(self, dict_data, callback_data): """ Performs the asynchronous model inference and sets the callback for inference completion. Also, it should @@ -130,11 +135,10 @@ def infer_async(self, dict_data, callback_fn, callback_data): 'input_layer_name_2': data_2, ... } - - callback_fn: the callback function, which is defined outside the adapter - callback_data: the data for callback, that will be taken after the model inference is ended """ - @abc.abstractmethod + @abstractmethod def is_ready(self): """ In case of asynchronous execution checks if one can submit input data @@ -145,27 +149,27 @@ def is_ready(self): submitted to the model for inference or not """ - @abc.abstractmethod + @abstractmethod def await_all(self): """ In case of asynchronous execution waits the completion of all busy infer requests. """ - @abc.abstractmethod + @abstractmethod def await_any(self): """ In case of asynchronous execution waits the completion of any busy infer request until it becomes available for the data submission. 
""" - @abc.abstractmethod + @abstractmethod def get_rt_info(self, path): """ Forwards to openvino.Model.get_rt_info(path) """ - @abc.abstractmethod + @abstractmethod def embed_preprocessing( self, layout, diff --git a/model_api/python/model_api/adapters/utils.py b/model_api/python/model_api/adapters/utils.py index cba16073..29aeb594 100644 --- a/model_api/python/model_api/adapters/utils.py +++ b/model_api/python/model_api/adapters/utils.py @@ -16,7 +16,7 @@ import math from functools import partial -from typing import Optional +from typing import Callable, Optional import cv2 import numpy as np @@ -492,7 +492,7 @@ def crop_resize_ocv(image, size): return cv2.resize(cropped_frame, size) -RESIZE_TYPES = { +RESIZE_TYPES:dict[str, Callable] = { "crop": crop_resize_ocv, "standard": resize_image_ocv, "fit_to_window": resize_image_with_aspect_ocv, diff --git a/model_api/python/model_api/models/action_classification.py b/model_api/python/model_api/models/action_classification.py index f887642c..836009d8 100644 --- a/model_api/python/model_api/models/action_classification.py +++ b/model_api/python/model_api/models/action_classification.py @@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Any import numpy as np + from model_api.adapters.utils import RESIZE_TYPES, InputTransform from .model import Model @@ -74,6 +75,14 @@ def __init__( self.image_blob_names = self._get_inputs() self.image_blob_name = self.image_blob_names[0] self.nscthw_layout = "NSCTHW" in self.inputs[self.image_blob_name].layout + self.labels: list[str] + self.path_to_labels: str + self.mean_values:list[int|float] + self.pad_value: int + self.resize_type: str + self.reverse_input_channels: bool + self.scale_values: list[int|float] + if self.nscthw_layout: self.n, self.s, self.c, self.t, self.h, self.w = self.inputs[ self.image_blob_name @@ -129,7 +138,7 @@ def parameters(cls) -> dict[str, Any]: ) return parameters - def _get_inputs(self) -> tuple[list[str], list[str]]: + def _get_inputs(self) -> list[str]: """Defines the model inputs for images and additional info. Raises: diff --git a/model_api/python/model_api/models/anomaly.py b/model_api/python/model_api/models/anomaly.py index 717a5406..b482d41a 100644 --- a/model_api/python/model_api/models/anomaly.py +++ b/model_api/python/model_api/models/anomaly.py @@ -24,6 +24,8 @@ import cv2 import numpy as np +from model_api.adapters.inference_adapter import InferenceAdapter + from .image_model import ImageModel from .types import ListValue, NumericalValue, StringValue from .utils import AnomalyResult @@ -32,7 +34,7 @@ class AnomalyDetection(ImageModel): __model__ = "AnomalyDetection" - def __init__(self, inference_adapter, configuration=dict(), preload=False): + def __init__(self, inference_adapter:InferenceAdapter, configuration:dict=dict(), preload:bool=False) -> None: super().__init__(inference_adapter, configuration, preload) self._check_io_number(1, 1) self.normalization_scale: float @@ -41,7 +43,7 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False): self.task: str self.labels: list[str] - def postprocess(self, outputs: dict[str, np.ndarray], meta: dict[str, Any]): + def postprocess(self, outputs: dict[str, np.ndarray], meta: dict[str, Any]) -> AnomalyResult: """Post-processes the outputs and returns the results. 
Args: diff --git a/model_api/python/model_api/models/classification.py b/model_api/python/model_api/models/classification.py index b0470c7d..5958bed0 100644 --- a/model_api/python/model_api/models/classification.py +++ b/model_api/python/model_api/models/classification.py @@ -25,6 +25,8 @@ from openvino.runtime import Model, Type from openvino.runtime import opset10 as opset +from model_api.adapters.inference_adapter import InferenceAdapter + from .image_model import ImageModel from .types import BooleanValue, ListValue, NumericalValue, StringValue from .utils import ClassificationResult @@ -33,13 +35,24 @@ class ClassificationModel(ImageModel): __model__ = "Classification" - def __init__(self, inference_adapter, configuration=dict(), preload=False): + def __init__(self, inference_adapter: InferenceAdapter, configuration:dict=dict(), preload:bool=False): super().__init__(inference_adapter, configuration, preload=False) + self.topk:int + self.labels:list[str] + self.path_to_labels:str + self.multilabel: bool + self.hierarchical: bool + self.hierarchical_config: str + self.confidence_threshold: float + self.output_raw_scores: bool + self.hierarchical_postproc: str + self.labels_resolver: GreedyLabelsResolver | ProbabilisticLabelsResolver + self._check_io_number(1, (1, 2, 3, 4, 5)) if self.path_to_labels: self.labels = self._load_labels(self.path_to_labels) if 1 == len(self.outputs): - self._verify_signle_output() + self._verify_single_output() self.raw_scores_name = _raw_scores_name if self.hierarchical: @@ -108,7 +121,7 @@ def _load_labels(self, labels_file): labels.append(s[(begin_idx + 1) : end_idx]) return labels - def _verify_signle_output(self): + def _verify_single_output(self): layer_name = next(iter(self.outputs)) layer_shape = self.outputs[layer_name].shape @@ -207,7 +220,7 @@ def get_saliency_maps(self, outputs: dict) -> np.ndarray: if not self.hierarchical: return saliency_maps - reordered_saliency_maps = [[] for _ in range(len(saliency_maps))] + reordered_saliency_maps:list[list[ np.ndarray]] = [[] for _ in range(len(saliency_maps))] model_classes = self.hierarchical_info["cls_heads_info"]["class_to_group_idx"] label_to_model_out_idx = {lbl: i for i, lbl in enumerate(model_classes.keys())} for batch in range(len(saliency_maps)): @@ -296,7 +309,7 @@ def get_multiclass_predictions(self, outputs): return list(zip(indicesTensor, labels, scoresTensor)) -def addOrFindSoftmaxAndTopkOutputs(inference_adapter, topk, output_raw_scores): +def addOrFindSoftmaxAndTopkOutputs(inference_adapter:InferenceAdapter, topk:int, output_raw_scores:bool): softmaxNode = None for i in range(len(inference_adapter.model.outputs)): output_node = ( diff --git a/model_api/python/model_api/models/model.py b/model_api/python/model_api/models/model.py index 78f7ce97..4a208799 100644 --- a/model_api/python/model_api/models/model.py +++ b/model_api/python/model_api/models/model.py @@ -16,6 +16,7 @@ import logging as log import re +from abc import ABC from contextlib import contextmanager from model_api.adapters.inference_adapter import InferenceAdapter @@ -35,7 +36,7 @@ def __init__(self, wrapper_name, message): super().__init__(f"{wrapper_name}: {message}") -class Model: +class Model(ABC): """An abstract model wrapper The abstract model wrapper is free from any executor dependencies. 
@@ -61,7 +62,7 @@ class Model: model_loaded (bool): a flag whether the model is loaded to device """ - __model__ = None # Abstract wrapper has no name + __model__ :str def __init__(self, inference_adapter, configuration=dict(), preload=False): """Model constructor @@ -101,19 +102,11 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False): self.callback_fn = lambda _: None def get_model(self): - """Returns the ov.Model object stored in the InferenceAdapter. - - Note: valid only for local inference - - Returns: - ov.Model object - Raises: - RuntimeError: in case of remote inference (serving) - """ - if isinstance(self.inference_adapter, OpenvinoAdapter): - return self.inference_adapter.get_model() - - raise RuntimeError("get_model() is not supported for remote inference") + model = self.inference_adapter.get_model() + model.set_rt_info(self.__model__, ["model_info", "model_type"]) + for name in self.parameters(): + model.set_rt_info(getattr(self, name), ["model_info", name]) + return model @classmethod def get_model_class(cls, name): @@ -281,8 +274,8 @@ def _load_config(self, config): errors = parameters[name].validate(value) if errors: self.logger.error(f'Error with "{name}" parameter:') - for error in errors: - self.logger.error(f"\t{error}") + for _error in errors: + self.logger.error(f"\t{_error}") self.raise_error("Incorrect user configuration") value = parameters[name].get_value(value) self.__setattr__(name, value) @@ -359,7 +352,7 @@ def _check_io_number(self, number_of_inputs, number_of_outputs): ) ) else: - if not len(self.inputs) in number_of_inputs: + if len(self.inputs) not in number_of_inputs: self.raise_error( "Expected {} or {} input blobs, but {} found: {}".format( ", ".join(str(n) for n in number_of_inputs[:-1]), @@ -380,7 +373,7 @@ def _check_io_number(self, number_of_inputs, number_of_outputs): ) ) else: - if not len(self.outputs) in number_of_outputs: + if len(self.outputs) not in number_of_outputs: self.raise_error( "Expected {} or {} output blobs, but {} found: {}".format( ", ".join(str(n) for n in number_of_outputs[:-1]), @@ -523,12 +516,6 @@ def log_layers_info(self): ) ) - def get_model(self): - model = self.inference_adapter.get_model() - model.set_rt_info(self.__model__, ["model_info", "model_type"]) - for name in self.parameters(): - model.set_rt_info(getattr(self, name), ["model_info", name]) - return model def save(self, xml_path, bin_path="", version="UNSPECIFIED"): import openvino diff --git a/model_api/python/model_api/models/sam_models.py b/model_api/python/model_api/models/sam_models.py index e400fc7f..49088222 100644 --- a/model_api/python/model_api/models/sam_models.py +++ b/model_api/python/model_api/models/sam_models.py @@ -20,6 +20,7 @@ from typing import Any, Dict import numpy as np + from model_api.adapters.inference_adapter import InferenceAdapter from model_api.models.types import BooleanValue, NumericalValue @@ -40,6 +41,8 @@ def __init__( ): super().__init__(inference_adapter, configuration, preload) self.output_name: str = list(self.outputs.keys())[0] + self.resize_type:str + self.image_size: int @classmethod def parameters(cls) -> dict[str, Any]: @@ -82,6 +85,9 @@ def __init__( self.mask_input = np.zeros((1, 1, 256, 256), dtype=np.float32) self.has_mask_input = np.zeros((1, 1), dtype=np.float32) + self.image_size: int + self.mask_threshold: float + self.embed_dim: int @classmethod def parameters(cls) -> dict[str, Any]: diff --git a/model_api/python/model_api/models/types.py b/model_api/python/model_api/models/types.py 
index 1ab8faaa..5d2dc460 100644 --- a/model_api/python/model_api/models/types.py +++ b/model_api/python/model_api/models/types.py @@ -39,7 +39,7 @@ def get_value(self, value): if len(errors) == 0: return value if value is not None else self.default_value - def build_error(): + def build_error(self) -> None: pass def __str__(self) -> str: diff --git a/model_api/python/model_api/models/visual_prompting.py b/model_api/python/model_api/models/visual_prompting.py index 40c1cf89..2ed96289 100644 --- a/model_api/python/model_api/models/visual_prompting.py +++ b/model_api/python/model_api/models/visual_prompting.py @@ -19,6 +19,7 @@ import cv2 import numpy as np + from model_api.models import SAMDecoder, SAMImageEncoder from model_api.models.utils import ( PredictedMask, @@ -160,13 +161,12 @@ def __init__( """ self.encoder = encoder_model self.decoder = decoder_model + self._used_indices: np.ndarray | None = None + self._reference_features: np.ndarray | None = None if reference_features is not None: self._reference_features = reference_features.feature_vectors self._used_indices = reference_features.used_indices - else: - self._reference_features = None - self._used_indices = None self._point_labels_box = np.array([[2, 3]], dtype=np.float32) self._has_mask_inputs = [np.array([[0.0]]), np.array([[1.0]])] @@ -299,8 +299,9 @@ def learn( ) cur_default_threshold_reference -= 0.05 - self._reference_features[label] = ref_feat - self._used_indices: np.ndarray = np.concatenate( + if self._reference_features is not None: + self._reference_features[label] = ref_feat + self._used_indices = np.concatenate( (self._used_indices, [label]) ) ref_masks[label] = ref_mask @@ -374,7 +375,7 @@ def infer( downsizing=self._downsizing, ) - predicted_masks: defaultdict[int, list] = defaultdict(list) + predicted_masks: dict[int, list] = defaultdict(list) used_points: defaultdict[int, list] = defaultdict(list) for label in total_points_scores: points_scores = total_points_scores[label] @@ -405,18 +406,18 @@ def infer( } inputs_decoder["image_embeddings"] = image_embeddings - prediction = self._predict_masks( + _prediction:dict[str, np.ndarray] = self._predict_masks( inputs_decoder, original_shape, apply_masks_refinement ) - prediction.update({"scores": points_score[-1]}) + _prediction.update({"scores": points_score[-1]}) - predicted_masks[label].append(prediction[self.decoder.output_blob_name]) + predicted_masks[label].append(_prediction[self.decoder.output_blob_name]) used_points[label].append(points_score) # check overlapping area between different label masks _inspect_overlapping_areas(predicted_masks, used_points) - prediction = {} + prediction:dict[int, PredictedMask] = {} for k in used_points: processed_points = [] scores = [] diff --git a/model_api/python/model_api/tilers/tiler.py b/model_api/python/model_api/tilers/tiler.py index 6bae39a9..0f503369 100644 --- a/model_api/python/model_api/tilers/tiler.py +++ b/model_api/python/model_api/tilers/tiler.py @@ -149,8 +149,8 @@ def _load_config(self, config): errors = parameters[name].validate(value) if errors: self.logger.error(f'Error with "{name}" parameter:') - for error in errors: - self.logger.error(f"\t{error}") + for _error in errors: + self.logger.error(f"\t{_error}") raise RuntimeError("Incorrect user configuration") value = parameters[name].get_value(value) self.__setattr__(name, value) diff --git a/model_api/python/pyproject.toml b/model_api/python/pyproject.toml index c5bb769b..4c5c4f06 100644 --- a/model_api/python/pyproject.toml +++ 
b/model_api/python/pyproject.toml @@ -65,3 +65,15 @@ Repository = "https://github.com/openvinotoolkit/model_api.git" [tool.setuptools.packages.find] include = ["model_api*"] + + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# MYPY CONFIGURATION. # +[tool.mypy] +ignore_missing_imports = true +show_error_codes = true + + +[[tool.mypy.overrides]] +follow_imports = "skip" +follow_imports_for_stubs = true diff --git a/tests/python/accuracy/__init__.py b/tests/python/accuracy/__init__.py new file mode 100644 index 00000000..916f3a44 --- /dev/null +++ b/tests/python/accuracy/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/python/accuracy/test_accuracy.py b/tests/python/accuracy/test_accuracy.py index b9635c0a..91acde88 100644 --- a/tests/python/accuracy/test_accuracy.py +++ b/tests/python/accuracy/test_accuracy.py @@ -6,40 +6,24 @@ import numpy as np import onnx import pytest + from model_api.adapters.onnx_adapter import ONNXRuntimeAdapter from model_api.adapters.openvino_adapter import OpenvinoAdapter, create_core from model_api.adapters.utils import load_parameters_from_onnx from model_api.models import ( ActionClassificationModel, - AnomalyDetection, AnomalyResult, - ClassificationModel, ClassificationResult, DetectedKeypoints, - DetectionModel, DetectionResult, - ImageModel, ImageResultWithSoftPrediction, InstanceSegmentationResult, - KeypointDetectionModel, - MaskRCNNModel, - PredictedMask, Prompt, - SAMDecoder, - SAMImageEncoder, - SAMLearnableVisualPrompter, - SAMVisualPrompter, - SegmentationModel, VisualPromptingResult, ZSLVisualPromptingResult, add_rotated_rects, get_contours, ) -from model_api.tilers import ( - DetectionTiler, - InstanceSegmentationTiler, - SemanticSegmentationTiler, -) def read_config(path: Path): @@ -65,21 +49,18 @@ def create_models(model_type, model_path, download_dir, force_onnx_adapter=False ] if model_path.endswith(".xml"): wrapper_type = model_type.get_model_class( - create_models.core.read_model(model_path) + create_core().read_model(model_path) .get_rt_info(["model_info", "model_type"]) .astype(str) ) model = wrapper_type( - OpenvinoAdapter(create_models.core, model_path, device="CPU") + OpenvinoAdapter(create_core(), model_path, device="CPU") ) model.load() models.append(model) return models -create_models.core = create_core() - - @pytest.fixture(scope="session") def data(pytestconfig): return pytestconfig.getoption("data") @@ -262,7 +243,7 @@ def test_image_models(data, dump, result, model_data): model.save(data + "/serialized/" + save_name) if model_data.get("check_extra_rt_info", False): assert ( - create_models.core.read_model(data + "/serialized/" + save_name) + create_core().read_model(data + "/serialized/" + save_name) .get_rt_info(["model_info", "label_ids"]) .astype(str) ) diff --git a/tests/python/precommit/__init__.py b/tests/python/precommit/__init__.py new file mode 100644 index 00000000..916f3a44 --- /dev/null +++ b/tests/python/precommit/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 From e809f4eb81d0cca3cea0319848c968d1630f2d19 Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Thu, 31 Oct 2024 11:52:03 +0100 Subject: [PATCH 2/6] fix imports Signed-off-by: Ashwin Vaidya --- tests/python/accuracy/test_accuracy.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/python/accuracy/test_accuracy.py b/tests/python/accuracy/test_accuracy.py index 
2e73c9c1..96524ba2 100644 --- a/tests/python/accuracy/test_accuracy.py +++ b/tests/python/accuracy/test_accuracy.py @@ -15,18 +15,35 @@ # flake8: noqa: F401 from model_api.models import ( ActionClassificationModel, + AnomalyDetection, AnomalyResult, + ClassificationModel, ClassificationResult, DetectedKeypoints, + DetectionModel, DetectionResult, + ImageModel, ImageResultWithSoftPrediction, InstanceSegmentationResult, + KeypointDetectionModel, + MaskRCNNModel, + PredictedMask, Prompt, + SAMDecoder, + SAMImageEncoder, + SAMLearnableVisualPrompter, + SAMVisualPrompter, + SegmentationModel, VisualPromptingResult, ZSLVisualPromptingResult, add_rotated_rects, get_contours, ) +from model_api.tilers import ( + DetectionTiler, + InstanceSegmentationTiler, + SemanticSegmentationTiler, +) def read_config(path: Path): From 1c4417365842bbd12f59a34a4273957d5d5d93cc Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Thu, 31 Oct 2024 12:27:51 +0100 Subject: [PATCH 3/6] add __model__ to detection Signed-off-by: Ashwin Vaidya --- model_api/python/model_api/models/detection_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/model_api/python/model_api/models/detection_model.py b/model_api/python/model_api/models/detection_model.py index 0e833af9..3471fb31 100644 --- a/model_api/python/model_api/models/detection_model.py +++ b/model_api/python/model_api/models/detection_model.py @@ -28,6 +28,8 @@ class DetectionModel(ImageModel): The `postprocess` method must be implemented in a specific inherited wrapper. """ + __model__ = "DetectionModel" + def __init__(self, inference_adapter, configuration=dict(), preload=False): """Detection Model constructor From 66d7e35ca30dc33a342bd64fcd59f013de8a941a Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Thu, 31 Oct 2024 12:38:32 +0100 Subject: [PATCH 4/6] add get_model to ovms_adapter Signed-off-by: Ashwin Vaidya --- model_api/python/model_api/adapters/ovms_adapter.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/model_api/python/model_api/adapters/ovms_adapter.py b/model_api/python/model_api/adapters/ovms_adapter.py index b18ef372..8d6c2a2c 100644 --- a/model_api/python/model_api/adapters/ovms_adapter.py +++ b/model_api/python/model_api/adapters/ovms_adapter.py @@ -97,6 +97,10 @@ def is_ready(self): def load_model(self): pass + def get_model(self): + """Return the reference to the GrpcClient.""" + return self.client + def await_all(self): pass From 053168132b79a6a412f526d7d071f2682a22f228 Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Thu, 31 Oct 2024 12:57:23 +0100 Subject: [PATCH 5/6] add __model__ to Model Signed-off-by: Ashwin Vaidya --- model_api/python/model_api/models/image_model.py | 2 ++ model_api/python/model_api/models/model.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/model_api/python/model_api/models/image_model.py b/model_api/python/model_api/models/image_model.py index 4e6407a9..25780c3d 100644 --- a/model_api/python/model_api/models/image_model.py +++ b/model_api/python/model_api/models/image_model.py @@ -30,6 +30,8 @@ class ImageModel(Model): input_transform (InputTransform): instance of the `InputTransform` for image normalization """ + __model__ = "ImageModel" + def __init__(self, inference_adapter, configuration=dict(), preload=False): """Image model constructor diff --git a/model_api/python/model_api/models/model.py b/model_api/python/model_api/models/model.py index 2a9388ac..3d2f4a1e 100644 --- a/model_api/python/model_api/models/model.py +++ b/model_api/python/model_api/models/model.py @@ -25,7 
+25,7 @@ def __init__(self, wrapper_name, message): super().__init__(f"{wrapper_name}: {message}") -class Model(ABC): +class Model: """An abstract model wrapper The abstract model wrapper is free from any executor dependencies. @@ -51,7 +51,7 @@ class Model(ABC): model_loaded (bool): a flag whether the model is loaded to device """ - __model__: str + __model__: str = "Model" def __init__(self, inference_adapter, configuration=dict(), preload=False): """Model constructor @@ -150,7 +150,7 @@ def create_model( cache_dir (:obj:`str`, optional): directory where to store compiled models to reduce the load time before the inference Returns: - Model objcet + Model object """ if isinstance(model, InferenceAdapter): inference_adapter = model From 7d33688fe5fce1b1f4cbdaa42ecdaba07f3e449b Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Thu, 31 Oct 2024 13:10:56 +0100 Subject: [PATCH 6/6] Add get_model method to ONNXRuntimeAdapter Signed-off-by: Ashwin Vaidya --- model_api/python/model_api/adapters/onnx_adapter.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/model_api/python/model_api/adapters/onnx_adapter.py b/model_api/python/model_api/adapters/onnx_adapter.py index 08af61e6..6d03380b 100644 --- a/model_api/python/model_api/adapters/onnx_adapter.py +++ b/model_api/python/model_api/adapters/onnx_adapter.py @@ -162,6 +162,10 @@ def embed_preprocessing( reversed(preproc_funcs), ) + def get_model(self): + """Return the reference to the ONNXRuntime session.""" + return self.session + def reshape_model(self, new_shape): raise NotImplementedError
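
A note on the annotation pattern used throughout this series: mypy cannot infer an element type from an empty container literal, so bare assignments such as exclude_patterns = [] are rejected with a "Need type annotation" error once the hook from patch 1 is enabled, and attributes that are only assigned later (for example inside _load_config) need an up-front declaration. The snippet below is a plain illustration, not part of any file touched by these patches; the file and names (handlers, Example, configure) are made up for demonstration only.

    # hypothetical illustration of the mypy behaviour motivating the added annotations
    from typing import Callable

    # without the annotation, mypy reports: Need type annotation for "handlers"
    # the annotated form matches RESIZE_TYPES and exclude_patterns in this series
    handlers: dict[str, Callable] = {}

    class Example:
        def __init__(self) -> None:
            # annotation without a value, mirroring the self.confidence_threshold: float
            # style declarations added to ClassificationModel.__init__
            self.confidence_threshold: float

        def configure(self, value: float) -> None:
            self.confidence_threshold = value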
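
For the reworked get_model() contract, the intent is that every adapter can hand back its underlying model object so that Model.get_model() can stamp the model_info fields into the runtime info before serialization. A rough usage sketch follows; "model.xml" and "serialized/model.xml" are placeholder paths, and the call relies on the create_model defaults (local OpenVINO IR, CPU device) rather than anything introduced by these patches.

    from model_api.models import DetectionModel

    model = DetectionModel.create_model("model.xml")  # local .xml goes through OpenvinoAdapter
    ov_model = model.get_model()                      # adapter returns its model; model_info rt_info is set here
    print(ov_model.get_rt_info(["model_info", "model_type"]).astype(str))
    model.save("serialized/model.xml")                # serialized IR keeps the embedded model_info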