diff --git a/model_api/python/model_api/adapters/openvino_adapter.py b/model_api/python/model_api/adapters/openvino_adapter.py
index 52dd74f1..2e6b51ea 100644
--- a/model_api/python/model_api/adapters/openvino_adapter.py
+++ b/model_api/python/model_api/adapters/openvino_adapter.py
@@ -37,7 +37,8 @@
def create_core():
if openvino_absent:
- raise ImportError("The OpenVINO package is not installed")
+ msg = "The OpenVINO package is not installed"
+ raise ImportError(msg)
log.info("OpenVINO Runtime")
log.info(f"\tbuild: {get_version()}")
@@ -73,7 +74,8 @@ def parse_value_per_device(devices: set[str], values_string: str) -> dict[str, int]:
for device in devices:
result[device] = int(device_value_list[0])
elif device_value_list[0] != "":
- raise RuntimeError(f"Unknown string format: {values_string}")
+ msg = f"Unknown string format: {values_string}"
+ raise RuntimeError(msg)
return result
@@ -174,7 +176,8 @@ def __init__(
log.info(f"Reading model {self.model_path}")
self.model = core.read_model(self.model_path)
return
- raise RuntimeError("Model must be bytes, a file or existing OMZ model name")
+ msg = "Model must be bytes, a file or existing OMZ model name"
+ raise RuntimeError(msg)
def load_model(self):
self.compiled_model = self.core.compile_model(
@@ -377,9 +380,8 @@ def embed_preprocessing(
)
else:
- raise ValueError(
- f"Upsupported resize type in model preprocessing: {resize_mode}",
- )
+ msg = f"Upsupported resize type in model preprocessing: {resize_mode}"
+ raise ValueError(msg)
# Handle layout
ppp.input(input_idx).model().set_layout(ov.Layout(layout))
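
# The hunks above follow ruff's EM (flake8-errmsg) convention enabled at the
# bottom of this diff: build the message in a variable, then raise it. A
# minimal sketch of the motivation, using only the stdlib:

def parse(values_string: str) -> None:
    # With a literal, the traceback's source line repeats the whole rendered
    # message; with `msg`, the message appears only once, in the exception
    # text itself, and the `raise` line stays short.
    msg = f"Unknown string format: {values_string}"
    raise RuntimeError(msg)
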
diff --git a/model_api/python/model_api/adapters/ovms_adapter.py b/model_api/python/model_api/adapters/ovms_adapter.py
index 78935233..77d11623 100644
--- a/model_api/python/model_api/adapters/ovms_adapter.py
+++ b/model_api/python/model_api/adapters/ovms_adapter.py
@@ -116,7 +116,8 @@ def reshape_model(self, new_shape):
raise NotImplementedError
def get_rt_info(self, path):
- raise NotImplementedError("OVMSAdapter does not support RT info getting")
+ msg = "OVMSAdapter does not support RT info getting"
+ raise NotImplementedError(msg)
_tf2ov_precision = {
@@ -147,13 +148,15 @@ def get_rt_info(self, path):
def _parse_model_arg(target_model: str):
if not isinstance(target_model, str):
- raise TypeError("target_model must be str")
+ msg = "target_model must be str"
+ raise TypeError(msg)
# Expected format: <address>:<port>/models/<model_name>[:<model_version>]
if not re.fullmatch(
r"(\w+\.*\-*)*\w+:\d+\/models\/[a-zA-Z0-9._-]+(\:\d+)*",
target_model,
):
- raise ValueError("invalid --model option format")
+ msg = "invalid --model option format"
+ raise ValueError(msg)
service_url, _, model = target_model.split("/")
model_spec = model.split(":")
if len(model_spec) == 1:
@@ -161,7 +164,8 @@ def _parse_model_arg(target_model: str):
return service_url, model_spec[0], 0
if len(model_spec) == 2:
return service_url, model_spec[0], int(model_spec[1])
- raise ValueError("invalid target_model format")
+ msg = "invalid target_model format"
+ raise ValueError(msg)
def _verify_model_available(client, model_name, model_version):
@@ -171,22 +175,21 @@ def _verify_model_available(client, model_name, model_version):
try:
model_status = client.get_model_status(model_name, model_version)
except ovmsclient.ModelNotFoundError as e:
- raise RuntimeError(
- f"Requested model: {model_name}, version: {version} has not been found",
- ) from e
+ msg = f"Requested model: {model_name}, version: {version} has not been found"
+ raise RuntimeError(msg) from e
target_version = max(model_status.keys())
version_status = model_status[target_version]
if version_status["state"] != "AVAILABLE" or version_status["error_code"] != 0:
- raise RuntimeError(
- f"Requested model: {model_name}, version: {version} is not in available state",
- )
+ msg = f"Requested model: {model_name}, version: {version} is not in available state"
+ raise RuntimeError(msg)
def _prepare_inputs(dict_data, inputs_meta):
inputs = {}
for input_name, input_data in dict_data.items():
if input_name not in inputs_meta:
- raise ValueError("Input data does not match model inputs")
+ msg = "Input data does not match model inputs"
+ raise ValueError(msg)
input_info = inputs_meta[input_name]
model_precision = _tf2np_precision[input_info["dtype"]]
if isinstance(input_data, np.ndarray) and input_data.dtype != model_precision:
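
# For reference, the `target_model` strings that _parse_model_arg() above
# accepts, exercised directly against its regex (the endpoints are made up):

import re

pattern = r"(\w+\.*\-*)*\w+:\d+\/models\/[a-zA-Z0-9._-]+(\:\d+)*"
assert re.fullmatch(pattern, "localhost:9000/models/ssd")       # latest version
assert re.fullmatch(pattern, "ovms.example:9000/models/ssd:2")  # explicit version
assert not re.fullmatch(pattern, "localhost/models/ssd")        # port is required
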
diff --git a/model_api/python/model_api/adapters/utils.py b/model_api/python/model_api/adapters/utils.py
index b8c7d867..042b069b 100644
--- a/model_api/python/model_api/adapters/utils.py
+++ b/model_api/python/model_api/adapters/utils.py
@@ -33,9 +33,8 @@ def from_shape(shape):
if len(shape) == 6:
return "NSTHWC" if shape[5] in range(1, 5) else "NSCTHW"
- raise RuntimeError(
- f"Get layout from shape method doesn't support {len(shape)}D shape",
- )
+ msg = f"Get layout from shape method doesn't support {len(shape)}D shape"
+ raise RuntimeError(msg)
@staticmethod
def from_openvino(input):
@@ -75,9 +74,11 @@ def parse_layouts(layout_string: str) -> dict | None:
def resize_image_letterbox_graph(input: Output, size, interpolation, pad_value):
if not isinstance(pad_value, int):
- raise RuntimeError("pad_value must be int")
+ msg = "pad_value must be int"
+ raise RuntimeError(msg)
if not 0 <= pad_value <= 255:
- raise RuntimeError("pad_value must be in range [0, 255]")
+ msg = "pad_value must be in range [0, 255]"
+ raise RuntimeError(msg)
w, h = size
h_axis = 1
w_axis = 2
@@ -291,9 +292,11 @@ def resize_image_graph(
pad_value,
):
if not isinstance(pad_value, int):
- raise RuntimeError("pad_value must be int")
+ msg = "pad_value must be int"
+ raise RuntimeError(msg)
if not 0 <= pad_value <= 255:
- raise RuntimeError("pad_value must be in range [0, 255]")
+ msg = "pad_value must be in range [0, 255]"
+ raise RuntimeError(msg)
h_axis = 1
w_axis = 2
w, h = size
@@ -426,9 +429,8 @@ def get_rt_info_from_dict(rt_info_dict, path):
value = value[item]
return OVAny(value)
except KeyError:
- raise RuntimeError(
- "Cannot get runtime attribute. Path to runtime attribute is incorrect.",
- )
+ msg = "Cannot get runtime attribute. Path to runtime attribute is incorrect."
+ raise RuntimeError(msg)
def resize_image_ocv(
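
# get_rt_info_from_dict() above resolves a path through nested dicts and
# wraps the result in OVAny; the lookup it guards reduces to this (the dict
# shape below is an assumption for illustration):

rt_info = {"model_info": {"model_type": "ssd"}}
value = rt_info
for item in ("model_info", "model_type"):
    value = value[item]  # a KeyError here means the attribute path is wrong
assert value == "ssd"
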
diff --git a/model_api/python/model_api/models/action_classification.py b/model_api/python/model_api/models/action_classification.py
index 4c6a0049..52b33561 100644
--- a/model_api/python/model_api/models/action_classification.py
+++ b/model_api/python/model_api/models/action_classification.py
@@ -46,7 +46,7 @@ class ActionClassificationModel(Model):
def __init__(
self,
inference_adapter: InferenceAdapter,
- configuration: dict[str, Any] = dict(),
+ configuration: dict[str, Any] = {},
preload: bool = False,
) -> None:
"""Action classaification model constructor
diff --git a/model_api/python/model_api/models/anomaly.py b/model_api/python/model_api/models/anomaly.py
index d6b31235..598cd905 100644
--- a/model_api/python/model_api/models/anomaly.py
+++ b/model_api/python/model_api/models/anomaly.py
@@ -25,7 +25,10 @@ class AnomalyDetection(ImageModel):
__model__ = "AnomalyDetection"
def __init__(
- self, inference_adapter: InferenceAdapter, configuration: dict = dict(), preload: bool = False
+ self,
+ inference_adapter: InferenceAdapter,
+ configuration: dict = {},
+ preload: bool = False,
) -> None:
super().__init__(inference_adapter, configuration, preload)
self._check_io_number(1, 1)
diff --git a/model_api/python/model_api/models/classification.py b/model_api/python/model_api/models/classification.py
index 178ee0eb..cddf09bb 100644
--- a/model_api/python/model_api/models/classification.py
+++ b/model_api/python/model_api/models/classification.py
@@ -24,7 +24,7 @@
class ClassificationModel(ImageModel):
__model__ = "Classification"
- def __init__(self, inference_adapter: InferenceAdapter, configuration: dict = dict(), preload: bool = False):
+ def __init__(self, inference_adapter: InferenceAdapter, configuration: dict = {}, preload: bool = False):
super().__init__(inference_adapter, configuration, preload=False)
self.topk: int
self.labels: list[str]
@@ -605,9 +605,8 @@ def topological_sort(self):
nodes_deque.append(node)
if len(ordered) != len(self._v):
- raise RuntimeError(
- "Topological sort failed: input graph has been" "changed during the sorting or contains a cycle",
- )
+ msg = "Topological sort failed: input graph has been changed during the sorting or contains a cycle"
+ raise RuntimeError(msg)
return ordered
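
# The merged message above also fixes a bug that the ISC (implicit string
# concatenation) rules enabled below would flag: adjacent literals fuse with
# no separator, so the old two-part string rendered as "...has beenchanged...".
# A minimal repro:

old = "input graph has been" "changed during the sorting or contains a cycle"
assert "beenchanged" in old  # the missing space the single-string version restores
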
diff --git a/model_api/python/model_api/models/detection_model.py b/model_api/python/model_api/models/detection_model.py
index 3e72de4d..b16bfacd 100644
--- a/model_api/python/model_api/models/detection_model.py
+++ b/model_api/python/model_api/models/detection_model.py
@@ -20,7 +20,7 @@ class DetectionModel(ImageModel):
__model__ = "DetectionModel"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Detection Model constructor
It extends the `ImageModel` constructor.
@@ -36,7 +36,7 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False):
WrapperError: if the model has more than 1 image inputs
"""
super().__init__(inference_adapter, configuration, preload)
-
+ self.path_to_labels: str
if not self.image_blob_name:
self.raise_error(
f"The Wrapper supports only one image input, but {len(self.image_blob_names)} found",
diff --git a/model_api/python/model_api/models/image_model.py b/model_api/python/model_api/models/image_model.py
index 9e32f15c..f7ed1f9a 100644
--- a/model_api/python/model_api/models/image_model.py
+++ b/model_api/python/model_api/models/image_model.py
@@ -32,7 +32,7 @@ class ImageModel(Model):
__model__ = "ImageModel"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Image model constructor
It extends the `Model` constructor.
@@ -50,6 +50,14 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False):
super().__init__(inference_adapter, configuration, preload)
self.image_blob_names, self.image_info_blob_names = self._get_inputs()
self.image_blob_name = self.image_blob_names[0]
+ self.orig_height: int
+ self.orig_width: int
+ self.pad_value: int
+ self.resize_type: str
+ self.mean_values: list
+ self.scale_values: list
+ self.reverse_input_channels: bool
+ self.embedded_processing: bool
self.nchw_layout = self.inputs[self.image_blob_name].layout == "NCHW"
if self.nchw_layout:
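
# The `self.orig_height: int`-style lines added above are annotation-only
# statements: they declare attribute types for checkers, while the values are
# assigned later by the configuration machinery. A sketch of that runtime
# behavior, with a made-up class:

class Sketch:
    def __init__(self) -> None:
        self.pad_value: int  # annotation only; nothing is assigned yet
        assert "pad_value" not in vars(self)
        self.pad_value = 0  # the real assignment creates the attribute
        assert vars(self)["pad_value"] == 0

Sketch()
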
diff --git a/model_api/python/model_api/models/instance_segmentation.py b/model_api/python/model_api/models/instance_segmentation.py
index 24cde4bc..4e067878 100644
--- a/model_api/python/model_api/models/instance_segmentation.py
+++ b/model_api/python/model_api/models/instance_segmentation.py
@@ -15,9 +15,10 @@
class MaskRCNNModel(ImageModel):
__model__ = "MaskRCNN"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self._check_io_number((1, 2), (3, 4, 5, 6, 8))
+ self.path_to_labels: str
if self.path_to_labels:
self.labels = load_labels(self.path_to_labels)
self.is_segmentoly = len(self.inputs) == 2
diff --git a/model_api/python/model_api/models/keypoint_detection.py b/model_api/python/model_api/models/keypoint_detection.py
index a193705b..5ecaa718 100644
--- a/model_api/python/model_api/models/keypoint_detection.py
+++ b/model_api/python/model_api/models/keypoint_detection.py
@@ -19,13 +19,13 @@ class KeypointDetectionModel(ImageModel):
__model__ = "keypoint_detection"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Initializes the keypoint detection model.
Args:
inference_adapter (InferenceAdapter): inference adapter containing the underlying model.
configuration (dict, optional): configuration overrides the model parameters (see parameters() method).
- Defaults to dict().
+ Defaults to {}.
preload (bool, optional): forces inference adapter to load the model. Defaults to False.
"""
super().__init__(inference_adapter, configuration, preload)
diff --git a/model_api/python/model_api/models/model.py b/model_api/python/model_api/models/model.py
index 5515dfa8..a1361d95 100644
--- a/model_api/python/model_api/models/model.py
+++ b/model_api/python/model_api/models/model.py
@@ -52,7 +52,7 @@ class Model:
__model__: str = "Model"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Model constructor
Args:
diff --git a/model_api/python/model_api/models/result_types.py b/model_api/python/model_api/models/result_types.py
index 5259cf80..37f17006 100644
--- a/model_api/python/model_api/models/result_types.py
+++ b/model_api/python/model_api/models/result_types.py
@@ -55,7 +55,7 @@ def __str__(self) -> str:
class Detection:
- def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label=None):
+ def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label=None) -> None:
self.xmin: int = xmin
self.ymin: int = ymin
self.xmax: int = xmax
diff --git a/model_api/python/model_api/models/sam_models.py b/model_api/python/model_api/models/sam_models.py
index b1d6f5af..06eeaf5f 100644
--- a/model_api/python/model_api/models/sam_models.py
+++ b/model_api/python/model_api/models/sam_models.py
@@ -25,7 +25,7 @@ class SAMImageEncoder(ImageModel):
def __init__(
self,
inference_adapter: InferenceAdapter,
- configuration: dict[str, Any] = dict(),
+ configuration: dict[str, Any] = {},
preload: bool = False,
):
super().__init__(inference_adapter, configuration, preload)
@@ -73,7 +73,7 @@ class SAMDecoder(SegmentationModel):
def __init__(
self,
model_adapter: InferenceAdapter,
- configuration: dict[str, Any] = dict(),
+ configuration: dict[str, Any] = {},
preload: bool = False,
):
super().__init__(model_adapter, configuration, preload)
diff --git a/model_api/python/model_api/models/segmentation.py b/model_api/python/model_api/models/segmentation.py
index 4f342f2d..3c08dbfe 100644
--- a/model_api/python/model_api/models/segmentation.py
+++ b/model_api/python/model_api/models/segmentation.py
@@ -51,9 +51,10 @@ def create_hard_prediction_from_soft_prediction(
class SegmentationModel(ImageModel):
__model__ = "Segmentation"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self._check_io_number(1, (1, 2))
+ self.path_to_labels: str
if self.path_to_labels:
self.labels = load_labels(self.path_to_labels)
@@ -162,7 +163,8 @@ def get_contours(
n_layers = prediction.soft_prediction.shape[2]
if n_layers == 1:
- raise RuntimeError("Cannot get contours from soft prediction with 1 layer")
+ msg = "Cannot get contours from soft prediction with 1 layer"
+ raise RuntimeError(msg)
combined_contours = []
for layer_index in range(1, n_layers): # ignoring background
label = self.get_label_name(layer_index - 1)
diff --git a/model_api/python/model_api/models/ssd.py b/model_api/python/model_api/models/ssd.py
index 385b9ae5..bfe5fa05 100644
--- a/model_api/python/model_api/models/ssd.py
+++ b/model_api/python/model_api/models/ssd.py
@@ -12,7 +12,7 @@
class SSD(DetectionModel):
__model__ = "SSD"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self.image_info_blob_name = self.image_info_blob_names[0] if len(self.image_info_blob_names) == 1 else None
self.output_parser = self._get_output_parser(self.image_blob_name)
@@ -70,10 +70,12 @@ def _parse_outputs(self, outputs):
def find_layer_by_name(name, layers):
suitable_layers = [layer_name for layer_name in layers if name in layer_name]
if not suitable_layers:
- raise ValueError(f'Suitable layer for "{name}" output is not found')
+ msg = f'Suitable layer for "{name}" output is not found'
+ raise ValueError(msg)
if len(suitable_layers) > 1:
- raise ValueError(f'More than 1 layer matched to "{name}" output')
+ msg = f'More than 1 layer matched to "{name}" output'
+ raise ValueError(msg)
return suitable_layers[0]
@@ -81,13 +83,13 @@ def find_layer_by_name(name, layers):
class SingleOutputParser:
def __init__(self, all_outputs):
if len(all_outputs) != 1:
- raise ValueError("Network must have only one output.")
+ msg = "Network must have only one output."
+ raise ValueError(msg)
self.output_name, output_data = next(iter(all_outputs.items()))
last_dim = output_data.shape[-1]
if last_dim != 7:
- raise ValueError(
- "The last dimension of the output blob must be equal to 7, " f"got {last_dim} instead.",
- )
+ msg = f"The last dimension of the output blob must be equal to 7, got {last_dim} instead."
+ raise ValueError(msg)
def __call__(self, outputs):
return [
@@ -134,9 +136,11 @@ def find_layer_bboxes_output(layers):
if (len(data.shape) == 2 or len(data.shape) == 3) and data.shape[-1] == 5
]
if not filter_outputs:
- raise ValueError("Suitable output with bounding boxes is not found")
+ msg = "Suitable output with bounding boxes is not found"
+ raise ValueError(msg)
if len(filter_outputs) > 1:
- raise ValueError("More than 1 candidate for output with bounding boxes.")
+ msg = "More than 1 candidate for output with bounding boxes."
+ raise ValueError(msg)
return filter_outputs[0]
def __call__(self, outputs):
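
# SingleOutputParser above insists on a trailing dimension of 7 because a
# standard SSD detection-output blob packs each detection as
# [image_id, label, confidence, xmin, ymin, xmax, ymax]; a toy row:

import numpy as np

detection = np.array([0, 15, 0.91, 0.10, 0.20, 0.45, 0.60], dtype=np.float32)
assert detection.shape[-1] == 7
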
diff --git a/model_api/python/model_api/models/types.py b/model_api/python/model_api/models/types.py
index 0ba3f0b7..2f684fb0 100644
--- a/model_api/python/model_api/models/types.py
+++ b/model_api/python/model_api/models/types.py
@@ -108,7 +108,8 @@ def __init__(
self.choices = choices
for choice in self.choices:
if not isinstance(choice, str):
- raise ValueError(f"Incorrect option in choice list - {choice}.")
+ msg = f"Incorrect option in choice list - {choice}."
+ raise ValueError(msg)
def from_str(self, value):
return value
diff --git a/model_api/python/model_api/models/utils.py b/model_api/python/model_api/models/utils.py
index 5ec81442..26c48f8c 100644
--- a/model_api/python/model_api/models/utils.py
+++ b/model_api/python/model_api/models/utils.py
@@ -41,7 +41,8 @@ def get_contours(
# Assuming one contour output for findContours. Based on OTX this is a safe
# assumption
if len(contours) != 1:
- raise RuntimeError("findContours() must have returned only one contour")
+ msg = "findContours() must have returned only one contour"
+ raise RuntimeError(msg)
combined_contours.append(Contour(str(obj.str_label), obj.score, contours[0]))
return combined_contours
diff --git a/model_api/python/model_api/models/visual_prompting.py b/model_api/python/model_api/models/visual_prompting.py
index b70dbd9a..3cfcf85d 100644
--- a/model_api/python/model_api/models/visual_prompting.py
+++ b/model_api/python/model_api/models/visual_prompting.py
@@ -65,7 +65,8 @@ def infer(
VisualPromptingResult: result object containing predicted masks and aux information.
"""
if boxes is None and points is None:
- raise RuntimeError("boxes or points prompts are required for inference")
+ msg = "boxes or points prompts are required for inference"
+ raise RuntimeError(msg)
outputs: list[dict[str, Any]] = []
@@ -165,7 +166,8 @@ def __init__(
if 0 <= threshold <= 1:
self._threshold: float = threshold
else:
- raise ValueError("Confidence threshold should belong to [0;1] range.")
+ msg = "Confidence threshold should belong to [0;1] range."
+ raise ValueError(msg)
self._num_bg_points: int = 1
self._default_threshold_target: float = 0.0
self._image_size: int = self.encoder.image_size
@@ -187,7 +189,8 @@ def reference_features(self) -> VisualPromptingFeatures:
np.copy(self._used_indices),
)
- raise RuntimeError("Reference features are not generated")
+ msg = "Reference features are not generated"
+ raise RuntimeError(msg)
def learn(
self,
@@ -220,9 +223,8 @@ def learn(
The shape of the reference mask is N_labels x H x W, where H and W are the same as in the input image.
"""
if boxes is None and points is None and polygons is None:
- raise RuntimeError(
- "boxes, polygons or points prompts are required for learning",
- )
+ msg = "boxes, polygons or points prompts are required for learning"
+ raise RuntimeError(msg)
if reset_features or not self.has_reference_features():
self.reset_reference_info()
@@ -275,7 +277,8 @@ def learn(
elif "polygon" in inputs_decoder:
masks = _polygon_to_mask(inputs_decoder["polygon"], *original_shape)
else:
- raise RuntimeError("Unsupported type of prompt")
+ msg = "Unsupported type of prompt"
+ raise RuntimeError(msg)
ref_mask = np.where(masks, 1, ref_mask)
ref_feat: np.ndarray | None = None
@@ -334,19 +337,19 @@ def infer(
"""
if reference_features is None:
if self._reference_features is None:
- raise RuntimeError(
- (
- "Reference features are not defined. This parameter can be passed via "
- "SAMLearnableVisualPrompter constructor, or as an argument of infer() method"
- ),
+ msg = (
+ "Reference features are not defined. This parameter can be passed via "
+ "SAMLearnableVisualPrompter constructor, or as an argument of infer() method"
)
+ raise RuntimeError(msg)
reference_feats = self._reference_features
if self._used_indices is None:
- raise RuntimeError(
+ msg = (
"Used indices are not defined. This parameter can be passed via "
"SAMLearnableVisualPrompter constructor, or as an argument of infer() method"
)
+ raise RuntimeError(msg)
used_idx = self._used_indices
else:
reference_feats, used_idx = reference_features
@@ -400,7 +403,9 @@ def infer(
inputs_decoder["image_embeddings"] = image_embeddings
_prediction: dict[str, np.ndarray] = self._predict_masks(
- inputs_decoder, original_shape, apply_masks_refinement
+ inputs_decoder,
+ original_shape,
+ apply_masks_refinement,
)
_prediction.update({"scores": points_score[-1]})
@@ -445,7 +450,8 @@ def _gather_prompts_with_labels(
def _expand_reference_info(self, new_largest_label: int) -> None:
"""Expand reference info dimensions if newly given processed prompts have more labels."""
if self._reference_features is None:
- raise RuntimeError("Can not expand non existing reference info")
+ msg = "Can not expand non existing reference info"
+ raise RuntimeError(msg)
if new_largest_label > (cur_largest_label := len(self._reference_features) - 1):
diff = new_largest_label - cur_largest_label
diff --git a/model_api/python/model_api/models/yolo.py b/model_api/python/model_api/models/yolo.py
index f69b419a..60ed0c12 100644
--- a/model_api/python/model_api/models/yolo.py
+++ b/model_api/python/model_api/models/yolo.py
@@ -350,7 +350,7 @@ def __init__(self, classes, num, sides, anchors, mask, layout):
self.anchors = masked_anchors
self.use_input_size = True
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
def _get_output_info(self):
@@ -431,7 +431,7 @@ def __init__(self, classes, num, sides, anchors):
self.output_layout = "NCHW"
self.use_input_size = True
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
def _get_output_info(self):
@@ -478,13 +478,13 @@ def _get_absolute_det_box(
class YOLOX(DetectionModel):
__model__ = "YOLOX"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self._check_io_number(1, 1)
self.output_blob_name = next(iter(self.outputs))
- self.expanded_strides = []
- self.grids = []
+ self.expanded_strides: list = []
+ self.grids: list = []
self.set_strides_grids()
@classmethod
@@ -564,7 +564,7 @@ def postprocess(self, outputs, meta):
scores[keep_nms],
j[keep_nms],
),
- )
+ ),
)
return clip_detections(detections, meta["original_shape"])
@@ -591,7 +591,7 @@ def set_strides_grids(self):
class YoloV3ONNX(DetectionModel):
__model__ = "YOLOv3-ONNX"
- def __init__(self, inference_adapter, configuration=dict(), preload=False):
+ def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self.image_info_blob_name = self.image_info_blob_names[0] if len(self.image_info_blob_names) == 1 else None
self._check_io_number(2, 3)
@@ -602,7 +602,7 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False):
self.indices_blob_name,
) = self._get_outputs()
- if self.embed_preprocessing:
+ if self.embedded_processing:
layout = "NHWC" if self.nchw_layout else "NCHW"
inference_adapter.embed_preprocessing(
image_layout=layout,
@@ -650,7 +650,7 @@ def preprocess(self, inputs):
dict_inputs = {}
meta = {"original_shape": image.shape}
- if self.embed_preprocessing:
+ if self.embedded_processing:
meta.update({"resized_shape": (self.w, self.h)})
dict_inputs = {
@@ -774,9 +774,11 @@ def postprocess(self, outputs, meta):
self.raise_error("the output must be of precision f32")
out_shape = prediction.shape
if len(out_shape) != 3:
- raise RuntimeError("the output must be of rank 3")
+ msg = "the output must be of rank 3"
+ raise RuntimeError(msg)
if out_shape[0] != 1:
- raise RuntimeError("the first dim of the output must be 1")
+ msg = "the first dim of the output must be 1"
+ raise RuntimeError(msg)
LABELS_START = 4
filtered = prediction[0].T[(prediction[:, LABELS_START:] > self.confidence_threshold).any(1)[0]]
confidences = filtered[:, LABELS_START:]
diff --git a/model_api/python/model_api/tilers/detection.py b/model_api/python/model_api/tilers/detection.py
index 9c4597b5..8e4d5d77 100644
--- a/model_api/python/model_api/tilers/detection.py
+++ b/model_api/python/model_api/tilers/detection.py
@@ -19,7 +19,7 @@ class DetectionTiler(Tiler):
or one `DetectionResult` object.
"""
- def __init__(self, model, configuration=dict(), execution_mode="async"):
+ def __init__(self, model, configuration: dict = {}, execution_mode="async"):
super().__init__(model, configuration, execution_mode)
@classmethod
@@ -66,7 +66,8 @@ def _postprocess_tile(self, predictions, coord):
elif hasattr(predictions, "segmentedObjects"):
detections = _detection2array(predictions.segmentedObjects)
else:
- raise RuntimeError("Unsupported model predictions format")
+ msg = "Unsupported model predictions format"
+ raise RuntimeError(msg)
output_dict["saliency_map"] = predictions.saliency_map
output_dict["features"] = predictions.feature_vector
diff --git a/model_api/python/model_api/tilers/instance_segmentation.py b/model_api/python/model_api/tilers/instance_segmentation.py
index fad3f587..5de1db14 100644
--- a/model_api/python/model_api/tilers/instance_segmentation.py
+++ b/model_api/python/model_api/tilers/instance_segmentation.py
@@ -30,7 +30,7 @@ class InstanceSegmentationTiler(DetectionTiler):
def __init__(
self,
model,
- configuration=dict(),
+ configuration: dict = {},
execution_mode="async",
tile_classifier_model=None,
):
diff --git a/model_api/python/model_api/tilers/tiler.py b/model_api/python/model_api/tilers/tiler.py
index 74da28fe..73581d85 100644
--- a/model_api/python/model_api/tilers/tiler.py
+++ b/model_api/python/model_api/tilers/tiler.py
@@ -30,7 +30,7 @@ class Tiler(abc.ABC):
execution_mode: Controls inference mode of the tiler (`async` or `sync`).
"""
- def __init__(self, model, configuration=dict(), execution_mode="async"):
+ def __init__(self, model, configuration: dict = {}, execution_mode: str = "async"):
"""Base constructor for creating a tiling pipeline
Args:
@@ -46,9 +46,8 @@ def __init__(self, model, configuration=dict(), execution_mode="async"):
self._load_config(configuration)
self.async_pipeline = AsyncPipeline(self.model)
if execution_mode not in Tiler.EXECUTION_MODES:
- raise ValueError(
- f"Wrong execution mode. The following modes are supported {Tiler.EXECUTION_MODES}",
- )
+ msg = f"Wrong execution mode. The following modes are supported {Tiler.EXECUTION_MODES}"
+ raise ValueError(msg)
self.execution_mode = execution_mode
def get_model(self):
@@ -133,7 +132,8 @@ def _load_config(self, config):
self.logger.error(f'Error with "{name}" parameter:')
for _error in errors:
self.logger.error(f"\t{_error}")
- raise RuntimeError("Incorrect user configuration")
+ msg = "Incorrect user configuration"
+ raise RuntimeError(msg)
value = parameters[name].get_value(value)
self.__setattr__(name, value)
else:
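
# A note on the `configuration: dict = {}` defaults standardized throughout
# this diff: `{}` behaves exactly like the `dict()` it replaces, i.e. one
# shared object created at definition time. That is harmless while the
# constructors only read from it; the classic pitfall appears once a default
# is mutated, as in this deliberately wrong sketch:

def collect(item: int, bucket: list = []) -> list:
    bucket.append(item)
    return bucket

assert collect(1) == [1]
assert collect(2) == [1, 2]  # the same list object survives between calls
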
diff --git a/model_api/python/pyproject.toml b/model_api/python/pyproject.toml
index a8cd3eb3..064e9e96 100644
--- a/model_api/python/pyproject.toml
+++ b/model_api/python/pyproject.toml
@@ -105,15 +105,15 @@ lint.select = [
# "FBT", # flake8-boolean-trap (`FBT`)
# "B", # flake8-bugbear (`B`)
# "A", # flake8-builtins (`A`)
- # "COM", # flake8-commas (`COM`)
+ "COM", # flake8-commas (`COM`)
"CPY", # flake8-copyright (`CPY`)
- # "C4", # flake8-comprehensions (`C4`)
+ "C4", # flake8-comprehensions (`C4`)
"DTZ", # flake8-datatimez (`DTZ`)
# "T10", # flake8-debugger (`T10`)
- # "EM", # flake8-errmsg (`EM`)
+ "EM", # flake8-errmsg (`EM`)
# "FA", # flake8-future-annotations (`FA`)
- # "ISC", # flake8-implicit-str-concat (`ISC`)
- # "ICN", # flake8-import-conventions (`ICN`)
+ "ISC", # flake8-implicit-str-concat (`ISC`)
+ "ICN", # flake8-import-conventions (`ICN`)
# "PIE", # flake8-pie (`PIE`)
# "PT", # flake8-pytest-style (`PT`)
# "RSE", # flake8-raise (`RSE`)