14 changes: 8 additions & 6 deletions model_api/python/model_api/adapters/openvino_adapter.py
@@ -37,7 +37,8 @@

def create_core():
if openvino_absent:
raise ImportError("The OpenVINO package is not installed")
msg = "The OpenVINO package is not installed"
raise ImportError(msg)

log.info("OpenVINO Runtime")
log.info(f"\tbuild: {get_version()}")
@@ -73,7 +74,8 @@ def parse_value_per_device(devices: set[str], values_string: str) -> dict[str, int]:
for device in devices:
result[device] = int(device_value_list[0])
elif device_value_list[0] != "":
raise RuntimeError(f"Unknown string format: {values_string}")
msg = f"Unknown string format: {values_string}"
raise RuntimeError(msg)
return result


@@ -174,7 +176,8 @@ def __init__(
log.info(f"Reading model {self.model_path}")
self.model = core.read_model(self.model_path)
return
raise RuntimeError("Model must be bytes, a file or existing OMZ model name")
msg = "Model must be bytes, a file or existing OMZ model name"
raise RuntimeError(msg)

def load_model(self):
self.compiled_model = self.core.compile_model(
@@ -377,9 +380,8 @@ def embed_preprocessing(
)

else:
raise ValueError(
f"Upsupported resize type in model preprocessing: {resize_mode}",
)
msg = f"Upsupported resize type in model preprocessing: {resize_mode}"
raise ValueError(msg)

# Handle layout
ppp.input(input_idx).model().set_layout(ov.Layout(layout))
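The same refactor recurs in every file below: the exception message is bound to a msg variable before the raise, so the traceback shows `raise ImportError(msg)` rather than repeating the full message literal. This matches ruff's flake8-errmsg checks (EM101/EM102); the rule names are an assumption, since the PR description is not shown here. A minimal standalone sketch of the pattern:

def create_core_sketch(openvino_absent: bool = True):
    # Illustrative only: bind the message first, then raise, as in the diff above.
    if openvino_absent:
        msg = "The OpenVINO package is not installed"
        raise ImportError(msg)

try:
    create_core_sketch()
except ImportError as err:
    print(err)  # The OpenVINO package is not installed
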
25 changes: 14 additions & 11 deletions model_api/python/model_api/adapters/ovms_adapter.py
@@ -116,7 +116,8 @@ def reshape_model(self, new_shape):
raise NotImplementedError

def get_rt_info(self, path):
raise NotImplementedError("OVMSAdapter does not support RT info getting")
msg = "OVMSAdapter does not support RT info getting"
raise NotImplementedError(msg)


_tf2ov_precision = {
@@ -147,21 +148,24 @@ def get_rt_info(self, path):

def _parse_model_arg(target_model: str):
if not isinstance(target_model, str):
raise TypeError("target_model must be str")
msg = "target_model must be str"
raise TypeError(msg)
# Expected format: <address>:<port>/models/<model_name>[:<model_version>]
if not re.fullmatch(
r"(\w+\.*\-*)*\w+:\d+\/models\/[a-zA-Z0-9._-]+(\:\d+)*",
target_model,
):
raise ValueError("invalid --model option format")
msg = "invalid --model option format"
raise ValueError(msg)
service_url, _, model = target_model.split("/")
model_spec = model.split(":")
if len(model_spec) == 1:
# model version not specified - use latest
return service_url, model_spec[0], 0
if len(model_spec) == 2:
return service_url, model_spec[0], int(model_spec[1])
raise ValueError("invalid target_model format")
msg = "invalid target_model format"
raise ValueError(msg)


def _verify_model_available(client, model_name, model_version):
@@ -171,22 +175,21 @@
try:
model_status = client.get_model_status(model_name, model_version)
except ovmsclient.ModelNotFoundError as e:
raise RuntimeError(
f"Requested model: {model_name}, version: {version} has not been found",
) from e
msg = f"Requested model: {model_name}, version: {version} has not been found"
raise RuntimeError(msg) from e
target_version = max(model_status.keys())
version_status = model_status[target_version]
if version_status["state"] != "AVAILABLE" or version_status["error_code"] != 0:
raise RuntimeError(
f"Requested model: {model_name}, version: {version} is not in available state",
)
msg = f"Requested model: {model_name}, version: {version} is not in available state"
raise RuntimeError(msg)


def _prepare_inputs(dict_data, inputs_meta):
inputs = {}
for input_name, input_data in dict_data.items():
if input_name not in inputs_meta:
raise ValueError("Input data does not match model inputs")
msg = "Input data does not match model inputs"
raise ValueError(msg)
input_info = inputs_meta[input_name]
model_precision = _tf2np_precision[input_info["dtype"]]
if isinstance(input_data, np.ndarray) and input_data.dtype != model_precision:
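For reference, a self-contained sketch of the address parsing shown above; the helper name and example addresses are illustrative, but the regex and the <address>:<port>/models/<model_name>[:<model_version>] format come from the diff (version 0 meaning "latest").

import re

def parse_target_model(target_model: str) -> tuple[str, str, int]:
    # Expected format: <address>:<port>/models/<model_name>[:<model_version>]
    if not re.fullmatch(r"(\w+\.*\-*)*\w+:\d+\/models\/[a-zA-Z0-9._-]+(\:\d+)*", target_model):
        msg = "invalid --model option format"
        raise ValueError(msg)
    service_url, _, model = target_model.split("/")
    name, *version = model.split(":")
    return service_url, name, int(version[0]) if version else 0

print(parse_target_model("localhost:9000/models/ssd_model"))    # ('localhost:9000', 'ssd_model', 0)
print(parse_target_model("localhost:9000/models/ssd_model:2"))  # ('localhost:9000', 'ssd_model', 2)
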
22 changes: 12 additions & 10 deletions model_api/python/model_api/adapters/utils.py
@@ -33,9 +33,8 @@ def from_shape(shape):
if len(shape) == 6:
return "NSTHWC" if shape[5] in range(1, 5) else "NSCTHW"

raise RuntimeError(
f"Get layout from shape method doesn't support {len(shape)}D shape",
)
msg = f"Get layout from shape method doesn't support {len(shape)}D shape"
raise RuntimeError(msg)

@staticmethod
def from_openvino(input):
@@ -75,9 +74,11 @@ def parse_layouts(layout_string: str) -> dict | None:

def resize_image_letterbox_graph(input: Output, size, interpolation, pad_value):
if not isinstance(pad_value, int):
raise RuntimeError("pad_value must be int")
msg = "pad_value must be int"
raise RuntimeError(msg)
if not 0 <= pad_value <= 255:
raise RuntimeError("pad_value must be in range [0, 255]")
msg = "pad_value must be in range [0, 255]"
raise RuntimeError(msg)
w, h = size
h_axis = 1
w_axis = 2
@@ -291,9 +292,11 @@ def resize_image_graph(
pad_value,
):
if not isinstance(pad_value, int):
raise RuntimeError("pad_value must be int")
msg = "pad_value must be int"
raise RuntimeError(msg)
if not 0 <= pad_value <= 255:
raise RuntimeError("pad_value must be in range [0, 255]")
msg = "pad_value must be in range [0, 255]"
raise RuntimeError(msg)
h_axis = 1
w_axis = 2
w, h = size
@@ -426,9 +429,8 @@ def get_rt_info_from_dict(rt_info_dict, path):
value = value[item]
return OVAny(value)
except KeyError:
raise RuntimeError(
"Cannot get runtime attribute. Path to runtime attribute is incorrect.",
)
msg = "Cannot get runtime attribute. Path to runtime attribute is incorrect."
raise RuntimeError(msg)


def resize_image_ocv(
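As context for the letterbox/resize helpers touched above, the underlying arithmetic is roughly the following (a generic illustration, not the OpenVINO preprocessing graph built in utils.py): scale to fit the target size while preserving aspect ratio, then pad the remainder with pad_value.

def letterbox_params(src_w: int, src_h: int, dst_w: int, dst_h: int):
    # Scale factor that fits the source inside the destination without distortion.
    scale = min(dst_w / src_w, dst_h / src_h)
    new_w, new_h = round(src_w * scale), round(src_h * scale)
    # The remaining area gets filled with pad_value (an int in [0, 255], as validated above).
    return (new_w, new_h), (dst_w - new_w, dst_h - new_h)

print(letterbox_params(1920, 1080, 416, 416))  # ((416, 234), (0, 182))
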
@@ -46,7 +46,7 @@ class ActionClassificationModel(Model):
def __init__(
self,
inference_adapter: InferenceAdapter,
configuration: dict[str, Any] = dict(),
configuration: dict[str, Any] = {},
preload: bool = False,
) -> None:
"""Action classaification model constructor
5 changes: 4 additions & 1 deletion model_api/python/model_api/models/anomaly.py
@@ -25,7 +25,10 @@ class AnomalyDetection(ImageModel):
__model__ = "AnomalyDetection"

def __init__(
self, inference_adapter: InferenceAdapter, configuration: dict = dict(), preload: bool = False
self,
inference_adapter: InferenceAdapter,
configuration: dict = {},
preload: bool = False,
) -> None:
super().__init__(inference_adapter, configuration, preload)
self._check_io_number(1, 1)
7 changes: 3 additions & 4 deletions model_api/python/model_api/models/classification.py
@@ -24,7 +24,7 @@
class ClassificationModel(ImageModel):
__model__ = "Classification"

def __init__(self, inference_adapter: InferenceAdapter, configuration: dict = dict(), preload: bool = False):
def __init__(self, inference_adapter: InferenceAdapter, configuration: dict = {}, preload: bool = False):
super().__init__(inference_adapter, configuration, preload=False)
self.topk: int
self.labels: list[str]
@@ -605,9 +605,8 @@ def topological_sort(self):
nodes_deque.append(node)

if len(ordered) != len(self._v):
raise RuntimeError(
"Topological sort failed: input graph has been" "changed during the sorting or contains a cycle",
)
msg = "Topological sort failed: input graph has been changed during the sorting or contains a cycle"
raise RuntimeError(msg)

return ordered

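The cycle check above relies on a property of Kahn's algorithm: if the graph has a cycle (or is mutated mid-sort), some nodes never reach in-degree zero and the ordered list comes out shorter than the node set. A minimal sketch with illustrative names:

from collections import deque

def topo_sort(adjacency: dict[str, list[str]]) -> list[str]:
    in_degree = {node: 0 for node in adjacency}
    for children in adjacency.values():
        for child in children:
            in_degree[child] += 1
    queue = deque(node for node, degree in in_degree.items() if degree == 0)
    ordered = []
    while queue:
        node = queue.popleft()
        ordered.append(node)
        for child in adjacency[node]:
            in_degree[child] -= 1
            if in_degree[child] == 0:
                queue.append(child)
    if len(ordered) != len(adjacency):
        msg = "Topological sort failed: input graph has been changed during the sorting or contains a cycle"
        raise RuntimeError(msg)
    return ordered

print(topo_sort({"a": ["b", "c"], "b": ["c"], "c": []}))  # ['a', 'b', 'c']
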
4 changes: 2 additions & 2 deletions model_api/python/model_api/models/detection_model.py
@@ -20,7 +20,7 @@ class DetectionModel(ImageModel):

__model__ = "DetectionModel"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Detection Model constructor

It extends the `ImageModel` constructor.
@@ -36,7 +36,7 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False):
WrapperError: if the model has more than 1 image inputs
"""
super().__init__(inference_adapter, configuration, preload)

self.path_to_labels: str
if not self.image_blob_name:
self.raise_error(
f"The Wrapper supports only one image input, but {len(self.image_blob_names)} found",
10 changes: 9 additions & 1 deletion model_api/python/model_api/models/image_model.py
@@ -32,7 +32,7 @@ class ImageModel(Model):

__model__ = "ImageModel"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Image model constructor

It extends the `Model` constructor.
@@ -50,6 +50,14 @@ def __init__(self, inference_adapter, configuration=dict(), preload=False):
super().__init__(inference_adapter, configuration, preload)
self.image_blob_names, self.image_info_blob_names = self._get_inputs()
self.image_blob_name = self.image_blob_names[0]
self.orig_height: int
self.orig_width: int
self.pad_value: int
self.resize_type: str
self.mean_values: list
self.scale_values: list
self.reverse_input_channels: bool
self.embedded_processing: bool

self.nchw_layout = self.inputs[self.image_blob_name].layout == "NCHW"
if self.nchw_layout:
@@ -15,9 +15,10 @@
class MaskRCNNModel(ImageModel):
__model__ = "MaskRCNN"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self._check_io_number((1, 2), (3, 4, 5, 6, 8))
self.path_to_labels: str
if self.path_to_labels:
self.labels = load_labels(self.path_to_labels)
self.is_segmentoly = len(self.inputs) == 2
4 changes: 2 additions & 2 deletions model_api/python/model_api/models/keypoint_detection.py
@@ -19,13 +19,13 @@ class KeypointDetectionModel(ImageModel):

__model__ = "keypoint_detection"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Initializes the keypoint detection model.

Args:
inference_adapter (InferenceAdapter): inference adapter containing the underlying model.
configuration (dict, optional): configuration overrides the model parameters (see parameters() method).
Defaults to dict().
Defaults to {}.
preload (bool, optional): forces inference adapter to load the model. Defaults to False.
"""
super().__init__(inference_adapter, configuration, preload)
2 changes: 1 addition & 1 deletion model_api/python/model_api/models/model.py
@@ -52,7 +52,7 @@ class Model:

__model__: str = "Model"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
"""Model constructor

Args:
2 changes: 1 addition & 1 deletion model_api/python/model_api/models/result_types.py
@@ -55,7 +55,7 @@ def __str__(self) -> str:


class Detection:
def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label=None):
def __init__(self, xmin, ymin, xmax, ymax, score, id, str_label=None) -> None:
self.xmin: int = xmin
self.ymin: int = ymin
self.xmax: int = xmax
4 changes: 2 additions & 2 deletions model_api/python/model_api/models/sam_models.py
@@ -25,7 +25,7 @@ class SAMImageEncoder(ImageModel):
def __init__(
self,
inference_adapter: InferenceAdapter,
configuration: dict[str, Any] = dict(),
configuration: dict[str, Any] = {},
preload: bool = False,
):
super().__init__(inference_adapter, configuration, preload)
@@ -73,7 +73,7 @@ class SAMDecoder(SegmentationModel):
def __init__(
self,
model_adapter: InferenceAdapter,
configuration: dict[str, Any] = dict(),
configuration: dict[str, Any] = {},
preload: bool = False,
):
super().__init__(model_adapter, configuration, preload)
6 changes: 4 additions & 2 deletions model_api/python/model_api/models/segmentation.py
@@ -51,9 +51,10 @@ def create_hard_prediction_from_soft_prediction(
class SegmentationModel(ImageModel):
__model__ = "Segmentation"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self._check_io_number(1, (1, 2))
self.path_to_labels: str
if self.path_to_labels:
self.labels = load_labels(self.path_to_labels)

@@ -162,7 +163,8 @@ def get_contours(
n_layers = prediction.soft_prediction.shape[2]

if n_layers == 1:
raise RuntimeError("Cannot get contours from soft prediction with 1 layer")
msg = "Cannot get contours from soft prediction with 1 layer"
raise RuntimeError(msg)
combined_contours = []
for layer_index in range(1, n_layers): # ignoring background
label = self.get_label_name(layer_index - 1)
22 changes: 13 additions & 9 deletions model_api/python/model_api/models/ssd.py
@@ -12,7 +12,7 @@
class SSD(DetectionModel):
__model__ = "SSD"

def __init__(self, inference_adapter, configuration=dict(), preload=False):
def __init__(self, inference_adapter, configuration: dict = {}, preload=False):
super().__init__(inference_adapter, configuration, preload)
self.image_info_blob_name = self.image_info_blob_names[0] if len(self.image_info_blob_names) == 1 else None
self.output_parser = self._get_output_parser(self.image_blob_name)
@@ -70,24 +70,26 @@ def _parse_outputs(self, outputs):
def find_layer_by_name(name, layers):
suitable_layers = [layer_name for layer_name in layers if name in layer_name]
if not suitable_layers:
raise ValueError(f'Suitable layer for "{name}" output is not found')
msg = f'Suitable layer for "{name}" output is not found'
raise ValueError(msg)

if len(suitable_layers) > 1:
raise ValueError(f'More than 1 layer matched to "{name}" output')
msg = f'More than 1 layer matched to "{name}" output'
raise ValueError(msg)

return suitable_layers[0]


class SingleOutputParser:
def __init__(self, all_outputs):
if len(all_outputs) != 1:
raise ValueError("Network must have only one output.")
msg = "Network must have only one output."
raise ValueError(msg)
self.output_name, output_data = next(iter(all_outputs.items()))
last_dim = output_data.shape[-1]
if last_dim != 7:
raise ValueError(
"The last dimension of the output blob must be equal to 7, " f"got {last_dim} instead.",
)
msg = f"The last dimension of the output blob must be equal to 7, got {last_dim} instead."
raise ValueError(msg)

def __call__(self, outputs):
return [
@@ -134,9 +136,11 @@ def find_layer_bboxes_output(layers):
if (len(data.shape) == 2 or len(data.shape) == 3) and data.shape[-1] == 5
]
if not filter_outputs:
raise ValueError("Suitable output with bounding boxes is not found")
msg = "Suitable output with bounding boxes is not found"
raise ValueError(msg)
if len(filter_outputs) > 1:
raise ValueError("More than 1 candidate for output with bounding boxes.")
msg = "More than 1 candidate for output with bounding boxes."
raise ValueError(msg)
return filter_outputs[0]

def __call__(self, outputs):
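SingleOutputParser above expects the standard SSD DetectionOutput blob of shape [1, 1, N, 7]; each row is commonly [image_id, label, score, xmin, ymin, xmax, ymax]. The column order and threshold in this sketch are assumptions based on that convention, not taken from the file.

import numpy as np

def decode_ssd_blob(blob: np.ndarray, score_threshold: float = 0.5):
    detections = []
    for image_id, label, score, xmin, ymin, xmax, ymax in blob.reshape(-1, 7):
        # image_id < 0 is the usual end-of-detections marker.
        if image_id < 0 or score < score_threshold:
            continue
        detections.append((int(label), float(score), (float(xmin), float(ymin), float(xmax), float(ymax))))
    return detections

blob = np.array([[[[0, 1, 0.9, 0.1, 0.1, 0.4, 0.5],
                   [0, 2, 0.2, 0.0, 0.0, 0.1, 0.1]]]], dtype=np.float32)
print(decode_ssd_blob(blob))  # one detection: label 1, score ~0.9
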
3 changes: 2 additions & 1 deletion model_api/python/model_api/models/types.py
@@ -108,7 +108,8 @@ def __init__(
self.choices = choices
for choice in self.choices:
if not isinstance(choice, str):
raise ValueError(f"Incorrect option in choice list - {choice}.")
msg = f"Incorrect option in choice list - {choice}."
raise ValueError(msg)

def from_str(self, value):
return value