
Commit 903f900

Python Model API: update docstrings for Model, ImageModel, DetectionModel wrappers (#3375)
* Modify docstrings for Model, ImageModel, DetectionModel classes in Python Model API
* Small fixes
* suggestions: use inputs/outputs, remove by wrapper
* Apply suggestions
1 parent f5386a7 commit 903f900


6 files changed: +211 −89 lines


demos/common/python/openvino/model_zoo/model_api/adapters/model_adapter.py

Lines changed: 20 additions & 18 deletions
@@ -35,7 +35,7 @@ class ModelAdapter(metaclass=abc.ABCMeta):
 
     - Reading the model from disk or other place
     - Loading the model to the device
-    - Accessing the information about input/output layers
+    - Accessing the information about inputs/outputs
     - The model reshaping
     - Synchronous model inference
     - Asynchronous model inference
@@ -58,34 +58,36 @@ def load_model(self):
     @abc.abstractmethod
     def get_input_layers(self):
         '''
-        Gets the names of model input layers and for each layer creates the Metadata structure,
-        which contains the information about the layer shape, blob precision in OpenVINO format, meta (optional)
+        Gets the names of model inputs and for each one creates the Metadata structure,
+        which contains the information about the input shape, layout, precision
+        in OpenVINO format, meta (optional)
 
         Returns:
-            - the dict containing Metadata for all input layers
+            - the dict containing Metadata for all inputs
         '''
 
     @abc.abstractmethod
     def get_output_layers(self):
         '''
-        Gets the names of model output layers and for each layer creates the Metadata structure,
-        which contains the information about the layer shape, blob precision in OpenVINO format, meta (optional)
+        Gets the names of model outputs and for each one creates the Metadata structure,
+        which contains the information about the output shape, layout, precision
+        in OpenVINO format, meta (optional)
 
         Returns:
-            - the dict containing Metadata for all output layers
+            - the dict containing Metadata for all outputs
         '''
 
     @abc.abstractmethod
     def reshape_model(self, new_shape):
         '''
-        Reshapes the model input layers to fit the new input shape.
+        Reshapes the model inputs to fit the new input shape.
 
         Args:
-            - new_shape(dict): the dictionary with input layers as keys and
+            - new_shape (dict): the dictionary with inputs names as keys and
                 list of new shape as values in the following format:
                 {
-                    'input_layer_1': [1, 128, 128, 3],
-                    'input_layer_2': [1, 128, 128, 3],
+                    'input_layer_name_1': [1, 128, 128, 3],
+                    'input_layer_name_2': [1, 128, 128, 3],
                     ...
                 }
         '''
@@ -98,16 +100,16 @@ def infer_sync(self, dict_data):
         Args:
             - dict_data: it's submitted to the model for inference and has the following format:
                 {
-                    'input_layer_1': data_1,
-                    'input_layer_2': data_2,
+                    'input_layer_name_1': data_1,
+                    'input_layer_name_2': data_2,
                     ...
                 }
 
         Returns:
-            - raw result(dict) - model raw output in the following format:
+            - raw result (dict) - model raw output in the following format:
                 {
-                    'output_layer_1': raw_result_1,
-                    'output_layer_2': raw_result_2,
+                    'output_layer_name_1': raw_result_1,
+                    'output_layer_name_2': raw_result_2,
                     ...
                 }
         '''
@@ -123,8 +125,8 @@ def infer_async(self, dict_data, callback_fn, callback_data):
         Args:
             - dict_data: it's submitted to the model for inference and has the following format:
                 {
-                    'input_layer_1': data_1,
-                    'input_layer_2': data_2,
+                    'input_layer_name_1': data_1,
+                    'input_layer_name_2': data_2,
                     ...
                 }
            - callback_fn: the callback function, which is defined outside the adapter
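To illustrate the adapter contract documented above, here is a minimal, hedged sketch of calling a concrete ModelAdapter from user code; the `adapter` argument, the dummy data, and the helper name `run_single_input` are hypothetical and stand in for any concrete implementation.

import numpy as np

def run_single_input(adapter, batch_shape=(1, 128, 128, 3)):
    # `adapter` is assumed to be any concrete ModelAdapter (e.g. OpenvinoAdapter).
    inputs = adapter.get_input_layers()            # {'input_layer_name_1': Metadata, ...}
    input_name = next(iter(inputs))                # take the first input name

    # Reshape the model to the desired input shape, keyed by input name.
    adapter.reshape_model({input_name: list(batch_shape)})

    # Synchronous inference: dict keyed by input names in, dict keyed by output names out.
    dict_data = {input_name: np.zeros(batch_shape, dtype=np.float32)}
    raw_result = adapter.infer_sync(dict_data)     # {'output_layer_name_1': raw_result_1, ...}
    return raw_result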

demos/common/python/openvino/model_zoo/model_api/adapters/openvino_adapter.py

Lines changed: 2 additions & 2 deletions
@@ -38,9 +38,9 @@ def create_core():
 
 
 class OpenvinoAdapter(ModelAdapter):
-    """
+    '''
     Works with OpenVINO model
-    """
+    '''
 
     def __init__(self, core, model_path, weights_path=None, model_parameters = {}, device='CPU', plugin_config=None, max_num_requests=0):
         self.core = core
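For context, a minimal sketch of constructing this adapter from the signature above; the import path mirrors the file location shown in the diff and the model path is a placeholder, so treat both as assumptions rather than canonical usage.

from openvino.model_zoo.model_api.adapters.openvino_adapter import OpenvinoAdapter, create_core

core = create_core()                              # shared OpenVINO Core, as in the hunk header above
adapter = OpenvinoAdapter(core, 'model.xml',      # 'model.xml' is a hypothetical path to the model IR
                          device='CPU', max_num_requests=1)
adapter.load_model()                              # explicit load, per the ModelAdapter interface
print(adapter.get_input_layers())                 # Metadata for all inputs, keyed by name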

demos/common/python/openvino/model_zoo/model_api/adapters/ovms_adapter.py

Lines changed: 2 additions & 2 deletions
@@ -28,9 +28,9 @@
 
 
 class OVMSAdapter(ModelAdapter):
-    """
+    '''
    Class that allows working with models served by the OpenVINO Model Server
-    """
+    '''
 
    tf2ov_precision = {
        "DT_INT64": "I64",

demos/common/python/openvino/model_zoo/model_api/models/detection_model.py

Lines changed: 22 additions & 21 deletions
@@ -19,30 +19,31 @@
 
 
 class DetectionModel(ImageModel):
-    '''An abstract detection model class
+    '''An abstract wrapper for object detection model
 
-    This class supports detection models. The Detection Model must have single image input.
+    The DetectionModel must have a single image input.
+    It inherits `preprocess` from `ImageModel` wrapper. Also, it defines `_resize_detections` method,
+    which should be used in `postprocess`, to clip bounding boxes and resize ones to original image shape.
 
-    Attributes:
-        labels(List[str]): list of labels for classes (could be None)
-        threshold(float): threshold for detection filtering, any detection with confidence less than this value
-            should be omitted in ``posptrocess`` method (0<=thresold<=1.0 for most models)
-        iou_threshold(float): threshold for NMS detection filtering
+    The `postprocess` method must be implemented in a specific inherited wrapper.
     '''
 
     def __init__(self, model_adapter, configuration=None, preload=False):
-        '''The Detection Model constructor
+        '''Detection Model constructor
 
-        Calls the ``ImageModel`` construtor first.
+        It extends the `ImageModel` construtor.
 
         Args:
-            labels(Iterable[str], str, Path): list of labels for detection classes or path to file with them
-            threshold(float): threshold for detections filtering by confidence
-            iou_threshold(float): threshold for NMS filtering
+            model_adapter (ModelAdapter): allows working with the specified executor
+            configuration (dict, optional): it contains values for parameters accepted by specific
+                wrapper (`confidence_threshold`, `labels` etc.) which are set as data attributes
+            preload (bool, optional): a flag whether the model is loaded to device while
+                initialization. If `preload=False`, the model must be loaded via `load` method before inference
 
         Raises:
-            WrapperError: If loaded model has more than one image inputs
+            WrapperError: if the model has more than 1 image inputs
         '''
+
         super().__init__(model_adapter, configuration, preload)
 
         if not self.image_blob_name:
@@ -66,25 +67,25 @@ def parameters(cls):
         return parameters
 
     def _resize_detections(self, detections, meta):
-        '''Resizes detection bounding boxes according to initial image size
+        '''Resizes detection bounding boxes according to initial image shape.
 
-        Implements resize operations for different image resize types (see ``ImageModel`` class for details).
-        Applies clipping bounding box to original image size.
+        It implements image resizing depending on the set `resize_type`(see `ImageModel` for details).
+        Next, it applies bounding boxes clipping.
 
         Args:
-            detections(List[Detection]): list of detections with coordinates in normalized form
-            meta: meta information with fields `resized_shape` and `original_shape`
+            detections (List[Detection]): list of detections with coordinates in normalized form
+            meta (dict): the input metadata obtained from `preprocess` method
 
         Returns:
-            List of detections fit to initial image (resized and clipped)
+            - list of detections with resized and clipped coordinates fit to initial image
 
         Raises:
-            WrapperError: If model uses custom resize or `resize_type` not set
+            WrapperError: If the model uses custom resize or `resize_type` is not set
         '''
         resized_shape = meta['resized_shape']
         original_shape = meta['original_shape']
 
-        if self.resize_type=='fit_to_window_letterbox':
+        if self.resize_type == 'fit_to_window_letterbox':
            detections = resize_detections_letterbox(detections, original_shape[1::-1], resized_shape[1::-1])
         elif self.resize_type == 'fit_to_window':
             detections = resize_detections_with_aspect_ratio(detections, original_shape[1::-1], resized_shape[1::-1], (self.w, self.h))
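As a hedged illustration of the contract described in these docstrings, the skeleton below shows how a specific inherited wrapper could use `_resize_detections` inside `postprocess`; the class name `MyDetector` and the `_parse_outputs` helper are hypothetical placeholders for model-specific decoding.

from openvino.model_zoo.model_api.models.detection_model import DetectionModel

class MyDetector(DetectionModel):
    '''Illustrative subclass only: a real wrapper decodes its own raw outputs.'''

    def postprocess(self, outputs, meta):
        # Hypothetical model-specific decoding of raw outputs into Detection objects
        # with coordinates in normalized form, as _resize_detections expects.
        detections = self._parse_outputs(outputs)
        # Clip the boxes and rescale them to the original image shape using the
        # `resized_shape`/`original_shape` kept in `meta` by ImageModel.preprocess().
        return self._resize_detections(detections, meta)

    def _parse_outputs(self, outputs):
        raise NotImplementedError('model-specific decoding goes here')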

demos/common/python/openvino/model_zoo/model_api/models/image_model.py

Lines changed: 55 additions & 22 deletions
@@ -22,25 +22,38 @@
 class ImageModel(Model):
     '''An abstract wrapper for an image-based model
 
-    An image-based model is a model which has one or more inputs with image - 4D tensors with NHWC or NCHW layout.
-    Also it may support additional inputs - 2D tensor.
-    Implements basic preprocessing for image: resizing and aligning to model input.
+    The ImageModel has 1 or more inputs with images - 4D tensors with NHWC or NCHW layout.
+    It may support additional inputs - 2D tensors.
+
+    The ImageModel implements basic preprocessing for an image provided as model input.
+    See `preprocess` description.
+
+    The `postprocess` method must be implemented in a specific inherited wrapper.
 
     Attributes:
-        resize_type(str): one of the preimplemented resize types
-        image_blob_names(List[str]): names of all image-like inputs (4D tensors)
-        image_info_blob_names(List[str]): names of all secondary inputs (2D tensors)
-        image_blob_name(str): name of image input (None, if they are many)
+        image_blob_names (List[str]): names of all image-like inputs (4D tensors)
+        image_info_blob_names (List[str]): names of all secondary inputs (2D tensors)
+        image_blob_name (str): name of the first image input
+        nchw_layout (bool): a flag whether the model input layer has NCHW layout
+        resize_type (str): the type for image resizing (see `RESIZE_TYPE` for info)
+        resize (function): resizing function corresponding to the `resize_type`
+        input_transform (InputTransform): instance of the `InputTransform` for image normalization
     '''
 
     def __init__(self, model_adapter, configuration=None, preload=False):
         '''Image model constructor
 
-        Calls the `Model` constructor first
+        It extends the `Model` constructor.
 
         Args:
-            model_adapter(ModelAdapter): allows working with the specified executor
-            resize_type(str): sets the type for image resizing (see ``RESIZE_TYPE`` for info)
+            model_adapter (ModelAdapter): allows working with the specified executor
+            configuration (dict, optional): it contains values for parameters accepted by specific
+                wrapper (`confidence_threshold`, `labels` etc.) which are set as data attributes
+            preload (bool, optional): a flag whether the model is loaded to device while
+                initialization. If `preload=False`, the model must be loaded via `load` method before inference
+
+        Raises:
+            WrapperError: if the wrapper failed to define appropriate inputs for images
         '''
         super().__init__(model_adapter, configuration, preload)
         self.image_blob_names, self.image_info_blob_names = self._get_inputs()
@@ -75,6 +88,15 @@ def parameters(cls):
         return parameters
 
     def _get_inputs(self):
+        '''Defines the model inputs for images and additional info.
+
+        Raises:
+            WrapperError: if the wrapper failed to define appropriate inputs for images
+
+        Returns:
+            - list of inputs names for images
+            - list of inputs names for additional info
+        '''
         image_blob_names, image_info_blob_names = [], []
         for name, metadata in self.inputs.items():
             if len(metadata.shape) == 4:
@@ -90,24 +112,27 @@ def _get_inputs(self):
     def preprocess(self, inputs):
         '''Data preprocess method
 
-        Performs some basic preprocessing with single image:
-            - resizing to net input size
-            - applying tranform orerations: mean and scale values, BGR-RGB conversions
-            - changing layout according to net input layout
+        It performs basic preprocessing of a single image:
+            - Resizes the image to fit the model input size via the defined resize type
+            - Normalizes the image: subtracts means, divides by scales, switch channels BGR-RGB
+            - Changes the image layout according to the model input layout
 
-        Adds the size of initial image and after resizing to metadata as `original_shape` and `resized_shape`
-        correspondenly.
+        Also, it keeps the size of original image and resized one as `original_shape` and `resized_shape`
+        in the metadata dictionary.
 
         Note:
-            This method supports only models with single image input. If model has more image inputs
-            or has additional support inputs, their preprocessing should be implemented in concrete class
+            It supports only models with single image input. If the model has more image inputs or has
+            additional supported inputs, the `preprocess` should be overloaded in a specific wrapper.
 
         Args:
-            inputs: single image as 3D array in HWC layout
+            inputs (ndarray): a single image as 3D array in HWC layout
 
         Returns:
-            - the dict with preprocessed image data
-            - The dict with metadata
+            - the preprocessed image in the following format:
+                {
+                    'input_layer_name': preprocessed_image
+                }
+            - the input metadata, which might be used in `postprocess` method
         '''
         image = inputs
         meta = {'original_shape': image.shape}
@@ -121,8 +146,16 @@ def preprocess(self, inputs):
         return dict_inputs, meta
 
     def _change_layout(self, image):
+        '''Changes the input image layout to fit the layout of the model input layer.
+
+        Args:
+            inputs (ndarray): a single image as 3D array in HWC layout
+
+        Returns:
+            - the image with layout aligned with the model layout
+        '''
         if self.nchw_layout:
-            image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
+            image = image.transpose((2, 0, 1))  # HWC->CHW
             image = image.reshape((1, self.c, self.h, self.w))
         else:
             image = image.reshape((1, self.h, self.w, self.c))
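Putting the pieces together, a short hedged sketch of the preprocess/infer/postprocess flow implied by these docstrings; `wrapper` is assumed to be a concrete ImageModel subclass and `adapter` the ModelAdapter it was built with, and the `postprocess(raw_result, meta)` signature is an assumption based on the DetectionModel description, not a documented API.

import numpy as np

def run_wrapper(wrapper, adapter):
    image = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy HWC image standing in for real data

    # preprocess() resizes, normalizes and changes layout; it returns the inputs dict
    # keyed by the image input name plus metadata with original/resized shapes.
    dict_inputs, meta = wrapper.preprocess(image)
    assert 'original_shape' in meta and 'resized_shape' in meta

    raw_result = adapter.infer_sync(dict_inputs)       # raw outputs keyed by output names
    return wrapper.postprocess(raw_result, meta)       # implemented by the specific wrapper (assumed signature)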
