
Commit 31c3cc2

Avoid use of deprecated openvino.runtime (#1274)
* Avoid use of deprecated openvino.runtime namespace in favor of openvino
Signed-off-by: Kazantsev, Roman <[email protected]>

* Fix code-style
Signed-off-by: Kazantsev, Roman <[email protected]>

* Fix code-style
Signed-off-by: Kazantsev, Roman <[email protected]>

---------

Signed-off-by: Kazantsev, Roman <[email protected]>
1 parent f027f08 commit 31c3cc2
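At a glance, the change replaces imports from the deprecated `openvino.runtime` namespace with the top-level `openvino` package, which re-exports the same symbols. A minimal before/after sketch of the pattern (the symbol list is taken from the diffs below; the surrounding code is only illustrative):

    # before: deprecated namespace
    # from openvino.runtime import Core, Model, Type, save_model

    # after: the same symbols imported from the top-level package
    from openvino import Core, Model, Type, save_model

    core = Core()  # entry point for reading and compiling models; behavior is unchanged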

File tree

16 files changed: +75, -77 lines changed


optimum/exporters/openvino/__main__.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
 from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizerBase, ProcessorMixin
 from transformers.utils import is_torch_available

-from openvino.runtime import Core, Type, save_model
+from openvino import Core, Type, save_model
 from optimum.exporters import TasksManager
 from optimum.exporters.onnx.base import OnnxConfig
 from optimum.exporters.onnx.constants import SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED
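As a hedged illustration of the symbols imported above, a standalone sketch that reads an IR file, inspects an input's element type, and writes the model back (the paths are placeholders, not part of this commit):

    from openvino import Core, Type, save_model

    core = Core()
    model = core.read_model("model.xml")  # placeholder path

    # Type enumerates element types such as Type.f32 or Type.i64
    if model.input(0).get_element_type() == Type.f32:
        print("first input is fp32")

    # save_model writes the IR pair (model.xml + model.bin)
    save_model(model, "exported/openvino_model.xml")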

optimum/exporters/openvino/convert.py

Lines changed: 2 additions & 2 deletions
@@ -25,8 +25,8 @@
 from transformers.models.speecht5.modeling_speecht5 import SpeechT5HifiGan
 from transformers.utils import is_tf_available, is_torch_available

-from openvino.runtime import Model, save_model
-from openvino.runtime.exceptions import OVTypeError
+from openvino import Model, save_model
+from openvino.exceptions import OVTypeError
 from openvino.tools.ovc import convert_model
 from optimum.exporters import TasksManager
 from optimum.exporters.utils import (
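For context, a hedged sketch of how the renamed imports can combine around `convert_model`; the PyTorch module and paths are illustrative, and the try/except is only an example of where `OVTypeError` could be caught, not the conversion logic of this file:

    import torch
    from openvino import Model, save_model
    from openvino.exceptions import OVTypeError
    from openvino.tools.ovc import convert_model

    net = torch.nn.Linear(4, 2)

    try:
        ov_model: Model = convert_model(net, example_input=torch.zeros(1, 4))
        save_model(ov_model, "linear/openvino_model.xml")  # placeholder path
    except OVTypeError as err:
        # OVTypeError signals a type-related failure during conversion
        print(f"conversion failed: {err}")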

optimum/exporters/openvino/stateful.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 from transformers import PretrainedConfig

 import openvino as ov
-from openvino.runtime import opset13
+from openvino import opset13
 from optimum.intel.utils.import_utils import _openvino_version, is_openvino_version, is_transformers_version

 from .utils import MULTI_MODAL_TEXT_GENERATION_MODELS
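For context, `opset13` exposes the same operation factories at its new location; a minimal hedged sketch that builds and runs a one-operation graph with it (names and shapes are illustrative):

    import numpy as np
    import openvino as ov
    from openvino import opset13

    # y = x + 1 as a tiny OpenVINO graph
    x = opset13.parameter([1, 4], dtype=np.float32, name="x")
    one = opset13.constant(np.ones((1, 4), dtype=np.float32))
    y = opset13.add(x, one)
    model = ov.Model([y], [x], "add_one")

    compiled = ov.Core().compile_model(model, "CPU")
    result = compiled(np.zeros((1, 4), dtype=np.float32))
    print(result[compiled.output(0)])  # [[1. 1. 1. 1.]]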

optimum/exporters/openvino/utils.py

Lines changed: 2 additions & 2 deletions
@@ -21,8 +21,8 @@
 from transformers import PretrainedConfig
 from transformers.utils import is_torch_available

-from openvino.runtime import Dimension, PartialShape, Symbol
-from openvino.runtime.utils.types import get_element_type
+from openvino import Dimension, PartialShape, Symbol
+from openvino.utils.types import get_element_type
 from optimum.exporters import TasksManager
 from optimum.exporters.onnx.base import OnnxConfig
 from optimum.intel.utils import is_transformers_version
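A hedged sketch of what the relocated symbols are for: `PartialShape` and `Dimension` describe possibly dynamic shapes, and `get_element_type` maps NumPy/Python dtypes to `openvino.Type` members (the shape here is illustrative):

    import numpy as np
    from openvino import Dimension, PartialShape, Type
    from openvino.utils.types import get_element_type

    # dynamic batch dimension, fixed feature dimension of 128
    shape = PartialShape([Dimension(), Dimension(128)])
    print(shape)  # dynamic dimensions render as '?', e.g. [?,128]

    # dtype-to-Type mapping
    assert get_element_type(np.float32) == Type.f32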

optimum/intel/openvino/loaders.py

Lines changed: 6 additions & 6 deletions
@@ -19,9 +19,9 @@
 import torch
 from diffusers.loaders.textual_inversion import TextualInversionLoaderMixin, load_textual_inversion_state_dicts
 from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
-from openvino.runtime import Type
-from openvino.runtime import opset11 as ops
-from openvino.runtime.passes import Manager, Matcher, MatcherPass, WrapType
+from openvino import Type
+from openvino import opset11 as ops
+from openvino.passes import Manager, Matcher, MatcherPass, WrapType
 from transformers import PreTrainedTokenizer

 from .utils import TEXTUAL_INVERSION_EMBEDDING_KEYS
@@ -81,7 +81,7 @@ def load_textual_inversion(
         pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
         token: Optional[Union[str, List[str]]] = None,
         tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
-        text_encoder: Optional["openvino.runtime.Model"] = None,  # noqa: F821
+        text_encoder: Optional["openvino.Model"] = None,  # noqa: F821
         **kwargs,
     ):
         if not hasattr(self, "tokenizer"):
@@ -97,9 +97,9 @@ def load_textual_inversion(
             raise ValueError(
                 f"{self.__class__.__name__} requires `self.text_encoder` for calling `{self.load_textual_inversion.__name__}`"
             )
-        elif not isinstance(self.text_encoder.model, openvino.runtime.Model):
+        elif not isinstance(self.text_encoder.model, openvino.Model):
             raise ValueError(
-                f"{self.__class__.__name__} requires `self.text_encoder` of type `openvino.runtime.Model` for calling `{self.load_textual_inversion.__name__}`"
+                f"{self.__class__.__name__} requires `self.text_encoder` of type `openvino.Model` for calling `{self.load_textual_inversion.__name__}`"
             )

         # 1. Set correct tokenizer and text encoder
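The `openvino.passes` symbols imported here are the graph-rewriting machinery behind the textual-inversion loader; a hedged, standalone sketch of the same MatcherPass pattern (this pass only prints matched Constant nodes and is not the logic of this file):

    import numpy as np
    import openvino as ov
    from openvino import opset11 as ops
    from openvino.passes import Manager, Matcher, MatcherPass, WrapType


    class PrintConstants(MatcherPass):
        """Illustrative pass: report the friendly name of every Constant node."""

        def __init__(self):
            super().__init__()

            def callback(matcher: Matcher) -> bool:
                node = matcher.get_match_root()
                print("matched constant:", node.get_friendly_name())
                return False  # the graph is left unchanged

            self.register_matcher(Matcher(WrapType("opset1.Constant"), "PrintConstants"), callback)


    # a tiny model to run the pass on
    x = ops.parameter([2], dtype=np.float32, name="x")
    y = ops.add(x, ops.constant(np.ones(2, dtype=np.float32)))
    model = ov.Model([y], [x], "demo")

    manager = Manager()
    manager.register_pass(PrintConstants())
    manager.run_passes(model)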

optimum/intel/openvino/modeling.py

Lines changed: 2 additions & 2 deletions
@@ -66,7 +66,7 @@
     This model inherits from [`optimum.intel.openvino.modeling.OVBaseModel`]. Check the superclass documentation for the generic methods the
     library implements for all its model (such as downloading or saving)
     Parameters:
-        model (`openvino.runtime.Model`): is the main class used to run OpenVINO Runtime inference.
+        model (`openvino.Model`): is the main class used to run OpenVINO Runtime inference.
         config (`transformers.PretrainedConfig`): [PretrainedConfig](https://huggingface.co/docs/transformers/main_classes/configuration#transformers.PretrainedConfig)
             is the Model configuration class with all the parameters of the model.
             Initializing with a config file does not load the weights associated with the model, only the configuration.
@@ -119,7 +119,7 @@ class OVModel(OVBaseModel):
     base_model_prefix = "openvino_model"
     auto_model_class = AutoModel

-    def __init__(self, model: openvino.runtime.Model, config: transformers.PretrainedConfig = None, **kwargs):
+    def __init__(self, model: openvino.Model, config: transformers.PretrainedConfig = None, **kwargs):
         super().__init__(model, config, **kwargs)
         # Avoid warnings when creating a transformers pipeline
         AutoConfig.register(self.base_model_prefix, AutoConfig)

optimum/intel/openvino/modeling_base.py

Lines changed: 5 additions & 5 deletions
@@ -67,7 +67,7 @@ class OVBaseModel(OptimizedModel):

     def __init__(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         config: PretrainedConfig = None,
         device: str = "CPU",
         dynamic_shapes: bool = True,
@@ -206,7 +206,7 @@ def dtype(self) -> Optional[torch.dtype]:
         return None

     @property
-    def ov_submodels(self) -> Dict[str, openvino.runtime.Model]:
+    def ov_submodels(self) -> Dict[str, openvino.Model]:
         return {submodel_name: getattr(self, submodel_name) for submodel_name in self._ov_submodel_names}

     @property
@@ -220,7 +220,7 @@ def _ov_submodel_names(self) -> List[str]:
     def load_model(
         file_name: Union[str, Path],
         quantization_config: Union[OVWeightQuantizationConfig, Dict] = None,
-    ) -> openvino.runtime.Model:
+    ) -> openvino.Model:
         """
         Loads the model.

@@ -231,7 +231,7 @@ def load_model(
                 Quantization config to apply after model is loaded.
         """

-        def fix_op_names_duplicates(model: openvino.runtime.Model):
+        def fix_op_names_duplicates(model: openvino.Model):
             names = set()
             for op in model.get_ops():
                 friendly_name = op.get_friendly_name()
@@ -705,7 +705,7 @@ def compile(self):

     def _reshape(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         batch_size: int,
         sequence_length: int,
         height: int = None,
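For context on the `openvino.Model` API that `fix_op_names_duplicates` relies on, a hedged standalone sketch of walking a model's ops and de-duplicating friendly names (an illustration of the same idea, not the exact helper):

    import openvino as ov


    def dedupe_friendly_names(model: ov.Model) -> ov.Model:
        """Append a suffix to repeated friendly names so every op name is unique."""
        seen = set()
        for op in model.get_ops():
            name = op.get_friendly_name()
            while name in seen:
                name += "_duplicate"
            op.set_friendly_name(name)
            seen.add(name)
        return model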

optimum/intel/openvino/modeling_base_seq2seq.py

Lines changed: 4 additions & 4 deletions
@@ -53,9 +53,9 @@ class OVBaseModelForSeq2SeqLM(OVBaseModel):

     def __init__(
         self,
-        encoder: openvino.runtime.Model,
-        decoder: openvino.runtime.Model,
-        decoder_with_past: openvino.runtime.Model = None,
+        encoder: openvino.Model,
+        decoder: openvino.Model,
+        decoder_with_past: openvino.Model = None,
         config: PretrainedConfig = None,
         device: str = "CPU",
         dynamic_shapes: bool = True,
@@ -446,7 +446,7 @@ def _from_transformers(
             **kwargs,
         )

-    def _reshape(self, model: openvino.runtime.Model, batch_size: int, sequence_length: int, is_decoder=True):
+    def _reshape(self, model: openvino.Model, batch_size: int, sequence_length: int, is_decoder=True):
         shapes = {}
         for inputs in model.inputs:
             shapes[inputs] = inputs.get_partial_shape()
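The `_reshape` helpers shown here operate on `openvino.Model` through `get_partial_shape` and `reshape`; a hedged standalone sketch of pinning a dynamic batch and sequence length (the dimension indices are an assumption of this example):

    import openvino as ov


    def fix_batch_and_sequence(model: ov.Model, batch_size: int, sequence_length: int) -> ov.Model:
        """Pin the first two dimensions of every input to static values."""
        shapes = {}
        for model_input in model.inputs:
            shape = model_input.get_partial_shape()
            shape[0] = batch_size       # assumes dim 0 is the batch dimension
            shape[1] = sequence_length  # assumes dim 1 is the sequence dimension
            shapes[model_input] = shape
        model.reshape(shapes)
        return model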

optimum/intel/openvino/modeling_decoder.py

Lines changed: 2 additions & 2 deletions
@@ -103,7 +103,7 @@
 class OVBaseDecoderModel(OVModel):
     def __init__(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         config: PretrainedConfig = None,
         device: str = "CPU",
         dynamic_shapes: bool = True,
@@ -360,7 +360,7 @@ def _from_transformers(

     def _reshape(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         batch_size: int,
         sequence_length: int,
         height: int = None,

optimum/intel/openvino/modeling_diffusion.py

Lines changed: 17 additions & 19 deletions
@@ -49,8 +49,8 @@
 from huggingface_hub import snapshot_download
 from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
 from huggingface_hub.utils import validate_hf_hub_args
+from openvino import Core
 from openvino._offline_transformations import compress_model_transformation
-from openvino.runtime import Core
 from transformers import CLIPFeatureExtractor, CLIPTokenizer
 from transformers.modeling_outputs import ModelOutput
 from transformers.utils import http_user_agent
@@ -141,14 +141,14 @@ class OVDiffusionPipeline(OVBaseModel, DiffusionPipeline):
     def __init__(
         self,
         scheduler: SchedulerMixin,
-        unet: Optional[openvino.runtime.Model] = None,
-        vae_decoder: Optional[openvino.runtime.Model] = None,
+        unet: Optional[openvino.Model] = None,
+        vae_decoder: Optional[openvino.Model] = None,
         # optional pipeline models
-        vae_encoder: Optional[openvino.runtime.Model] = None,
-        text_encoder: Optional[openvino.runtime.Model] = None,
-        text_encoder_2: Optional[openvino.runtime.Model] = None,
-        text_encoder_3: Optional[openvino.runtime.Model] = None,
-        transformer: Optional[openvino.runtime.Model] = None,
+        vae_encoder: Optional[openvino.Model] = None,
+        text_encoder: Optional[openvino.Model] = None,
+        text_encoder_2: Optional[openvino.Model] = None,
+        text_encoder_3: Optional[openvino.Model] = None,
+        transformer: Optional[openvino.Model] = None,
         # optional pipeline submodels
         tokenizer: Optional[CLIPTokenizer] = None,
         tokenizer_2: Optional[CLIPTokenizer] = None,
@@ -183,7 +183,7 @@ def __init__(
             )

             main_model = unet if unet is not None else transformer
-            if not isinstance(main_model, openvino.runtime.CompiledModel):
+            if not isinstance(main_model, openvino.CompiledModel):
                 raise ValueError("`compile_only` expect that already compiled model will be provided")

             model_is_dynamic = model_has_dynamic_inputs(main_model)
@@ -278,7 +278,7 @@ def __init__(
         self.compile()

     @property
-    def ov_submodels(self) -> Dict[str, openvino.runtime.Model]:
+    def ov_submodels(self) -> Dict[str, openvino.Model]:
         return {name: getattr(getattr(self, name), "model") for name in self._ov_submodel_names}

     @property
@@ -709,7 +709,7 @@ def batch_size(self) -> int:

     def _reshape_unet(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         batch_size: int = -1,
         height: int = -1,
         width: int = -1,
@@ -757,7 +757,7 @@ def _reshape_unet(

     def _reshape_transformer(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         batch_size: int = -1,
         height: int = -1,
         width: int = -1,
@@ -824,17 +824,15 @@ def _reshape_transformer(
         model.reshape(shapes)
         return model

-    def _reshape_text_encoder(
-        self, model: openvino.runtime.Model, batch_size: int = -1, tokenizer_max_length: int = -1
-    ):
+    def _reshape_text_encoder(self, model: openvino.Model, batch_size: int = -1, tokenizer_max_length: int = -1):
         if batch_size != -1:
             shapes = {input_tensor: [batch_size, tokenizer_max_length] for input_tensor in model.inputs}
             model.reshape(shapes)
         return model

     def _reshape_vae_encoder(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         batch_size: int = -1,
         height: int = -1,
         width: int = -1,
@@ -858,7 +856,7 @@ def _reshape_vae_encoder(

     def _reshape_vae_decoder(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         height: int = -1,
         width: int = -1,
         num_images_per_prompt: int = -1,
@@ -1098,7 +1096,7 @@ class OVPipelinePart(ConfigMixin):

     def __init__(
         self,
-        model: openvino.runtime.Model,
+        model: openvino.Model,
         parent_pipeline: OVDiffusionPipeline,
         model_name: str = "",
     ):
@@ -1184,7 +1182,7 @@ def modules(self):


 class OVModelTextEncoder(OVPipelinePart):
-    def __init__(self, model: openvino.runtime.Model, parent_pipeline: OVDiffusionPipeline, model_name: str = ""):
+    def __init__(self, model: openvino.Model, parent_pipeline: OVDiffusionPipeline, model_name: str = ""):
         super().__init__(model, parent_pipeline, model_name)
         self.hidden_states_output_names = [
             name for out in self.model.outputs for name in out.names if name.startswith("hidden_states")
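Finally, the `compile_only` check above now targets `openvino.CompiledModel` at the top level; a hedged sketch of producing and type-checking a compiled model (the path and device are placeholders):

    import openvino
    from openvino import Core

    core = Core()
    model = core.read_model("unet/openvino_model.xml")  # placeholder path
    compiled = core.compile_model(model, "CPU")

    # after the namespace change the isinstance check targets openvino.CompiledModel
    assert isinstance(compiled, openvino.CompiledModel)
    assert not isinstance(model, openvino.CompiledModel)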

0 commit comments
