Skip to content

Commit e9a7c7c

Browse files
Committed: "merge main" — commit e9a7c7c (2 parents: 586d2e5 + 37e76c0)

File tree

28 files changed

+381
-1073
lines changed

28 files changed

+381
-1073
lines changed

.github/workflows/build_documentation.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ jobs:
2525
- uses: actions/checkout@v4
2626
- uses: actions/setup-node@v4
2727
with:
28-
node-version: "18"
28+
node-version: "20"
2929
cache-dependency-path: "kit/package-lock.json"
3030

3131
- name: Set up Python

.github/workflows/build_pr_documentation.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ jobs:
2626
- uses: actions/checkout@v4
2727
- uses: actions/setup-node@v4
2828
with:
29-
node-version: "18"
29+
node-version: "20"
3030
cache-dependency-path: "kit/package-lock.json"
3131

3232
- name: Set up Python

optimum/commands/export/openvino.py

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -21,15 +21,8 @@
2121

2222
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
2323

24-
from ...exporters import TasksManager
25-
from ...intel.utils.import_utils import (
26-
DIFFUSERS_IMPORT_ERROR,
27-
is_diffusers_available,
28-
is_nncf_available,
29-
)
30-
from ...intel.utils.modeling_utils import _infer_library_from_model_name_or_path
31-
from ...utils.save_utils import maybe_load_preprocessors
32-
from ..base import BaseOptimumCLICommand, CommandInfo
24+
from optimum.commands.base import BaseOptimumCLICommand, CommandInfo
25+
from optimum.exporters.tasks import TasksManager
3326

3427

3528
logger = logging.getLogger(__name__)
@@ -348,9 +341,13 @@ def parse_args(parser: "ArgumentParser"):
348341
return parse_args_openvino(parser)
349342

350343
def run(self):
344+
from optimum.utils.save_utils import maybe_load_preprocessors
345+
351346
from ...exporters.openvino.__main__ import infer_task, main_export, maybe_convert_tokenizers
352347
from ...exporters.openvino.utils import save_preprocessors
353348
from ...intel.openvino.configuration import _DEFAULT_4BIT_WQ_CONFIG, OVConfig, get_default_quantization_config
349+
from ...intel.utils.import_utils import DIFFUSERS_IMPORT_ERROR, is_diffusers_available, is_nncf_available
350+
from ...intel.utils.modeling_utils import _infer_library_from_model_name_or_path
354351

355352
if self.args.library is None:
356353
# TODO: add revision, subfolder and token to args

optimum/commands/neural_compressor/base.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,8 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from .. import BaseOptimumCLICommand, CommandInfo
15+
from optimum.commands.base import BaseOptimumCLICommand, CommandInfo
16+
1617
from .quantize import INCQuantizeCommand
1718

1819

optimum/commands/neural_compressor/quantize.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,8 @@
1616
from pathlib import Path
1717
from typing import TYPE_CHECKING, Optional
1818

19-
from ...exporters import TasksManager
20-
from ..base import BaseOptimumCLICommand, CommandInfo
19+
from optimum.commands.base import BaseOptimumCLICommand, CommandInfo
20+
from optimum.exporters.tasks import TasksManager
2121

2222

2323
if TYPE_CHECKING:

optimum/commands/register/register_inc.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,4 +15,6 @@
1515
from ..neural_compressor.base import INCCommand
1616

1717

18-
REGISTER_COMMANDS = [INCCommand]
18+
REGISTER_COMMANDS = [
19+
INCCommand,
20+
]

optimum/commands/register/register_openvino.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,11 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from ..export import ExportCommand
15+
from optimum.commands.export.base import ExportCommand
16+
1617
from ..export.openvino import OVExportCommand
1718

1819

19-
REGISTER_COMMANDS = [(OVExportCommand, ExportCommand)]
20+
REGISTER_COMMANDS = [
21+
(OVExportCommand, ExportCommand),
22+
]

optimum/exporters/openvino/__main__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,8 @@
2626
from transformers.utils import is_torch_available
2727

2828
from openvino import Core, Type, save_model
29-
from optimum.exporters import TasksManager
3029
from optimum.exporters.onnx.base import OnnxConfig
30+
from optimum.exporters.tasks import TasksManager
3131
from optimum.intel.utils.import_utils import (
3232
is_nncf_available,
3333
is_openvino_tokenizers_available,

optimum/exporters/openvino/convert.py

Lines changed: 17 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,12 @@
2424
from packaging.version import Version
2525
from transformers.generation import GenerationMixin
2626
from transformers.models.speecht5.modeling_speecht5 import SpeechT5HifiGan
27-
from transformers.utils import is_tf_available, is_torch_available
27+
from transformers.utils import is_torch_available
2828

2929
from openvino import Model, save_model
3030
from openvino.exceptions import OVTypeError
3131
from openvino.tools.ovc import convert_model
32-
from optimum.exporters import TasksManager
32+
from optimum.exporters.tasks import TasksManager
3333
from optimum.exporters.utils import (
3434
DECODER_NAME,
3535
ENCODER_NAME,
@@ -57,7 +57,6 @@
5757

5858
from ...intel.utils.import_utils import is_nncf_available
5959
from ...intel.utils.modeling_utils import _infer_library_from_model_or_model_class
60-
from .model_patcher import patch_model_with_bettertransformer
6160
from .stateful import (
6261
ensure_export_task_support_stateful,
6362
ensure_model_type_support_stateful,
@@ -87,9 +86,6 @@
8786
if is_diffusers_available():
8887
from diffusers import DiffusionPipeline, ModelMixin
8988

90-
if is_tf_available():
91-
from transformers.modeling_tf_utils import TFPreTrainedModel
92-
9389

9490
if TYPE_CHECKING:
9591
from optimum.exporters.onnx.base import OnnxConfig
@@ -99,7 +95,7 @@
9995
def _set_runtime_options(
10096
models_and_export_configs: Dict[
10197
str,
102-
Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"], "OnnxConfig"],
98+
Tuple[Union["PreTrainedModel", "ModelMixin", "DiffusionPipeline"], "OnnxConfig"],
10399
],
104100
task: str,
105101
library_name: str,
@@ -141,7 +137,7 @@ def _save_model(
141137

142138

143139
def export(
144-
model: Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"],
140+
model: Union["PreTrainedModel", "ModelMixin", "DiffusionPipeline"],
145141
config: "OnnxConfig",
146142
output: Path,
147143
opset: Optional[int] = None,
@@ -154,10 +150,10 @@ def export(
154150
library_name: Optional[str] = None,
155151
) -> Tuple[List[str], List[str]]:
156152
"""
157-
Exports a Pytorch or TensorFlow model to an OpenVINO Intermediate Representation.
153+
Exports a Pytorch model to an OpenVINO Intermediate Representation.
158154
159155
Args:
160-
model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
156+
model ([`PreTrainedModel`]):
161157
The model to export.
162158
config ([`~exporters.onnx.config.OnnxConfig`]):
163159
The ONNX configuration associated with the exported model.
@@ -179,10 +175,10 @@ def export(
179175
`Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
180176
the ONNX configuration.
181177
"""
182-
if not (is_torch_available() or is_tf_available()):
178+
if not is_torch_available():
183179
raise ImportError(
184-
"Cannot convert because neither PyTorch nor TensorFlow are installed. "
185-
"Please install torch or tensorflow first."
180+
"Cannot convert because torch is not installed. "
181+
"Please install torch with `pip install torch` and try again."
186182
)
187183

188184
if "diffusers" in str(model.__class__) and not is_diffusers_available():
@@ -227,63 +223,8 @@ def export(
227223
patch_16bit_model=patch_16bit_model,
228224
library_name=library_name,
229225
)
230-
231-
elif is_tf_available() and issubclass(type(model), TFPreTrainedModel):
232-
output.parent.mkdir(parents=True, exist_ok=True)
233-
if opset is None:
234-
opset = config.DEFAULT_ONNX_OPSET
235-
if device == "cuda":
236-
raise RuntimeError("`tf2onnx` does not support export on CUDA device.")
237-
if input_shapes is not None:
238-
logger.info("`input_shapes` argument is not supported by the Tensorflow ONNX export and will be ignored.")
239-
return export_tensorflow(model, config, opset, output, ov_config=ov_config, library_name=library_name)
240-
241226
else:
242-
raise RuntimeError(
243-
"You either provided a PyTorch model with only TensorFlow installed, or a TensorFlow model with only PyTorch installed."
244-
)
245-
246-
247-
def export_tensorflow(
248-
model: Union["PreTrainedModel", "ModelMixin"],
249-
config: "OnnxConfig",
250-
opset: int,
251-
output: Path,
252-
ov_config: Optional["OVConfig"] = None,
253-
library_name: Optional[str] = None,
254-
):
255-
"""
256-
Export the TensorFlow model to OpenVINO format.
257-
258-
Args:
259-
model (Union[): The model to export.
260-
config (OnnxConfig): The configuration of the model.
261-
opset (int): The ONNX opset version to use.
262-
output (Path): The path to save the model.
263-
264-
Returns:
265-
input_names: list of input names from ONNX configuration
266-
output_names: list of output names from ONNX configuration
267-
bool: True if the model was exported successfully.
268-
"""
269-
from optimum.exporters.onnx.convert import export_tensorflow as export_tensorflow_onnx
270-
271-
onnx_path = Path(output).with_suffix(".onnx")
272-
input_names, output_names = export_tensorflow_onnx(model, config, opset, onnx_path)
273-
ov_model = convert_model(str(onnx_path))
274-
275-
library_name = _infer_library_from_model_or_model_class(model=model, library_name=library_name)
276-
277-
_save_model(
278-
ov_model,
279-
output.parent / output,
280-
ov_config=ov_config,
281-
library_name=library_name,
282-
config=config,
283-
)
284-
del ov_model
285-
gc.collect()
286-
return input_names, output_names, True
227+
raise RuntimeError("You either provided a non-PyTorch model or the PyTorch library is not installed.")
287228

288229

289230
def export_pytorch_via_onnx(
@@ -406,15 +347,6 @@ def export_pytorch(
406347
# TODO: temporary solution but statefulness should be added to the export config earlier
407348
config.stateful = stateful
408349

409-
if stateful:
410-
# Trigger bettertransformer together with stateful model because OpenVINO HW-dependent transformations expect
411-
# both of them are applied to demonstrate the best performance.
412-
# TODO: Consider applying bettertransformer regardless of stateful flag -- requires additional validation.
413-
model = patch_model_with_bettertransformer(model)
414-
# TODO: Consider unpatching model after export is done in the end of this function.
415-
# Now it is left as-is because the model is not expected to be used after call export_pytorch, and
416-
# this function is one of the _internal_ steps in a bigger model conversion pipeline.
417-
418350
with torch.no_grad():
419351
if hasattr(model, "config"):
420352
model.config.torchscript = False
@@ -523,7 +455,7 @@ def ts_patched_forward(*args, **kwargs):
523455

524456
def export_models(
525457
models_and_export_configs: Dict[
526-
str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"], "OnnxConfig"]
458+
str, Tuple[Union["PreTrainedModel", "ModelMixin", "DiffusionPipeline"], "OnnxConfig"]
527459
],
528460
output_dir: Path,
529461
opset: Optional[int] = None,
@@ -540,7 +472,7 @@ def export_models(
540472
Export the models to OpenVINO IR format
541473
542474
Args:
543-
models_and_export_configs (Dict[ str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin"], "OnnxConfig"]):
475+
models_and_export_configs (Dict[ str, Tuple[Union["PreTrainedModel", "ModelMixin"], "OnnxConfig"]):
544476
output_dir (Path): output directory for saving models
545477
opset (Optional[int], optional, Default to None): ONNX export opset
546478
output_names (Optional[List[str]], optional, Defaults to None): model output names
@@ -596,7 +528,7 @@ def export_models(
596528

597529

598530
def export_from_model(
599-
model: Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"],
531+
model: Union["PreTrainedModel", "ModelMixin", "DiffusionPipeline"],
600532
output: Union[str, Path],
601533
task: Optional[str] = None,
602534
ov_config: Optional["OVConfig"] = None,
@@ -720,7 +652,6 @@ def export_from_model(
720652
library_name=library_name,
721653
model_kwargs=model_kwargs,
722654
_variant="default",
723-
legacy=False,
724655
exporter="openvino",
725656
stateful=stateful,
726657
)
@@ -937,7 +868,7 @@ def _add_version_info_to_model(model: Model, library_name: Optional[str] = None)
937868

938869

939870
def _get_multi_modal_submodels_and_export_configs(
940-
model: Union["PreTrainedModel", "TFPreTrainedModel"],
871+
model: "PreTrainedModel",
941872
task: str,
942873
library_name: str,
943874
int_dtype: str,
@@ -991,7 +922,7 @@ def _get_multi_modal_submodels_and_export_configs(
991922

992923

993924
def _get_submodels_and_export_configs(
994-
model: Union["PreTrainedModel", "TFPreTrainedModel", "DiffusionPipeline"],
925+
model: Union["PreTrainedModel", "DiffusionPipeline"],
995926
task: str,
996927
monolith: bool,
997928
custom_export_configs: Dict,
@@ -1002,7 +933,6 @@ def _get_submodels_and_export_configs(
1002933
float_dtype: str = "fp32",
1003934
fn_get_submodels: Optional[Callable] = None,
1004935
preprocessors: Optional[List[Any]] = None,
1005-
legacy: bool = False,
1006936
model_kwargs: Optional[Dict] = None,
1007937
exporter: str = "openvino",
1008938
stateful: bool = False,
@@ -1032,7 +962,6 @@ def _get_submodels_and_export_configs(
1032962
float_dtype,
1033963
fn_get_submodels,
1034964
preprocessors,
1035-
legacy,
1036965
model_kwargs,
1037966
exporter,
1038967
)
@@ -1405,7 +1334,7 @@ def get_flux_models_for_export(pipeline, exporter, int_dtype, float_dtype):
14051334

14061335

14071336
def _get_encoder_decoder_stateful_models_for_export(
1408-
model: Union["PreTrainedModel", "TFPreTrainedModel"],
1337+
model: "PreTrainedModel",
14091338
task: str,
14101339
_variant: str,
14111340
library_name: str,
@@ -1421,7 +1350,6 @@ def _get_encoder_decoder_stateful_models_for_export(
14211350
int_dtype=int_dtype,
14221351
float_dtype=float_dtype,
14231352
preprocessors=preprocessors,
1424-
legacy=False,
14251353
)
14261354

14271355
export_config.variant = _variant
@@ -1444,7 +1372,7 @@ def _get_encoder_decoder_stateful_models_for_export(
14441372

14451373

14461374
def _get_speecht5_tss_model_for_export(
1447-
model: Union["PreTrainedModel", "TFPreTrainedModel"],
1375+
model: "PreTrainedModel",
14481376
task: str,
14491377
library_name: str,
14501378
int_dtype: str,
@@ -1467,7 +1395,6 @@ def _get_speecht5_tss_model_for_export(
14671395
int_dtype=int_dtype,
14681396
float_dtype=float_dtype,
14691397
preprocessors=preprocessors,
1470-
legacy=False,
14711398
)
14721399
export_config.variant = "default"
14731400

0 commit comments

Comments (0)