
Commit b362180

Diffusion tasks (#827)

* adapt to the new tasks
* Update optimum/exporters/openvino/convert.py
  Co-authored-by: Ella Charlaix <[email protected]>
* update tasks manager
* fix
* explicit kwargs
* fix
* fix model saving
* fix library name overriding
* fix _save_model call assertions
* fix tasks vs model types in tests
* last fix

Co-authored-by: Ella Charlaix <[email protected]>

1 parent d35ced8 · commit b362180

File tree: 8 files changed, +151 −65 lines

optimum/commands/export/openvino.py (12 additions & 6 deletions)

```diff
@@ -227,13 +227,19 @@ def _get_default_int4_config(model_id_or_path, library_name):
 
             return _DEFAULT_4BIT_CONFIG
 
-        library_name = TasksManager.infer_library_from_model(self.args.model, library_name=self.args.library)
-        if library_name == "sentence_transformers" and self.args.library is None:
-            logger.warning(
-                "Library name is not specified. There are multiple possible variants: `sentence_transformers`, `transformers`."
-                "`transformers` will be selected. If you want to load your model with the `sentence-transformers` library instead, please set --library sentence_transformers"
+        if self.args.library is None:
+            # TODO: add revision, subfolder and token to args
+            library_name = TasksManager._infer_library_from_model_name_or_path(
+                model_name_or_path=self.args.model, cache_dir=self.args.cache_dir
             )
-            library_name = "transformers"
+            if library_name == "sentence_transformers":
+                logger.warning(
+                    "Library name is not specified. There are multiple possible variants: `sentence_transformers`, `transformers`."
+                    "`transformers` will be selected. If you want to load your model with the `sentence-transformers` library instead, please set --library sentence_transformers"
+                )
+                library_name = "transformers"
+        else:
+            library_name = self.args.library
 
         if self.args.weight_format is None:
             ov_config = None
```
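In short, the CLI now skips inference entirely whenever `--library` is given, instead of inferring first and second-guessing the flag. A minimal sketch of the new control flow, with the inference call injected as a parameter (the real code calls `TasksManager._infer_library_from_model_name_or_path`):

```python
import logging
from typing import Callable, Optional

logger = logging.getLogger(__name__)


def resolve_library_name(model_id: str, library_arg: Optional[str], infer: Callable[[str], str]) -> str:
    """Sketch of the new CLI behavior: inference only runs when --library is absent."""
    if library_arg is not None:
        return library_arg  # an explicit --library value is never overridden
    inferred = infer(model_id)
    if inferred == "sentence_transformers":
        # ambiguous checkpoint: warn and fall back to transformers, as before
        logger.warning("Library name is not specified; `transformers` will be selected.")
        return "transformers"
    return inferred


# resolve_library_name("some/model", None, lambda _: "sentence_transformers") -> "transformers" (with warning)
# resolve_library_name("some/model", "sentence_transformers", lambda _: "x")  -> "sentence_transformers"
```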

optimum/exporters/openvino/__main__.py (34 additions & 15 deletions)

```diff
@@ -47,11 +47,24 @@
 logger = logging.getLogger(__name__)
 
 
-def infer_task(task, model_name_or_path):
+def infer_task(
+    task,
+    model_name_or_path,
+    subfolder: str = "",
+    revision: Optional[str] = None,
+    cache_dir: str = HUGGINGFACE_HUB_CACHE,
+    token: Optional[Union[bool, str]] = None,
+):
     task = TasksManager.map_from_synonym(task)
     if task == "auto":
         try:
-            task = TasksManager.infer_task_from_model(model_name_or_path)
+            task = TasksManager._infer_task_from_model_name_or_path(
+                model_name_or_path=model_name_or_path,
+                subfolder=subfolder,
+                revision=revision,
+                cache_dir=cache_dir,
+                token=token,
+            )
         except KeyError as e:
             raise KeyError(
                 f"The task could not be automatically inferred. Please provide the argument --task with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}"
```
```diff
@@ -193,19 +206,27 @@ def main_export(
         ov_config = OVConfig(quantization_config=q_config)
 
     original_task = task
-    task = infer_task(task, model_name_or_path)
-    framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, framework=framework)
-    library_name_is_not_provided = library_name is None
-    library_name = TasksManager.infer_library_from_model(
-        model_name_or_path, subfolder=subfolder, library_name=library_name
+    task = infer_task(
+        task, model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token
+    )
+    framework = TasksManager.determine_framework(
+        model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token
     )
 
-    if library_name == "sentence_transformers" and library_name_is_not_provided:
-        logger.warning(
-            "Library name is not specified. There are multiple possible variants: `sentence_tenasformers`, `transformers`."
-            "`transformers` will be selected. If you want to load your model with the `sentence-transformers` library instead, please set --library sentence_transformers"
+    if library_name is None:
+        library_name = TasksManager._infer_library_from_model_name_or_path(
+            model_name_or_path=model_name_or_path,
+            subfolder=subfolder,
+            revision=revision,
+            cache_dir=cache_dir,
+            token=token,
         )
-        library_name = "transformers"
+        if library_name == "sentence_transformers":
+            logger.warning(
+                "Library name is not specified. There are multiple possible variants: `sentence_tenasformers`, `transformers`."
+                "`transformers` will be selected. If you want to load your model with the `sentence-transformers` library instead, please set --library sentence_transformers"
+            )
+            library_name = "transformers"
 
     do_gptq_patching = False
     custom_architecture = False
```
```diff
@@ -317,9 +338,7 @@ class StoreAttr(object):
         )
         model.config.pad_token_id = pad_token_id
 
-    if "stable-diffusion" in task:
-        model_type = "stable-diffusion"
-    elif hasattr(model.config, "export_model_type"):
+    if hasattr(model.config, "export_model_type"):
         model_type = model.config.export_model_type.replace("_", "-")
     else:
         model_type = model.config.model_type.replace("_", "-")
```
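With the hard-coded `stable-diffusion` branch gone, every exported model resolves its type the same way: `config.export_model_type` wins if present, otherwise `config.model_type`, with underscores normalized to hyphens. A small self-contained illustration (the config class is a hypothetical stand-in):

```python
class FakeConfig:  # hypothetical stand-in for a transformers/diffusers config
    model_type = "stable_diffusion"


def resolve_model_type(config) -> str:
    # export_model_type takes priority; both paths normalize "_" to "-"
    if hasattr(config, "export_model_type"):
        return config.export_model_type.replace("_", "-")
    return config.model_type.replace("_", "-")


print(resolve_model_type(FakeConfig()))  # -> "stable-diffusion"
```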

optimum/exporters/openvino/convert.py (49 additions & 14 deletions)

```diff
@@ -64,7 +64,7 @@
     from transformers.modeling_utils import PreTrainedModel
 
 if is_diffusers_available():
-    from diffusers import ModelMixin
+    from diffusers import DiffusionPipeline, ModelMixin
 
 if is_tf_available():
     from transformers.modeling_tf_utils import TFPreTrainedModel
@@ -74,7 +74,7 @@
     from optimum.intel.openvino.configuration import OVConfig
 
 
-def _save_model(model, path: str, ov_config: Optional["OVConfig"] = None):
+def _save_model(model, path: str, ov_config: Optional["OVConfig"] = None, library_name: Optional[str] = None):
     compress_to_fp16 = False
 
     if ov_config is not None:
@@ -90,13 +90,12 @@ def _save_model(model, path: str, ov_config: Optional["OVConfig"] = None):
 
         compress_to_fp16 = ov_config.dtype == "fp16"
 
-    library_name = TasksManager.infer_library_from_model(Path(path).parent)
     model = _add_version_info_to_model(model, library_name)
     save_model(model, path, compress_to_fp16)
 
 
 def export(
-    model: Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin"],
+    model: Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"],
     config: OnnxConfig,
     output: Path,
     opset: Optional[int] = None,
```
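Note the deleted line in `_save_model`: the library used to be re-inferred from the save path's parent directory, which is fragile once a diffusion pipeline writes several sub-models into component folders. The caller now supplies `library_name` itself, and it flows into the version-info metadata. A sketch of the new contract, with the helpers injected for illustration (the real ones are `_add_version_info_to_model` and `openvino.save_model`):

```python
from typing import Callable, Optional


def save_with_library(
    ov_model: object,
    path: str,
    library_name: Optional[str],
    add_version_info: Callable[[object, Optional[str]], object],
    save: Callable[[object, str], None],
) -> None:
    """Sketch of the new _save_model contract: no path-based guessing."""
    # the exporter that produced ov_model already knows whether it came
    # from transformers, diffusers, timm, ... so it passes the name in
    ov_model = add_version_info(ov_model, library_name)
    save(ov_model, path)
```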
```diff
@@ -139,7 +138,7 @@ def export(
         )
 
     if "diffusers" in str(model.__class__) and not is_diffusers_available():
-        raise ImportError("The pip package `diffusers` is required to export stable diffusion models to ONNX.")
+        raise ImportError("The package `diffusers` is required to export diffusion models to OpenVINO.")
 
     if stateful:
         # This will be checked anyway after the model conversion, but checking it earlier will save time for a user if not suitable version is used
@@ -198,7 +197,19 @@ def export_tensorflow(
     onnx_path = Path(output).with_suffix(".onnx")
     input_names, output_names = export_tensorflow_onnx(model, config, opset, onnx_path)
     ov_model = convert_model(str(onnx_path))
-    _save_model(ov_model, output.parent / output, ov_config=ov_config)
+
+    if model.__class__.__module__.startswith("optimum"):
+        # for wrapped models
+        library_name = TasksManager._infer_library_from_model_or_model_class(model=model.model)
+    else:
+        library_name = TasksManager._infer_library_from_model_or_model_class(model=model)
+
+    _save_model(
+        ov_model,
+        output.parent / output,
+        ov_config=ov_config,
+        library_name=library_name,
+    )
     return input_names, output_names, True
@@ -251,7 +262,19 @@ def export_pytorch_via_onnx(
     )
     torch.onnx.export = orig_torch_onnx_export
     ov_model = convert_model(str(onnx_output))
-    _save_model(ov_model, output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output, ov_config=ov_config)
+
+    if model.__class__.__module__.startswith("optimum"):
+        # for wrapped models
+        library_name = TasksManager._infer_library_from_model_or_model_class(model=model.model)
+    else:
+        library_name = TasksManager._infer_library_from_model_or_model_class(model=model)
+
+    _save_model(
+        ov_model,
+        output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output,
+        ov_config=ov_config,
+        library_name=library_name,
+    )
     return input_names, output_names, True
@@ -413,7 +436,18 @@ def ts_patched_forward(*args, **kwargs):
     if stateful:
         patch_stateful(model.config, ov_model)
 
-    _save_model(ov_model, output, ov_config=ov_config)
+    if model.__module__.startswith("optimum"):
+        # for wrapped models like timm in optimum.intel.openvino.modeling_timm
+        library_name = TasksManager._infer_library_from_model_or_model_class(model=model.model)
+    else:
+        library_name = TasksManager._infer_library_from_model_or_model_class(model=model)
+
+    _save_model(
+        ov_model,
+        output,
+        ov_config=ov_config,
+        library_name=library_name,
+    )
     clear_class_registry()
     del model
     gc.collect()
```
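The same unwrap-then-infer pattern now guards all three save sites. Condensed as a helper it would look roughly like this (a sketch; the diff inlines it, checking `model.__module__` in the TorchScript path versus `model.__class__.__module__` in the others):

```python
def infer_export_library(model, infer) -> str:
    """Sketch: optimum wrapper classes (e.g. the timm wrapper in
    optimum.intel.openvino.modeling_timm) hide the real model under .model,
    so library inference must look at the wrapped object instead."""
    if type(model).__module__.startswith("optimum"):
        return infer(model.model)  # inspect the wrapped inner model
    return infer(model)  # plain transformers/diffusers/timm model
```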
```diff
@@ -422,7 +456,7 @@
 
 def export_models(
     models_and_export_configs: Dict[
-        str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin"], "OnnxConfig"]
+        str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"], "OnnxConfig"]
     ],
     output_dir: Path,
     opset: Optional[int] = None,
@@ -491,7 +525,7 @@
 
 
 def export_from_model(
-    model: Union["PreTrainedModel", "TFPreTrainedModel"],
+    model: Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"],
     output: Union[str, Path],
     task: Optional[str] = None,
     ov_config: Optional["OVConfig"] = None,
@@ -505,14 +539,15 @@ def export_from_model(
     trust_remote_code: bool = False,
     **kwargs_shapes,
 ):
+    model_kwargs = model_kwargs or {}
+
     if ov_config is not None and ov_config.quantization_config and not is_nncf_available():
         raise ImportError(
             f"Compression of the weights to {ov_config.quantization_config} requires nncf, please install it with `pip install nncf`"
         )
 
-    model_kwargs = model_kwargs or {}
-    library_name = TasksManager._infer_library_from_model(model)
-    TasksManager.standardize_model_attributes(model, library_name)
+    library_name = TasksManager._infer_library_from_model_or_model_class(model=model)
+    TasksManager.standardize_model_attributes(model)
 
     if hasattr(model.config, "export_model_type"):
         model_type = model.config.export_model_type.replace("_", "-")
@@ -521,7 +556,7 @@ def export_from_model(
 
     custom_architecture = library_name == "transformers" and model_type not in TasksManager._SUPPORTED_MODEL_TYPE
 
-    if task is not None:
+    if task is not None and task != "auto":
         task = TasksManager.map_from_synonym(task)
     else:
         try:
```
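Taken together, the widened annotations and the new `task != "auto"` guard mean a loaded `DiffusionPipeline` can be handed straight to `export_from_model`. A hedged usage sketch (assumes `diffusers` is installed and an optimum-intel build containing this commit):

```python
from diffusers import DiffusionPipeline

from optimum.exporters.openvino.convert import export_from_model

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# task="auto" now falls through to task inference instead of being
# treated as a literal task name
export_from_model(pipeline, output="sd15_openvino", task="auto")
```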

optimum/intel/openvino/utils.py (17 additions & 16 deletions)

```diff
@@ -18,11 +18,12 @@
 import os
 from glob import glob
 from pathlib import Path
-from typing import Tuple, Union
+from typing import Tuple, Type, Union
 
 import numpy as np
 from huggingface_hub import model_info
-from openvino.runtime import Core, Type, properties
+from openvino.runtime import Core, properties
+from openvino.runtime import Type as OVType
 from transformers import AutoTokenizer, CLIPTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
 from transformers.onnx.utils import ParameterFormat, compute_serialized_parameters_size
@@ -70,19 +71,19 @@
 
 
 STR_TO_OV_TYPE = {
-    "boolean": Type.boolean,
-    "f16": Type.f16,
-    "f32": Type.f32,
-    "f64": Type.f64,
-    "i8": Type.i8,
-    "i16": Type.i16,
-    "i32": Type.i32,
-    "i64": Type.i64,
-    "u8": Type.u8,
-    "u16": Type.u16,
-    "u32": Type.u32,
-    "u64": Type.u64,
-    "bf16": Type.bf16,
+    "boolean": OVType.boolean,
+    "f16": OVType.f16,
+    "f32": OVType.f32,
+    "f64": OVType.f64,
+    "i8": OVType.i8,
+    "i16": OVType.i16,
+    "i32": OVType.i32,
+    "i64": OVType.i64,
+    "u8": OVType.u8,
+    "u16": OVType.u16,
+    "u32": OVType.u32,
+    "u64": OVType.u64,
+    "bf16": OVType.bf16,
 }
```
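The import rename only frees up the name `Type` for `typing.Type` below; the mapping itself is unchanged. Typical use (assuming `openvino` is installed):

```python
from openvino.runtime import Type as OVType

from optimum.intel.openvino.utils import STR_TO_OV_TYPE

# translate a string dtype (e.g. from a config or CLI flag) into an OpenVINO element type
assert STR_TO_OV_TYPE["f16"] == OVType.f16
element_type = STR_TO_OV_TYPE["bf16"]
```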
```diff
@@ -110,7 +111,7 @@
 }
 
 
-NEED_CONVERT_TO_FAST_TOKENIZER: Tuple[type(PreTrainedTokenizer)] = (CLIPTokenizer,)
+NEED_CONVERT_TO_FAST_TOKENIZER: Tuple[Type[PreTrainedTokenizer]] = (CLIPTokenizer,)
 
 
 def maybe_convert_tokenizer_to_fast(
```
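This last change is a genuine typing fix: `type(PreTrainedTokenizer)` is evaluated at runtime and yields the metaclass (plain `type`), which tells a type checker nothing, whereas `typing.Type[PreTrainedTokenizer]` means "a class object subclassing PreTrainedTokenizer". A minimal illustration with stand-in classes:

```python
from typing import Tuple, Type


class BaseTokenizer:  # hypothetical stand-in for transformers.PreTrainedTokenizer
    pass


class ClipLikeTokenizer(BaseTokenizer):
    pass


print(type(BaseTokenizer))  # <class 'type'> -- what the old annotation evaluated to

# the fixed form: a tuple of tokenizer *classes*, still usable with isinstance(...)
NEEDS_FAST: Tuple[Type[BaseTokenizer], ...] = (ClipLikeTokenizer,)
assert isinstance(ClipLikeTokenizer(), NEEDS_FAST)
```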

setup.py (1 addition & 1 deletion)

```diff
@@ -29,7 +29,7 @@
 INSTALL_REQUIRE = [
     "torch>=1.11",
     "transformers>=4.36.0,<4.43.0",
-    "optimum>=1.21.2,<1.22.0",
+    "optimum@git+https://github.com/huggingface/optimum.git",
     "datasets>=1.4.0",
     "sentencepiece",
     "setuptools",
```
File renamed without changes.
