From 629a7faf13d70bef57aea10f9c813b063fd2a778 Mon Sep 17 00:00:00 2001
From: Ella Charlaix
Date: Wed, 29 Oct 2025 17:26:33 +0100
Subject: [PATCH 1/3] Fix TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS

---
 optimum/exporters/openvino/model_configs.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/optimum/exporters/openvino/model_configs.py b/optimum/exporters/openvino/model_configs.py
index b8ffcdcf26..6c1797e937 100644
--- a/optimum/exporters/openvino/model_configs.py
+++ b/optimum/exporters/openvino/model_configs.py
@@ -198,10 +198,6 @@ def init_model_configs():
         "AutoModelForImageTextToText",
     )
 
-    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS[
-        "image-text-to-text"
-    ] = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
-
     TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["video-text-to-text"] = "AutoModelForVision2Seq"
 
     if is_diffusers_available() and "fill" not in TasksManager._DIFFUSERS_TASKS_TO_MODEL_LOADERS:

From ebdb168879d1d0959e9c4eaa5a6868df532781bf Mon Sep 17 00:00:00 2001
From: Ella Charlaix
Date: Wed, 29 Oct 2025 18:49:24 +0100
Subject: [PATCH 2/3] remove quantizer set task

---
 optimum/intel/openvino/quantization.py | 33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)

diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py
index 546f87f345..8a100b83fa 100644
--- a/optimum/intel/openvino/quantization.py
+++ b/optimum/intel/openvino/quantization.py
@@ -40,11 +40,9 @@
 from transformers.pytorch_utils import Conv1D
 from transformers.utils import is_accelerate_available
 
-from optimum.exporters.tasks import TasksManager
 from optimum.quantization_base import OptimumQuantizer
 from optimum.utils.logging import warn_once
 
-from ..utils.constant import _TASK_ALIASES
 from ..utils.import_utils import (
     DATASETS_IMPORT_ERROR,
     _nncf_version,
@@ -1142,15 +1140,20 @@ def __init__(self, model: OVModel, task: Optional[str] = None, seed: int = 42, *
         Args:
             model (`OVModel`):
                 The [OVModel](https://huggingface.co/docs/optimum-intel/en/openvino/reference) to quantize.
-            task (`str`, defaults to None):
-                The task defining the model topology used for the ONNX export.
             seed (`int`, defaults to 42):
                 The random seed to use when shuffling the calibration dataset.
""" super().__init__() self.model = model - self.task = task self.dataset_builder = OVCalibrationDatasetBuilder(model, seed) + self.task = task + if self.task is not None: + logger.warning(f"The `task` argument is ignored and will be remvoed in optimum-intel v1.27") + + @property + def task(self) -> Dict[str, Union[openvino.Model, openvino.runtime.CompiledModel]]: + logger.warning("The `task` attribute is deprecated and will be removed in v1.27.") + return self.task @classmethod def from_pretrained(cls, model: OVModel, **kwargs): @@ -1196,7 +1199,7 @@ def quantize( >>> from optimum.intel import OVQuantizer, OVModelForCausalLM >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b") - >>> quantizer = OVQuantizer.from_pretrained(model, task="text-generation") + >>> quantizer = OVQuantizer.from_pretrained(model) >>> ov_config = OVConfig(quantization_config=OVWeightQuantizationConfig()) >>> quantizer.quantize(ov_config=ov_config, save_directory="./quantized_model") >>> optimized_model = OVModelForCausalLM.from_pretrained("./quantized_model") @@ -1208,7 +1211,7 @@ def quantize( >>> model = OVModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english", export=True) >>> # or >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english") - >>> quantizer = OVQuantizer.from_pretrained(model, task="text-classification") + >>> quantizer = OVQuantizer.from_pretrained(model) >>> ov_config = OVConfig(quantization_config=OVQuantizationConfig()) >>> quantizer.quantize(calibration_dataset=dataset, ov_config=ov_config, save_directory="./quantized_model") >>> optimized_model = OVModelForSequenceClassification.from_pretrained("./quantized_model") @@ -1454,22 +1457,6 @@ def _save_pretrained(model: openvino.Model, output_path: str): compress_quantize_weights_transformation(model) openvino.save_model(model, output_path, compress_to_fp16=False) - def _set_task(self): - if self.task is None: - self.task = TasksManager.infer_task_from_model(self.model.config._name_or_path) - if self.task is None: - raise ValueError( - "The task defining the model topology could not be extracted and needs to be specified for the ONNX export." - ) - - self.task = _TASK_ALIASES.get(self.task, self.task) - - if self.task == "text2text-generation": - raise ValueError("Seq2Seq models are currently not supported for post-training static quantization.") - - if self.task == "image-to-text": - raise ValueError("Image2Text models are currently not supported for post-training static quantization.") - def get_calibration_dataset( self, dataset_name: str, From 56520ad1a00aac803cf203526dedf56dfa2a174a Mon Sep 17 00:00:00 2001 From: Ella Charlaix Date: Thu, 30 Oct 2025 14:21:41 +0100 Subject: [PATCH 3/3] add test with transformers v4.46 --- .github/workflows/test_openvino.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_openvino.yml b/.github/workflows/test_openvino.yml index eaacbbb630..f96ae68488 100644 --- a/.github/workflows/test_openvino.yml +++ b/.github/workflows/test_openvino.yml @@ -37,7 +37,7 @@ jobs: "*diffusion*", "*quantization*", ] - transformers-version: ["4.45.0", "latest"] + transformers-version: ["4.45.0", "4.46.0", "latest"] runs-on: ubuntu-22.04