Skip to content

Commit 07ada34

Browse files
authored
Remove deprecated arguments (#817)
1 parent 9ef6766 commit 07ada34

File tree

2 files changed

+8
-52
lines changed

2 files changed

+8
-52
lines changed

optimum/commands/export/openvino.py

Lines changed: 0 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -189,14 +189,6 @@ def parse_args_openvino(parser: "ArgumentParser"):
189189
action="store_true",
190190
help="Do not add converted tokenizer and detokenizer OpenVINO models.",
191191
)
192-
# TODO : deprecated
193-
optional_group.add_argument("--fp16", action="store_true", help="Compress weights to fp16")
194-
optional_group.add_argument("--int8", action="store_true", help="Compress weights to int8")
195-
optional_group.add_argument(
196-
"--convert-tokenizer",
197-
action="store_true",
198-
help="[Deprecated] Add converted tokenizer and detokenizer with OpenVINO Tokenizers.",
199-
)
200192

201193

202194
class OVExportCommand(BaseOptimumCLICommand):
@@ -243,17 +235,6 @@ def _get_default_int4_config(model_id_or_path, library_name):
243235
)
244236
library_name = "transformers"
245237

246-
if self.args.fp16:
247-
logger.warning(
248-
"`--fp16` option is deprecated and will be removed in a future version. Use `--weight-format` instead."
249-
)
250-
self.args.weight_format = "fp16"
251-
if self.args.int8:
252-
logger.warning(
253-
"`--int8` option is deprecated and will be removed in a future version. Use `--weight-format` instead."
254-
)
255-
self.args.weight_format = "int8"
256-
257238
if self.args.weight_format is None:
258239
ov_config = None
259240
elif self.args.weight_format in {"fp16", "fp32"}:
@@ -296,9 +277,6 @@ def _get_default_int4_config(model_id_or_path, library_name):
296277
quantization_config["group_size"] = 128 if "128" in self.args.weight_format else 64
297278
ov_config = OVConfig(quantization_config=quantization_config)
298279

299-
if self.args.convert_tokenizer:
300-
logger.warning("`--convert-tokenizer` option is deprecated. Tokenizer will be converted by default.")
301-
302280
quantization_config = ov_config.quantization_config if ov_config else None
303281
quantize_with_dataset = quantization_config and getattr(quantization_config, "dataset", None) is not None
304282
task = infer_task(self.args.task, self.args.model)

optimum/intel/openvino/quantization.py

Lines changed: 8 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -193,11 +193,6 @@ def __init__(self, model: transformers.PreTrainedModel, task: Optional[str] = No
193193
signature = inspect.signature(self.model.forward)
194194
self._signature_columns = list(signature.parameters.keys())
195195

196-
@property
197-
def input_names(self):
198-
logger.warning("The`input_names` attribute is deprecated and will be removed in v1.18.0")
199-
return None
200-
201196
@classmethod
202197
def from_pretrained(cls, model: PreTrainedModel, **kwargs):
203198
# TODO : Create model
@@ -212,7 +207,6 @@ def quantize(
212207
batch_size: int = 1,
213208
data_collator: Optional[DataCollator] = None,
214209
remove_unused_columns: bool = True,
215-
weights_only: bool = None,
216210
**kwargs,
217211
):
218212
"""
@@ -235,10 +229,6 @@ def quantize(
235229
The function to use to form a batch from a list of elements of the calibration dataset.
236230
remove_unused_columns (`bool`, defaults to `True`):
237231
Whether to remove the columns unused by the model forward method.
238-
weights_only (`bool`, *optional*):
239-
Being deprecated.
240-
Compress weights to integer precision (8-bit by default) while keeping activations
241-
floating-point. Fits best for LLM footprint reduction and performance acceleration.
242232
243233
Examples:
244234
```python
@@ -263,32 +253,20 @@ def quantize(
263253
>>> optimized_model = OVModelForSequenceClassification.from_pretrained("./quantized_model")
264254
```
265255
"""
266-
# TODO: deprecate weights_only argument
267-
if weights_only is not None:
268-
logger.warning(
269-
"`weights_only` argument is deprecated and will be removed in v1.18.0. In the future please provide `ov_config.quantization_config` "
270-
"as an instance of `OVWeightQuantizationConfig` for weight-only compression or as an instance of `OVQuantizationConfig` for full model quantization."
271-
)
272-
273256
if ov_config is None:
274257
ov_config = OVConfig()
275258
if not isinstance(ov_config, OVConfig):
276259
raise TypeError(f"`ov_config` should be an `OVConfig`, but got: {type(ov_config)} instead.")
277260
quantization_config = ov_config.quantization_config
278261
if quantization_config is None:
279-
if (weights_only is None or weights_only is True) and calibration_dataset is None:
280-
if weights_only is None:
281-
logger.info(
282-
"`quantization_config` was not provided, 8-bit asymmetric weight quantization will be applied."
283-
)
284-
ov_config.quantization_config = OVWeightQuantizationConfig(bits=8)
285-
else:
286-
logger.warning(
287-
"`quantization_config` was not provided, but calibration dataset was provided, assuming full "
288-
"model quantization is intended. In the future, please provide `quantization_config` as an "
289-
"instance of OVQuantizationConfig."
290-
)
291-
ov_config.quantization_config = OVQuantizationConfig()
262+
logger.warning(
263+
"`quantization_config` was not provided. In the future, please provide `quantization_config`"
264+
)
265+
ov_config.quantization_config = (
266+
OVWeightQuantizationConfig(bits=8)
267+
if calibration_dataset is None
268+
else OVQuantizationConfig()
269+
)
292270

293271
if isinstance(self.model, OVBaseModel):
294272
self._quantize_ovbasemodel(

0 commit comments

Comments (0)