
Commit e7387be

Resolve logger warnings (#1324)
Signed-off-by: Emmanuel Ferdman <[email protected]>
Parent: c0a6eec
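
For context: in Python's standard logging module, Logger.warn is a deprecated alias of Logger.warning and has emitted a DeprecationWarning since Python 3.3, which is what this commit resolves. A minimal sketch of the behavior (the logger name here is illustrative):

    import logging

    logging.basicConfig()
    logger = logging.getLogger("optimum.intel")  # illustrative name

    # Deprecated alias: triggers "DeprecationWarning: The 'warn' method is
    # deprecated, use 'warning' instead" (visible e.g. under python -W default).
    logger.warn("old spelling")

    # Preferred method; logs identically at WARNING level.
    logger.warning("new spelling")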

4 files changed, +4 -4 lines changed

examples/neural_compressor/question-answering/trainer_qa.py

Lines changed: 1 addition & 1 deletion

@@ -46,7 +46,7 @@ def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metr
             and getattr(self.model.config, "framework", None) in {"pytorch", "pytorch_fx"}
             and self.use_cpu_amp
         ):
-            logger.warn(
+            logger.warning(
                 f"{self.model.config.framework} quantized model doesn't support BFloat16 input, setting `use_cpu_amp` to False."
             )
             self.use_cpu_amp = False

optimum/intel/neural_compressor/trainer.py

Lines changed: 1 addition & 1 deletion

@@ -877,7 +877,7 @@ def evaluate(
             and getattr(self.model.config, "framework", None) in {"pytorch", "pytorch_fx"}
             and self.use_cpu_amp
         ):
-            logger.warn(
+            logger.warning(
                 f"{self.model.config.framework} quantized model doesn't support BFloat16 input, setting `use_cpu_amp` to False."
             )
             self.use_cpu_amp = False

optimum/intel/openvino/modeling_decoder.py

Lines changed: 1 addition & 1 deletion

@@ -158,7 +158,7 @@ def __init__(
         is_stateful_supported = ensure_stateful_is_available(warn=False)

         if self.use_cache and not self.stateful:
-            logger.warn(
+            logger.warning(
                 "Provided model does not contain state. It may lead to sub-optimal performance."
                 "Please reexport model with updated OpenVINO version >= 2023.3.0 calling the `from_pretrained` method with original model "
                 "and `export=True` parameter"

optimum/intel/openvino/quantization.py

Lines changed: 1 addition & 1 deletion

@@ -1355,7 +1355,7 @@ def _quantize_torchmodel(
         if not isinstance(quantization_config, OVQuantizationConfig):
             raise ValueError(f"Unsupported type of quantization config: {type(quantization_config)}")
         if stateful:
-            logger.warn(
+            logger.warning(
                 "Quantization algorithm does not support optimized stateful models. "
                 "The original model without optimization will be quantized and exported."
             )
