Commit f8a9353

[bug fix] Remove re-initialization of Sequential Quantizer during set_quantizer_attribute (#433)
Signed-off-by: realAsma <[email protected]>
1 parent: 75ba7cd

File changed: modelopt/torch/quantization/conversion.py (7 additions, 2 deletions)

```diff
@@ -16,6 +16,7 @@
 """Quantization conversion/restore utilities."""
 
 import fnmatch
+import warnings
 from collections.abc import Callable
 from contextlib import contextmanager
 from typing import Any
@@ -288,11 +289,15 @@ def set_quantizer_attribute(
         ):
             continue
 
-        if isinstance(attribute, list):
+        if isinstance(attribute, list) and not isinstance(module, SequentialQuantizer):
             parent_module = quant_model.get_submodule(name.rpartition(".")[0])
             module = SequentialQuantizer(*(TensorQuantizer() for _ in range(len(attribute))))
             setattr(parent_module, name.split(".")[-1], module)
-
+        elif isinstance(attribute, list) and len(attribute) != len(module):
+            warnings.warn(
+                f"The number of attributes ({len(attribute)}) does not match the number of "
+                f"quantizers of {module} leading to partial assignment.",
+            )
         module.set_from_attribute_config(attribute)
```
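
For context on what the fix changes: before this commit, applying a list-valued attribute to a module that was already a `SequentialQuantizer` rebuilt it from fresh `TensorQuantizer`s, silently discarding any previously configured state. The sketch below reproduces the patched branch logic with small stand-in classes; the stubs and the `apply_attribute` helper are illustrative assumptions for this sketch, not modelopt's real API.

```python
# Minimal, self-contained sketch of the patched branch logic. The stub classes
# below stand in for modelopt's TensorQuantizer/SequentialQuantizer and are
# NOT the real implementations.
import warnings


class TensorQuantizer:
    """Stub: holds one attribute dict per quantizer."""

    def __init__(self):
        self.attrs = {}

    def set_from_attribute_config(self, attr):
        self.attrs.update(attr)


class SequentialQuantizer(list):
    """Stub: a sequence of TensorQuantizers."""

    def set_from_attribute_config(self, attrs):
        # zip() stops at the shorter sequence, so a length mismatch yields the
        # partial assignment that the new warning describes.
        for quantizer, attr in zip(self, attrs):
            quantizer.set_from_attribute_config(attr)


def apply_attribute(module, attribute):
    """Hypothetical helper mirroring the patched branch of set_quantizer_attribute."""
    if isinstance(attribute, list) and not isinstance(module, SequentialQuantizer):
        # Only build a new SequentialQuantizer when the module is not one yet.
        module = SequentialQuantizer(TensorQuantizer() for _ in range(len(attribute)))
    elif isinstance(attribute, list) and len(attribute) != len(module):
        warnings.warn(
            f"The number of attributes ({len(attribute)}) does not match the number of "
            f"quantizers of {module} leading to partial assignment."
        )
    module.set_from_attribute_config(attribute)
    return module


# First call converts a plain TensorQuantizer into a SequentialQuantizer.
q = apply_attribute(TensorQuantizer(), [{"num_bits": 8}, {"num_bits": 4}])
q[0].calibrated = True  # pretend calibration state was accumulated afterwards

# Second call: pre-fix this would rebuild q from scratch; post-fix it is reused.
q2 = apply_attribute(q, [{"axis": 0}, {"axis": 0}])
assert q2 is q and q2[0].calibrated  # existing state survives
```

In the stub, `zip()` truncates to the shorter sequence, which is exactly the partial assignment the new warning calls out when the attribute list and quantizer count disagree.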
