Skip to content

Commit 0027707

Browse files
committed
minor cleanup
Signed-off-by: Kyle Sayers <[email protected]>
1 parent 27a122f commit 0027707

File tree

1 file changed

+12
-10
lines changed

1 file changed

+12
-10
lines changed

src/llmcompressor/modifiers/quantization/calibration.py

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
 from compressed_tensors.quantization import (
     DynamicType,
     KVCacheScaleType,
+    QuantizationArgs,
     QuantizationScheme,
     QuantizationStatus,
     QuantizationStrategy,
@@ -48,16 +49,17 @@ def initialize_observer(
     :param base_name: str used to name the observer attribute
     """
-
-    arg_name = "weights" if base_name == "weight" else f"{base_name}_activations"
-    quantization_scheme = getattr(module, "quantization_scheme", None)
-    if not quantization_scheme:
-        # no quantization scheme nothing to do
-        return
-
-    args = getattr(quantization_scheme, arg_name, None)
-    # dont need observers for dynamic
-    if args is not None and args.dynamic in (False, DynamicType.LOCAL):
+    if base_name == "weight":
+        arg_name = "weights"
+    elif base_name == "output":
+        arg_name = "output_activations"
+    else:  # input, q, k, v
+        arg_name = "input_activations"
+
+    args: QuantizationArgs = getattr_chain(
+        module, f"quantization_scheme.{arg_name}", None
+    )
+    if args is not None and args.dynamic is not True:
         observer = Observer.load_from_registry(
             args.observer, base_name=base_name, args=args, module=module
         )

0 commit comments

Comments
 (0)