
Commit 8e810fb

tests: rename to test_packed_asym_decompression.py
1 parent 76595d4 commit 8e810fb

File tree

1 file changed: +15 -2 lines changed


tests/test_compressors/quantized_compressors/test_asymmetric_decompression.py renamed to tests/test_compressors/quantized_compressors/test_packed_asym_decompression.py

Lines changed: 15 additions & 2 deletions
@@ -76,7 +76,12 @@ def create_asymmetric_quant_config(
         (QuantizationStrategy.CHANNEL, None),
     ],
 )
-def test_end_to_end_asymmetric_quantization(strategy, group_size):
+def test_end_to_end_asymmetric_quantization(
+    strategy,
+    group_size,
+    mock_per_group_calibration,
+    mock_per_channel_calibration,
+):
     """
     Test end-to-end workflow: quantize -> compress -> save -> load -> decompress -> use
     """
@@ -95,6 +100,13 @@ def test_end_to_end_asymmetric_quantization(strategy, group_size):
         group_size=group_size
     )
     apply_quantization_config(model, quant_config)
+
+    if strategy == QuantizationStrategy.GROUP:
+        mock_per_group_calibration(model.layer1, "weight", model.layer1.weight, group_size)
+        mock_per_group_calibration(model.layer2, "weight", model.layer2.weight, group_size)
+    else:
+        mock_per_channel_calibration(model.layer1, "weight", model.layer1.weight)
+        mock_per_channel_calibration(model.layer2, "weight", model.layer2.weight)



@@ -146,7 +158,7 @@ def test_end_to_end_asymmetric_quantization(strategy, group_size):


 @pytest.mark.parametrize("num_bits", [4, 8])
-def test_asymmetric_quantization_accuracy(num_bits):
+def test_asymmetric_quantization_accuracy(num_bits, mock_per_group_calibration):
     """
     Test that asymmetric quantization with zero-point preserves accuracy better
     than symmetric quantization for biased weight distributions.
@@ -173,6 +185,7 @@ def __init__(self):

     with torch.no_grad():
         model.layer.weight.copy_(biased_weights)
+    mock_per_group_calibration(model.layer, "weight", model.layer.weight, 128)

     compressor = PackedQuantizationCompressor(config=quant_config)
     quantized_modules_to_scheme = {"layer": quant_config.config_groups["group_1"]}
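
Note on the fixtures: mock_per_group_calibration and mock_per_channel_calibration, which the tests now request as arguments, are not defined in this diff; they are expected to come from the test suite's shared conftest. Below is a minimal, non-authoritative sketch of what the per-group fixture could look like, assuming apply_quantization_config has already attached weight_scale and weight_zero_point parameters to each layer and that the fixture is called as (module, base_name, value, group_size); the parameter names and the unsigned asymmetric quantization range are assumptions, not the library's confirmed internals.

# Hypothetical sketch only: the real mock_per_group_calibration fixture lives
# in the tests' conftest and may differ. The "{base_name}_scale" /
# "{base_name}_zero_point" parameter names and the unsigned asymmetric
# quantization range are assumptions.
import pytest
import torch


@pytest.fixture
def mock_per_group_calibration():
    def _calibrate(module, base_name, value, group_size, num_bits=4):
        # Split each weight row into contiguous blocks of group_size columns
        # and observe per-group min/max.
        rows, cols = value.shape
        grouped = value.reshape(rows, cols // group_size, group_size)
        min_vals = grouped.amin(dim=-1)
        max_vals = grouped.amax(dim=-1)

        # Asymmetric qparams: scale covers [min, max]; the zero point shifts
        # the real-valued range onto the unsigned integer grid.
        q_min, q_max = 0, 2**num_bits - 1
        scale = (max_vals - min_vals).clamp(min=1e-8) / (q_max - q_min)
        zero_point = torch.clamp(torch.round(q_min - min_vals / scale), q_min, q_max)

        # Overwrite the qparams attached to the module during
        # apply_quantization_config (parameter names assumed).
        scale_param = getattr(module, f"{base_name}_scale")
        zp_param = getattr(module, f"{base_name}_zero_point")
        scale_param.data.copy_(scale.to(scale_param.dtype))
        zp_param.data.copy_(zero_point.to(zp_param.dtype))

    return _calibrate

A per-channel variant would follow the same pattern with one min/max observation per output row instead of per group, dropping group_size from the signature.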
