Skip to content

Commit 30f40b0

Browse files
committed
fix tests
Signed-off-by: shanjiaz <[email protected]>
1 parent 7dc3c9f commit 30f40b0

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

tests/test_compressors/quantized_compressors/test_pack_quant.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -88,9 +88,8 @@ def test_quant_format(shape):
88 88
dense_state_dict, names_to_scheme=quantized_modules_to_scheme
89 89
)
90 90

91-
# compressed state_dict adds one entry for shape
92-
# but removes the zero points since we are symmetric
93-
assert len(dense_state_dict) == len(compressed_state_dict)
91+
# compressed state_dict adds one entry for shape and keeps zero_point
92+
assert len(dense_state_dict) + 1 == len(compressed_state_dict)
94 93

95 94
# check compressed and packed
96 95
assert compressed_state_dict["dummy.weight_packed"].dtype == torch.int32

tests/test_compressors/quantized_compressors/test_packed_asym_decompression.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@ def test_end_to_end_asymmetric_quantization(
114 114
# Verify compression created zero-point parameters
115 115
assert hasattr(model.layer1, "weight_zero_point")
116 116
assert hasattr(model.layer2, "weight_zero_point")
117+
# For asymmetric GROUP/CHANNEL quantization, zero_point should be packed to int32
117 118
assert model.layer1.weight_zero_point.dtype == torch.int32
118 119
assert model.layer2.weight_zero_point.dtype == torch.int32
119 120

0 commit comments

Comments (0)