
Commit fd73ced (parent 35e5442)

minimal diff

Signed-off-by: shanjiaz <[email protected]>

2 files changed: 3 additions, 7 deletions

tests/test_compressors/model_compressors/test_model_compressor.py
3 additions, 6 deletions

@@ -572,12 +572,9 @@ def test_decompress_model(model_stub, comp_stub):
     # equivalent to decompressing from disk
     assert decompressed.keys() == true_decompressed.keys()
     for key in decompressed.keys():
-        # Skip dtype check for weight_shape - int32/int64 are functionally equivalent
-        # torch.Size() works identically with both, old checkpoints use int64, new use int32
-        if not key.endswith("weight_shape"):
-            assert (
-                decompressed[key].dtype == true_decompressed[key].dtype
-            ), f"{key} dtypes not equal"
+        assert (
+            decompressed[key].dtype == true_decompressed[key].dtype
+        ), f"{key} dtypes not equal"
         assert torch.all(
             decompressed[key] == true_decompressed[key]
         ), f"{key} values not equal"

tests/test_compressors/quantized_compressors/test_packed_asym_decompression.py
0 additions, 1 deletion

@@ -114,7 +114,6 @@ def test_end_to_end_asymmetric_quantization(
     # Verify compression created zero-point parameters
     assert hasattr(model.layer1, "weight_zero_point")
     assert hasattr(model.layer2, "weight_zero_point")
-    # For asymmetric GROUP/CHANNEL quantization, zero_point should be packed to int32
     assert model.layer1.weight_zero_point.dtype == torch.int32
     assert model.layer2.weight_zero_point.dtype == torch.int32

0 commit comments