Skip to content

Commit 865a780

Browse files
unit test fixes
Signed-off-by: Brian Dellabetta <[email protected]>
1 parent 9b96b09 commit 865a780

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

tests/llmcompressor/modifiers/calibration/test_cache.py

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -47,8 +47,8 @@ def test_is_quantized_cache_singleton():
4747

4848

4949
def test_update():
50-
nbits = 8
51-
args = QuantizationArgs(nbits=nbits, symmetric=True)
50+
num_bits = 8
51+
args = QuantizationArgs(num_bits=num_bits, symmetric=True)
5252
cache = QuantizedKVParameterCache(args)
5353

5454
max_key_states_val = 1.0
@@ -62,7 +62,7 @@ def test_update():
6262
layer_idx = 0
6363

6464
cache.update(key_states, value_states, layer_idx)
65-
denom = (2 ** (nbits) - 1) / 2
65+
denom = (2 ** (num_bits) - 1) / 2
6666
expected_k_scale = torch.tensor([max_key_states_val / denom])
6767
expected_v_scale = torch.tensor([max_value_states_val / denom])
6868

@@ -83,8 +83,8 @@ def test_update():
8383

8484

8585
def test_cache_reset():
86-
nbits = 8
87-
args = QuantizationArgs(nbits=nbits, symmetric=True)
86+
num_bits = 8
87+
args = QuantizationArgs(num_bits=num_bits, symmetric=True)
8888
cache = QuantizedKVParameterCache(args)
8989

9090
max_key_states_val = 1.0

0 commit comments

Comments
 (0)