Skip to content

Commit 9be16bc

Browse files
Fix hadamard and FP4 gpu tests
Signed-off-by: Keval Morabia <[email protected]>
1 parent 0bea1c3 commit 9be16bc

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

tests/gpu/torch/quantization/test_hadamard.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ def test_hadamard_transform(dim):
3939
xxt = x @ x.T
4040
x_h = normalized_hadamard_transform(x)
4141
xxt_h = x_h @ x_h.T
42-
assert torch.allclose(xxt_h, xxt, atol=1e-5)
42+
assert torch.allclose(xxt_h, xxt, atol=1e-3)
4343

4444

4545
def test_kv_rotate():
@@ -59,7 +59,7 @@ def test_kv_rotate():
5959
},
6060
):
6161
output_test = model(dummy_input)
62-
assert torch.allclose(output_ref, output_test, atol=1e-5)
62+
assert torch.allclose(output_ref, output_test, atol=1e-3)
6363

6464
set_quantizer_by_cfg(
6565
model,

tests/gpu/torch/quantization/test_tensor_quant_cuda.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ def _test_fp4_kernel(test_in, test_out):
231231
assert torch.allclose(quantized_outputs, expected_outputs)
232232
if triton_kernel.IS_AVAILABLE:
233233
quantized_outputs_triton = triton_kernel.fp4_fake_quant_block(
234-
inputs, inputs.abs().amax().item()
234+
inputs, inputs.abs().amax()
235235
)
236236
assert torch.allclose(quantized_outputs_triton, expected_outputs)
237237

0 commit comments

Comments
 (0)