Skip to content

Commit fa9facf

Browse files
Tests: xfail opcheck for 4bit quantization with floating storage dtypes
1 parent a2a74ed commit fa9facf

File tree

1 file changed

+2
-3
lines changed

1 file changed

+2
-3
lines changed

tests/test_ops.py

Lines changed: 2 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -167,9 +167,8 @@ def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize
167167
assert absmax.device == A.device
168168
assert absmax.dtype == torch.float32
169169

170-
# TODO: Enable it
171-
if device in ("cpu", "xpu") and storage_dtype == torch.bfloat16:
172-
pytest.skip("CPU bf16 storage_dtype will fail on torch op check")
170+
if storage_dtype != torch.uint8:
171+
pytest.xfail("opcheck fails for storage_dtype != torch.uint8")
173172

174173
opcheck(torch.ops.bitsandbytes.quantize_4bit, (A, blocksize, quant_type, storage_dtype))
175174

0 commit comments

Comments (0)