
Commit 41f27f4

[TEST] Update test_matmul.py skiplist (#4674)
pass rate: 96.19% -> 97.77%

Signed-off-by: Whitney Tsang <[email protected]>
Parent: 3797a0e


python/test/unit/language/test_matmul.py

Lines changed: 5 additions & 5 deletions
```diff
@@ -111,20 +111,20 @@ def test_simple_matmul(dtype_src_str, dtype_dst_str, BLOCK_M, BLOCK_N, BLOCK_K,
     if is_hip() and (not is_hip_cdna3()) and dtype_src_str == "tensorfloat32":
         pytest.skip("tensorfloat32 is only supported on HIP CDNA3")
     if dtype_src_str == "float8e5" and BLOCK_K == 16:
-        pytest.skip("Skipping cases small K for float8")
+        pytest.xfail("Skipping cases small K for float8")
     if dtype_src_str == "float8e5" and device == "cuda" and torch.cuda.get_device_capability()[0] < 9:
         pytest.skip("Float8 requires compute capability >= 9")
     if (dtype_src_str == "float64") != (dtype_dst_str == "float64"):
-        pytest.skip("Skipping unsupported case")
-    if dtype_src_str == "float64" and not is_cuda():
+        pytest.xfail("Skipping unsupported case")
+    if not is_xpu() and dtype_src_str == "float64" and not is_cuda():
         pytest.skip("Float64 not supported on HIP yet")
     if "float32" in dtype_src_str and dtype_dst_str == "float16":
-        pytest.skip("Skipping unsupported case")
+        pytest.xfail("Skipping unsupported case")
     if "float32" == dtype_src_str and NUM_CTAS > 1:
         pytest.skip("FMA matmul not supported for multiple CTAs")
     if (BLOCK_M < 64 or (BLOCK_M == 64 and BLOCK_N == 16)) and NUM_CTAS > 1:
         pytest.skip("multi-CTAs is broken for mmav2")
-    if EPILOGUE_SUBTILE and not is_xpu() and (is_hip() or NUM_CTAS > 1 or BLOCK_N >= 512):
+    if not is_xpu() and EPILOGUE_SUBTILE and (is_hip() or NUM_CTAS > 1 or BLOCK_N >= 512):
         pytest.skip("creates convert layout too big to fit in smem")
     if LAYOUT_16x256 and (not is_cuda() or torch.cuda.get_device_capability()[0] < 10):
         pytest.xfail("skip forcing tmem layout on non blackwell targets.")
```
