
Commit 34f4c17

anmyachev authored and whitneywhtsang committed
[intel] update skiplist and use xfail mark after '65ad3b7'
Signed-off-by: Anatoly Myachev <[email protected]>
1 parent: ca77a33

4 files changed: +192, -125 lines

python/test/unit/language/test_core.py

Lines changed: 1 addition & 1 deletion

@@ -6016,7 +6016,7 @@ def compute_scratch_buffer_shape(src_layout, dst_layout, shape):
 @pytest.mark.parametrize("dst_layout", filter_layouts(layouts))
 def test_convert2d(M, N, src_layout, interm_layout, dst_layout, dtype, device, tmp_path: pathlib.Path):
     if str(src_layout) == str(dst_layout):
-        pytest.skip()
+        pytest.xfail("Do not convert same layout")
     if is_hip() or is_xpu():
         try:
             scratch_shape = compute_scratch_buffer_shape(src_layout, dst_layout, (M, N))

python/test/unit/language/test_matmul.py

Lines changed: 1 addition & 1 deletion

@@ -120,7 +120,7 @@ def test_simple_matmul(dtype_src_str, dtype_dst_str, BLOCK_M, BLOCK_N, BLOCK_K,
     if EPILOGUE_SUBTILE and not is_xpu() and (is_hip() or NUM_CTAS > 1 or BLOCK_N >= 512):
         pytest.skip("creates convert layout too big to fit in smem")
     if LAYOUT_16x256 and (not is_cuda() or torch.cuda.get_device_capability()[0] < 10):
-        pytest.skip("skip forcing tmem layout on non blackwell targets.")
+        pytest.xfail("skip forcing tmem layout on non blackwell targets.")
     M, N, K = 1024, 512, 256
     torch.manual_seed(42)
     precision = "tf32" if dtype_src_str == "tensorfloat32" else "ieee"
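
Both hunks make the same substitution: pytest.skip(), which reports a test as not applicable, becomes pytest.xfail(), which records it as a known failure. A minimal sketch of how the two calls behave (hypothetical test names; not part of this commit):

import pytest

def test_skip_example():
    # pytest.skip() halts the test and reports it as SKIPPED:
    # "not run here", with no claim about whether the feature works.
    pytest.skip("unsupported configuration")

def test_xfail_example():
    # pytest.xfail() also halts the test, but reports it as XFAIL:
    # a known, tracked failure that stays visible in the test summary.
    pytest.xfail("known failure on this target")

@pytest.mark.xfail(reason="known failure", strict=True)
def test_xfail_marker_example():
    # The marker form, by contrast, still runs the body; with
    # strict=True an unexpected pass (XPASS) fails the run,
    # flagging a stale skiplist entry.
    assert 1 + 1 == 3

The practical difference shows up in the report: xfailed tests are counted as expected failures rather than skips, and the marker form can surface a fixed bug via XPASS instead of silently passing as a skip.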
