Skip to content

Commit af5a09a

Browse files
[TEST] Skip test_host_tensor_descriptor_load
1 parent 9a3f308 commit af5a09a

File tree

1 file changed

+6
-2
lines changed

1 file changed

+6
-2
lines changed

python/test/unit/language/test_tensor_descriptor.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1608,7 +1608,9 @@ def alloc_fn(size: int, align: int, stream: Optional[int]):
16081608
@pytest.mark.parametrize("M_BLOCK,N_BLOCK", [(2, 16), (8, 16), (8, 32), (8, 128)])
16091609
def test_host_tensor_descriptor_load(dtype_str, num_ctas, M_BLOCK, N_BLOCK, device):
16101610
if num_ctas == 2 and (not is_cuda() or torch.cuda.get_device_capability(0)[0] not in (9, 10)):
1611-
pytest.skip("CTAs is unsupported for these cards")
1611+
pytest.xfail("CTAs is unsupported for these cards")
1612+
if is_xpu():
1613+
pytest.skip("FIXME: issue #4289")
16121614

16131615
@triton.jit(debug=True)
16141616
def kernel(out_ptr, desc, M, N, M_BLOCK: tl.constexpr, N_BLOCK: tl.constexpr):
@@ -1668,10 +1670,12 @@ def matmul_kernel_host_tensor_descriptor(a_desc, b_desc, c_desc):
16681670
])
16691671
def test_host_tensor_descriptor_matmul(num_stages, num_ctas, BLOCK_M, BLOCK_N, BLOCK_K, device):
16701672
if num_ctas == 2 and (not is_cuda() or torch.cuda.get_device_capability(0)[0] not in (9, 10)):
1671-
pytest.skip("CTAs is unsupported for these cards")
1673+
pytest.xfail("CTAs is unsupported for these cards")
16721674

16731675
if is_hip() and (BLOCK_M, BLOCK_N, BLOCK_K, num_stages) == (256, 128, 32, 4):
16741676
pytest.skip("Insufficient shared memory on HIP devices")
1677+
if is_xpu():
1678+
pytest.skip("FIXME: issue #4289")
16751679

16761680
if is_interpreter():
16771681
M, N, K = BLOCK_M, BLOCK_N, BLOCK_K

0 commit comments

Comments (0)