Commit 61625fc

[TEST] Mark expected failures as xfail
Signed-off-by: Whitney Tsang <[email protected]>
1 parent 0bb2bc0 commit 61625fc

File tree

2 files changed: +5 -4 lines changed

  python/test/unit/language/test_core.py
  python/test/unit/language/test_matmul.py

python/test/unit/language/test_core.py

Lines changed: 1 addition & 1 deletion
@@ -1731,7 +1731,7 @@ def kernel(X, val, NUM: tl.constexpr):
 def test_tensor_atomic_add_access_patterns(shape, idx_order, mask_step, num_ctas, dtype_x_str, device):
     check_type_supported(dtype_x_str, device)
     if is_interpreter():
-        pytest.skip("not supported in the interpreter")
+        pytest.xfail("not supported in the interpreter")
 
     @triton.jit
     def kernel(in_ptr, idx_ptr, out_ptr, shape0, shape1, mask_step, XBLOCK: tl.constexpr):
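
For context, a minimal standalone sketch (not taken from this repository) of the reporting difference this change relies on: an imperative pytest.xfail ends the test and reports it as an expected failure, whereas pytest.skip reports it as skipped. The backend_supports_feature helper below is hypothetical, standing in for guards like is_interpreter() above.

    import pytest

    def backend_supports_feature():
        # Hypothetical stand-in for guards such as is_interpreter().
        return False

    def test_reported_as_skipped():
        if not backend_supports_feature():
            pytest.skip("not supported on this backend")   # summary shows SKIPPED

    def test_reported_as_xfail():
        if not backend_supports_feature():
            pytest.xfail("not supported on this backend")  # summary shows XFAIL

Running pytest with -rsx prints both reasons in the short test summary.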

python/test/unit/language/test_matmul.py

Lines changed: 4 additions & 3 deletions
@@ -892,8 +892,6 @@ def mxfp8_mxfp4_matmul( #
 @pytest.mark.parametrize("nonKDim", ([0, 16, 32] if is_hip_cdna() else []))
 def test_mxfp8_mxfp4_matmul(M, N, K, BLOCK_M, BLOCK_N, BLOCK_K, NUM_STAGES, B_TRANS, CONST_SCALE, A_DATA_TYPE,
                             B_DATA_TYPE, WITH_A_SCALE, WITH_B_SCALE, nonKDim, device):
-    if is_xpu():
-        pytest.skip("FIXME: failed to legalize operation 'tt.dot_scaled' on XPU")
     if is_cuda():
         if torch.cuda.get_device_capability()[0] < 10:
             pytest.skip("Requires compute capability >= 10")

@@ -912,7 +910,10 @@ def test_mxfp8_mxfp4_matmul(M, N, K, BLOCK_M, BLOCK_N, BLOCK_K, NUM_STAGES, B_TR
         pytest.skip("Float4 without scale is tested in test_block_scale_fp4")
 
     if B_DATA_TYPE != 'float4' and B_TRANS:
-        pytest.skip(f'No need to transpose B for {B_DATA_TYPE}')
+        pytest.xfail(f'No need to transpose B for {B_DATA_TYPE}')
+
+    if is_xpu():
+        pytest.skip("FIXME: failed to legalize operation 'tt.dot_scaled' on XPU")
 
     if not is_hip() and BLOCK_N == 256 and BLOCK_K == 256:
         NUM_STAGES = 2
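
A side effect of moving the is_xpu() skip below the B_TRANS check in the second hunk is that, on XPU, the parameter-combination guard now fires first and those cases are reported as xfail rather than skipped. A minimal sketch of that ordering rule, with both guard conditions hypothetical:

    import pytest

    def test_guard_order():
        unsupported_combination = True   # hypothetical parameter check
        running_on_xpu = True            # hypothetical backend check

        # The first early-exit guard that fires decides the reported outcome:
        # here the xfail wins, so the run shows XFAIL rather than SKIPPED.
        if unsupported_combination:
            pytest.xfail("combination is not expected to work")
        if running_on_xpu:
            pytest.skip("backend limitation")  # not reached for this combination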
