Skip to content

Commit 6ee08cd

Browse files
[TEST] Mark expected failures as xfail (#3061)
Pass rate: 98.79%->99.03% Signed-off-by: Whitney Tsang <[email protected]>
1 parent e4e0905 commit 6ee08cd

File tree

1 file changed

+7
-7
lines changed

1 file changed

+7
-7
lines changed

python/test/unit/language/test_core.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3306,7 +3306,7 @@ def convert_fp8_to_fp32(x, device, dtype_str):
33063306
def test_dot(M, N, K, num_warps, col_a, col_b, epilogue, input_precision, in_dtype, out_dtype, kpack, num_ctas, device):
33073307
if is_interpreter():
33083308
if M < 16 or N < 16 or K < 16:
3309-
pytest.skip("small dots are supported only on HIP at the moment")
3309+
pytest.xfail("small dots are supported only on HIP at the moment")
33103310
if in_dtype == 'bfloat16':
33113311
pytest.xfail("bfloat16 is not supported in the interpreter")
33123312
else:
@@ -4847,7 +4847,7 @@ def kernel(X, Y, BLOCK: tl.constexpr):
48474847
@pytest.mark.parametrize("num_ctas", num_ctas_list)
48484848
def test_inline_asm(num_ctas, device):
48494849
if not is_cuda():
4850-
pytest.skip("test_inline_asm is only supported in CUDA")
4850+
pytest.xfail("test_inline_asm is only supported in CUDA")
48514851

48524852
@triton.jit
48534853
def kernel(X, Y, Z, n: tl.constexpr, BLOCK: tl.constexpr):
@@ -4875,7 +4875,7 @@ def kernel(X, Y, Z, n: tl.constexpr, BLOCK: tl.constexpr):
48754875
@pytest.mark.parametrize("num_ctas", num_ctas_list)
48764876
def test_inline_asm_packed(num_ctas, device):
48774877
if not is_cuda():
4878-
pytest.skip("test_inline_asm is only supported in CUDA")
4878+
pytest.xfail("test_inline_asm is only supported in CUDA")
48794879

48804880
@triton.jit
48814881
def kernel(X, Y, BLOCK: tl.constexpr):
@@ -4902,7 +4902,7 @@ def kernel(X, Y, BLOCK: tl.constexpr):
49024902
@pytest.mark.parametrize('num_ctas', num_ctas_list)
49034903
def test_inline_asm_with_pointers(num_ctas, device):
49044904
if not is_cuda():
4905-
pytest.skip('test_inline_asm is only supported in CUDA')
4905+
pytest.xfail('test_inline_asm is only supported in CUDA')
49064906

49074907
@triton.jit
49084908
def kernel(X, Y, BLOCK: tl.constexpr):
@@ -4927,7 +4927,7 @@ def kernel(X, Y, BLOCK: tl.constexpr):
49274927

49284928
def test_inline_asm_multiple_outputs(device):
49294929
if not is_cuda():
4930-
pytest.skip('test_inline_asm is only supported in CUDA')
4930+
pytest.xfail('test_inline_asm is only supported in CUDA')
49314931

49324932
@triton.jit
49334933
def kernel(A, B, C, D, BLOCK: tl.constexpr):
@@ -4973,7 +4973,7 @@ def kernel(A, B, C, D, BLOCK: tl.constexpr):
49734973

49744974
def test_inline_asm_packed_multiple_outputs(device):
49754975
if not is_cuda():
4976-
pytest.skip('test_inline_asm is only supported in CUDA')
4976+
pytest.xfail('test_inline_asm is only supported in CUDA')
49774977

49784978
@triton.jit
49794979
def kernel(A, B, C, D, BLOCK: tl.constexpr):
@@ -6261,7 +6261,7 @@ def kernel(input):
62616261
@pytest.mark.parametrize("dtype_str", ['float32', 'float64'])
62626262
def test_math_extern(dtype_str, device):
62636263
if is_interpreter():
6264-
pytest.skip('math_extern does not work in the interpreter mode')
6264+
pytest.xfail('math_extern does not work in the interpreter mode')
62656265

62666266
@triton.jit
62676267
def kernel(

0 commit comments

Comments
 (0)