
Commit b3479a0

[TEST] Enable bfloat16 unit tests
Signed-off-by: Whitney Tsang <[email protected]>
1 parent 7d20b9e commit b3479a0

File tree

python/test/unit/language/test_core.py
python/test/unit/language/test_tensor_descriptor.py

2 files changed: 0 additions & 14 deletions
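
For reference, the re-enabled cases can be run directly with pytest once this commit is applied. A minimal sketch, assuming pytest is installed and the paths are invoked from the repository root; the file name and the -k filter are illustrative and not part of the commit:

# run_bfloat16_tests.py (hypothetical helper)
import pytest

if __name__ == "__main__":
    pytest.main([
        "python/test/unit/language/test_core.py",
        "python/test/unit/language/test_tensor_descriptor.py",
        "-k", "bfloat16",  # select only the bfloat16-parametrized cases
        "-v",
    ])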

python/test/unit/language/test_core.py

Lines changed: 0 additions & 11 deletions
@@ -1353,8 +1353,6 @@ def test_atomic_rmw(op, dtype_x_str, mode, sem, device):
         pytest.xfail("Only test atomic bfloat16/float16 ops on GPU")
     if "uint" in dtype_x_str and mode in ["min_neg", "all_neg"]:
         pytest.xfail("uint cannot be negative")
-    if is_xpu() and dtype_x_str == 'bfloat16':
-        pytest.skip("bfloat16 not yet supported for xpu")

     n_programs = 5

@@ -1443,8 +1441,6 @@ def kernel(X):
                          for check_return_val in ([True, False] if is_hip() else [True])])
 def test_tensor_atomic_rmw(shape, axis, num_ctas, dtype_x_str, check_return_val, device):
     check_type_supported(dtype_x_str, device)
-    if is_xpu() and dtype_x_str == 'bfloat16':
-        pytest.skip("bfloat16 not yet supported for xpu")
     shape0, shape1 = shape
     # triton kernel

@@ -1524,8 +1520,6 @@ def torch_to_triton_dtype(t):
                          for dtype_x_str in ['bfloat16', 'float16', 'float32']])
 def test_tensor_atomic_add_non_exclusive_offset(size, num_ctas, dtype_x_str, device):
     check_type_supported(dtype_x_str, device)
-    if is_xpu() and dtype_x_str == 'bfloat16':
-        pytest.skip("bfloat16 not yet supported for xpu")

     @triton.jit
     def kernel(X, val, NUM: tl.constexpr):
@@ -1550,8 +1544,6 @@ def kernel(X, val, NUM: tl.constexpr):
                          for dtype_x_str in ['bfloat16', 'float16', 'float32']])
 def test_tensor_atomic_add_shift_1(size, num_ctas, dtype_x_str, device):
     check_type_supported(dtype_x_str, device)
-    if is_xpu() and dtype_x_str == 'bfloat16':
-        pytest.skip("bfloat16 not yet supported for xpu")

     @triton.jit
     def kernel(X, val, NUM: tl.constexpr):
@@ -1588,9 +1580,6 @@ def test_tensor_atomic_add_access_patterns(shape, idx_order, mask_step, num_ctas
     if is_interpreter():
         pytest.xfail("not supported in the interpreter")

-    if is_xpu() and dtype_x_str == 'bfloat16':
-        pytest.skip("bfloat16 not yet supported for xpu")
-
     @triton.jit
     def kernel(in_ptr, idx_ptr, out_ptr, shape0, shape1, mask_step, XBLOCK: tl.constexpr):
         xoffset = tl.program_id(0) * XBLOCK

python/test/unit/language/test_tensor_descriptor.py

Lines changed: 0 additions & 3 deletions
@@ -1566,9 +1566,6 @@ def test_tensor_descriptor_reduce(kind, descriptor, dtype_str, num_ctas, M_BLOCK
         pytest.xfail("Multi-CTA not supported")
     if is_hip_cdna3() and (kind, dtype_str, M_BLOCK, N_BLOCK) in REDUCE_SKIP_HIP_CDNA3:
         pytest.skip("Broken on rocm")
-    if is_xpu():
-        if (kind, dtype_str) in [("add", "bfloat16")]:
-            pytest.skip("FIXME: issue #3914")

     @triton.jit(debug=True)
     def kernel(out_desc, out_ptr, a_ptr, M, N, M_BLOCK: tl.constexpr, N_BLOCK: tl.constexpr, kind: tl.constexpr):
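
All of the deleted guards follow the same explicit skip pattern inside parametrized tests. A minimal, self-contained sketch of that pattern, for illustration only; is_xpu below is a stand-in stub rather than the repository's actual helper:

import pytest

def is_xpu():
    # Stand-in stub for illustration; the real suite queries the active backend.
    return False

@pytest.mark.parametrize("dtype_x_str", ["bfloat16", "float16", "float32"])
def test_example(dtype_x_str):
    # Guards shaped like this are what the commit removes, so the bfloat16
    # cases now execute on XPU instead of being skipped.
    if is_xpu() and dtype_x_str == "bfloat16":
        pytest.skip("bfloat16 not yet supported for xpu")
    assert isinstance(dtype_x_str, str)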
