Skip to content

Commit ca0fe1b

Browse files
authored
[KERNELS] Skip test_upcast_mxfp4_to_bf16 on AMD (#7595)
This test was merged while AMD CI was failing, and it is now blocking merges. See #7591
1 parent de846c0 commit ca0fe1b

File tree

1 file changed

+2
-0
lines changed

1 file changed

+2
-0
lines changed

python/triton_kernels/tests/test_tensor_details/test_layout_hopper.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import pytest
2+
from triton._internal_testing import is_cuda
23
from triton_kernels.tensor import wrap_torch_tensor, convert_layout, FP4
34
from triton_kernels.tensor_details.layout import HopperMXScaleLayout, HopperMXValueLayout
45
from triton_kernels.numerics_details.mxfp import downcast_to_mxfp, upcast_from_mxfp
@@ -69,6 +70,7 @@ def _upcast_mxfp4_to_bf16(Y, X, XScale, x_stride_m, x_stride_n, x_scale_stride_m
6970
tl.store(Y + offs_y, y)
7071

7172

73+
@pytest.mark.skipif(not is_cuda(), reason="Only supported on cuda")
7274
def test_upcast_mxfp4_to_bf16():
7375
mx_axis = 0
7476
num_warps = 4

0 commit comments

Comments
 (0)