Skip to content

Commit 51244af

Browse files
[TEST] Update runtime skiplist (#4714)
pass rate: 98.91%->98.93% Signed-off-by: Whitney Tsang <[email protected]>
1 parent 9ee3f44 commit 51244af

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

python/test/unit/runtime/test_autotuner.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -173,8 +173,8 @@ def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
173173
assert records['capture_named_args']
174174

175175

176-
@pytest.mark.skipif(not is_cuda() or torch.cuda.get_device_capability()[0] < 9,
177-
reason="Requires compute capability >= 9 for NV")
176+
@pytest.mark.xfail(not is_cuda() or torch.cuda.get_device_capability()[0] < 9,
177+
reason="Requires compute capability >= 9 for NV")
178178
def test_override_ttir(device):
179179
N = 1024
180180
src = torch.randn(N, device=device)
@@ -222,8 +222,8 @@ def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
222222
torch.testing.assert_close(src * 10, dst)
223223

224224

225-
@pytest.mark.skipif(not is_cuda() or torch.cuda.get_device_capability()[0] < 9,
226-
reason="Requires compute capability >= 9 for NV")
225+
@pytest.mark.xfail(not is_cuda() or torch.cuda.get_device_capability()[0] < 9,
226+
reason="Requires compute capability >= 9 for NV")
227227
def test_override_ttgir(device):
228228
N = 1024
229229
src = torch.randn(N, device=device)
@@ -272,8 +272,8 @@ def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
272272
torch.testing.assert_close(src * 10, dst)
273273

274274

275-
@pytest.mark.skipif(not is_cuda() or torch.cuda.get_device_capability()[0] != 9,
276-
reason="PTX file in this unit test is only for SM90")
275+
@pytest.mark.xfail(not is_cuda() or torch.cuda.get_device_capability()[0] != 9,
276+
reason="PTX file in this unit test is only for SM90")
277277
def test_override_ptx(device):
278278
N = 1024
279279
src = torch.randn(N, device=device)
@@ -372,7 +372,7 @@ def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
372372

373373
def test_exceed_tmem(device):
374374
if not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] == 10:
375-
pytest.skip("Test requires tensor memory.")
375+
pytest.xfail("Test requires tensor memory.")
376376
N = 512
377377
dst = torch.empty((N, ), device=device, dtype=torch.float32)
378378
configs = [triton.Config(kwargs={'BLOCK_SIZE': 128}), triton.Config(kwargs={'BLOCK_SIZE': 32})]
@@ -411,7 +411,7 @@ def dot_kernel(dst, BLOCK_SIZE: tl.constexpr):
411411

412412
def test_exceed_threads(device):
413413
if not torch.cuda.is_available():
414-
pytest.skip("CUDA is not available")
414+
pytest.xfail("CUDA is not available")
415415
x = torch.empty(1024, device=device, dtype=torch.float32)
416416
y = torch.empty_like(x)
417417
output = torch.empty_like(x)

0 commit comments (Comments: 0)