
Commit e7091a4

xuhancn authored and pytorchmergebot committed
[AOTI] skip Windows XPU crashed UTs. (pytorch#165393)
Skip some UTs that crash on Windows XPU.

Pull Request resolved: pytorch#165393
Approved by: https://github.com/jansel
1 parent: bcfea48 · commit: e7091a4

2 files changed: 21 additions, 0 deletions


test/inductor/test_aot_inductor.py

Lines changed: 6 additions & 0 deletions
```diff
@@ -73,6 +73,7 @@
     skipIfRocm,
     skipIfRocmArch,
     skipIfWindows,
+    skipIfWindowsXPU,
     skipIfXpu,
     TEST_MPS,
     TEST_WITH_ROCM,
@@ -1166,6 +1167,7 @@ def forward(self, x, y):
             options={"debug_check_inf_and_nan": True},
         )
 
+    @skipIfWindowsXPU(msg="crash on Windows XPU.")
     def test_assert_async(self):
         if self.device != GPU_TYPE:
             raise unittest.SkipTest("requires GPU_TYPE")
@@ -1858,6 +1860,7 @@ def forward(self, values, repeats, mask, embeddings, x, z, scalar):
         }
         self.check_model(Repro(), example_inputs, dynamic_shapes=spec)
 
+    @skipIfWindowsXPU(msg="crash on Windows XPU.")
     def test_size_with_unbacked_add_expr_transitive(self):
         # Edge case with torch._check(expr1, expr2) + torch._check(expr2, unbacked).
         # When generating example input sizes for autotuning, it should coalesce
@@ -3438,6 +3441,7 @@ def forward(self, a, b, lengths):
         self.check_model(Model(), example_inputs)
 
     @common_utils.parametrize("minmax", [min, max])
+    @skipIfWindowsXPU(msg="crash on Windows XPU.")
     def test_sympy_cpp_printer_min_max(self, minmax):
         if self.device != GPU_TYPE:
             raise unittest.SkipTest("requires GPU")
@@ -3927,6 +3931,7 @@ def forward(self, x):
         x = torch.randn(16, 16, device=self.device)
         self.check_model(Model(), (x,))
 
+    @skipIfWindowsXPU(msg="crash on Windows XPU.")
     def test_triton_kernel_dynamic_grid(self):
         if self.device != GPU_TYPE:
             raise unittest.SkipTest("requires GPU")
@@ -4424,6 +4429,7 @@ def forward(self, a):
         model.weight += 1
         self.check_model(model, example_inputs)
 
+    @skipIfWindowsXPU(msg="crash on Windows XPU.")
     def test_triton_kernel_extern_kernel_arg(self):
         if self.device != GPU_TYPE:
             raise unittest.SkipTest("requires GPU")
```

torch/testing/_internal/common_utils.py

Lines changed: 15 additions & 0 deletions
```diff
@@ -2102,6 +2102,21 @@ def wrapper(*args, **kwargs):
         return dec_fn(func)
     return dec_fn
 
+def skipIfWindowsXPU(func=None, *, msg="test doesn't currently work on the Windows stack"):
+    def dec_fn(fn):
+        reason = f"skipIfWindowsXPU: {msg}"
+
+        @wraps(fn)
+        def wrapper(*args, **kwargs):
+            if IS_WINDOWS and torch.xpu.is_available():  # noqa: F821
+                raise unittest.SkipTest(reason)
+            else:
+                return fn(*args, **kwargs)
+        return wrapper
+    if func:
+        return dec_fn(func)
+    return dec_fn
+
 def requires_cuda_p2p_access():
     cuda_p2p_access_available = (
         torch.cuda.is_available()
```
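
For reference, a minimal sketch of how the new decorator is used; the test class and method names below are illustrative, not part of this PR. Because skipIfWindowsXPU accepts an optional positional func argument, it works both bare and with an explicit msg, following the same pattern as the existing skipIfWindows helper:

```python
import unittest

from torch.testing._internal.common_utils import skipIfWindowsXPU


class ExampleXPUTest(unittest.TestCase):  # hypothetical class, for illustration only
    @skipIfWindowsXPU  # bare form: uses the default skip message
    def test_default_message(self):
        self.assertTrue(True)

    @skipIfWindowsXPU(msg="crash on Windows XPU.")  # keyword form, as used in this PR
    def test_custom_message(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
```

On Windows with torch.xpu.is_available() returning True, the wrapper raises unittest.SkipTest before the test body runs, so the test is reported as skipped rather than failed; on every other configuration the wrapped test executes normally.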
