
Commit ad295e3

[AUTOGENERATED] [release/2.6] [release/2.5] Skipped *_stress_cuda UTs in test_c10d_gloo (#2422)
Cherry-pick of #2317. Co-authored-by: akashveramd <[email protected]>
Parent: 9663f2d

File tree

1 file changed (+8, -0 lines)

test/distributed/test_c10d_gloo.py

Lines changed: 8 additions & 0 deletions
@@ -52,6 +52,7 @@
     retry_on_connect_failures,
     run_tests,
     skip_but_pass_in_sandcastle,
+    skipIfRocm,
     TestCase,
 )

@@ -385,6 +386,7 @@ def test_broadcast_stress(self):
         inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
         self._test_broadcast_stress(inputs)

+    @skipIfRocm
     @skip_if_lt_x_gpu(2)
     @requires_gloo()
     def test_broadcast_stress_cuda(self):
@@ -490,6 +492,7 @@ def test_allreduce_stress(self):
         inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
         self._test_allreduce_stress(inputs)

+    @skipIfRocm
     @skip_if_lt_x_gpu(2)
     @requires_gloo()
     def test_allreduce_stress_cuda(self):
@@ -922,6 +925,8 @@ def test_scatter_stress(self):
     @skip_but_pass_in_sandcastle(
         "Test is flaky, see https://github.com/pytorch/pytorch/issues/15963"
     )
+
+    @skipIfRocm
     @skip_if_lt_x_gpu(2)
     @requires_gloo()
     def test_scatter_stress_cuda(self):
@@ -1096,6 +1101,7 @@ def test_gather_stress(self):
         inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
         self._test_gather_stress(inputs, lambda t: t.clone())

+    @skipIfRocm
     @skip_if_lt_x_gpu(2)
     @requires_gloo()
     def test_gather_stress_cuda(self):
@@ -1231,6 +1237,7 @@ def test_allgather_stress(self):
         inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
         self._test_allgather_stress(inputs, lambda t: t.clone())

+    @skipIfRocm
     @skip_if_lt_x_gpu(2)
     @requires_gloo()
     def test_allgather_stress_cuda(self):
@@ -1417,6 +1424,7 @@ def test_reduce_stress(self):
         inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
         self._test_reduce_stress(inputs)

+    @skipIfRocm
     @skip_if_lt_x_gpu(2)
     @requires_gloo()
     def test_reduce_stress_cuda(self):
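
For readers unfamiliar with the decorator, below is a minimal sketch of the pattern this commit applies. It assumes skipIfRocm, TestCase, and run_tests come from torch.testing._internal.common_utils, as in upstream PyTorch; the class name, test name, and test body are placeholders, not code from test_c10d_gloo.py.

# Minimal sketch of the @skipIfRocm pattern added by this commit.
# Assumption: these helpers are imported from torch.testing._internal.common_utils;
# the test class and body below are placeholders, not the real stress tests.
from torch.testing._internal.common_utils import (
    run_tests,
    skipIfRocm,
    TestCase,
)


class ExampleStressTest(TestCase):
    @skipIfRocm  # marks the test as skipped when PyTorch is running on a ROCm build
    def test_example_stress_cuda(self):
        # Placeholder body; the real *_stress_cuda tests loop gloo collectives over GPU tensors.
        self.assertTrue(True)


if __name__ == "__main__":
    run_tests()

With the decorator in place, the *_stress_cuda tests report as skipped on ROCm builds instead of executing their GPU stress loops.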

0 commit comments
