
Commit 19d1c91

add get_device_place in op_test.py, and replace the method of obtaining place in legacy_test (#74349)
* small fix
1 parent: 868f6b6

File tree: 100 files changed, +383 −1232 lines


test/legacy_test/op_test.py (11 additions, 0 deletions)

@@ -412,6 +412,17 @@ def get_places(string_format=False):
     return places
 
 
+def get_device_place():
+    if core.is_compiled_with_cuda():
+        return base.CUDAPlace(0)
+    custom_dev_types = paddle.device.get_all_custom_device_type()
+    if custom_dev_types and core.is_compiled_with_custom_device(
+        custom_dev_types[0]
+    ):
+        return base.CustomPlace(custom_dev_types[0], 0)
+    return base.CPUPlace()
+
+
 @contextmanager
 def auto_parallel_test_guard(test_info_path, generated_test_file_path):
     test_info_file, generated_test_file = None, None
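
For context, a minimal sketch of how a test might consume the new helper (the test class below is a hypothetical example, not part of this commit):

    import unittest

    import numpy as np
    from op_test import get_device_place


    class TestExampleAPI(unittest.TestCase):  # hypothetical example
        def setUp(self):
            self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
            # Resolves to CUDAPlace(0) on a CUDA build, else the first
            # registered custom device, else CPUPlace() -- the fallback
            # order defined by get_device_place() above.
            self.place = get_device_place()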

test/legacy_test/test_activation_op.py (29 additions, 116 deletions)

@@ -18,7 +18,12 @@
 from contextlib import contextmanager
 
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, get_places
+from op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    get_device_place,
+    get_places,
+)
 from scipy.special import erf, expit
 from utils import static_guard
 
@@ -637,11 +642,7 @@ class TestSiluAPI(unittest.TestCase):
     # test paddle.nn.Silu, paddle.nn.functional.silu
     def setUp(self):
         self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if core.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -739,11 +740,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -883,11 +880,7 @@ def setUp(self):
         self.dtype = 'float32'
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
         self.executed_api()
 
     def executed_api(self):
@@ -1328,11 +1321,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -1441,11 +1430,7 @@ class TestHardShrinkAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -1510,11 +1495,7 @@ def setUp(self):
         np.random.seed(1024)
         self.init_shape()
         self.x_np = np.random.uniform(-3, 3, self.x_shape).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def init_shape(self):
         self.x_shape = [10, 12]
@@ -1620,11 +1601,7 @@ def setUp(self):
         self.threshold = 0.8
         np.random.seed(1024)
         self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -2247,11 +2224,7 @@ def setUp(self):
             np.random.uniform(-1, 1, self.shape)
             + 1j * np.random.uniform(-1, 1, self.shape)
         ).astype(self.dtype)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
         out = np.tan(self.x_np)
 
@@ -2302,11 +2275,7 @@ def setUp(self):
         np.random.seed(1024)
         self.dtype = 'float32'
         self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_dygraph_api(self):
         with dynamic_guard():
@@ -2852,11 +2821,7 @@ class TestReluAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
         self.executed_api()
 
     def executed_api(self):
@@ -2996,11 +2961,7 @@ class TestLeakyReluAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -3176,11 +3137,7 @@ class TestGELUAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
         self.enable_cinn = False
 
         # The backward decomposite of gelu is inconsistent with raw kernel on
@@ -3335,11 +3292,7 @@ def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
         self.x_np[np.abs(self.x_np) < 0.005] = 0.02
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -3524,11 +3477,7 @@ class TestHardswishAPI(unittest.TestCase):
     # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
     def setUp(self):
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -3691,11 +3640,7 @@ class TestELUAPI(unittest.TestCase):
    def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
         self.executed_api()
 
     def executed_api(self):
@@ -3806,11 +3751,7 @@ class TestCELUAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
         self.executed_api()
 
     def executed_api(self):
@@ -4841,11 +4782,7 @@ def setUp(self):
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
         self.scale_a = self.get_scale_a()
         self.scale_b = self.get_scale_b()
-        self.place = (
-            paddle.CUDAPlace(0)
-            if core.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -5025,11 +4962,7 @@ def setUp(self):
         self.threshold = 15
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -5160,11 +5093,7 @@ class TestSoftsignAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -5265,11 +5194,7 @@ def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
         self.x_np[np.abs(self.x_np) < 0.005] = 0.02
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -5397,11 +5322,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
     # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
     def setUp(self):
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -5508,11 +5429,7 @@ class TestSwishAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
@@ -5619,11 +5536,7 @@ class TestMishAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(1024)
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
-        self.place = (
-            paddle.CUDAPlace(0)
-            if paddle.is_compiled_with_cuda()
-            else paddle.CPUPlace()
-        )
+        self.place = get_device_place()
 
     def test_static_api(self):
         with static_guard():
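
Every hunk above after the import block is the same mechanical substitution. As a sanity check, here is a minimal sketch of the equivalence the commit relies on, assuming a build with no custom device plugin registered (the assertion is illustrative, not part of the commit):

    import paddle
    from paddle.base import core
    from op_test import get_device_place

    # With no custom device registered, get_device_place() reduces to the
    # ternary each setUp() used to spell out by hand.
    old_style = (
        paddle.CUDAPlace(0)
        if core.is_compiled_with_cuda()
        else paddle.CPUPlace()
    )
    assert type(get_device_place()) is type(old_style)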

test/legacy_test/test_adadelta_op.py (2 additions, 2 deletions)

@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, get_places
+from op_test import OpTest, get_device_place, get_places
 
 import paddle
 from paddle import base
@@ -337,7 +337,7 @@ def static_adadelta_mp(self, mp, use_amp):
         paddle.enable_static()
         paddle.seed(100)
         np.random.seed(100)
-        exe = paddle.static.Executor('gpu')
+        exe = paddle.static.Executor(get_device_place())
         train_program = paddle.static.Program()
         startup_program = paddle.static.Program()
         optimizer = paddle.optimizer.Adadelta(0.1)
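
The Executor change here (repeated in the adagrad and adamax diffs below) swaps a hard-coded device string for the resolved place. A minimal sketch of the difference, under the assumption that the test host may not have a GPU:

    import paddle
    from op_test import get_device_place

    paddle.enable_static()
    # Executor('gpu') presumes a CUDA build and device and fails elsewhere;
    # a place object from get_device_place() always names a device the
    # current build actually supports (CUDA, a custom device, or CPU).
    exe = paddle.static.Executor(get_device_place())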

test/legacy_test/test_adagrad_op.py (2 additions, 2 deletions)

@@ -17,7 +17,7 @@
 
 import numpy as np
 from op import Operator
-from op_test import OpTest, get_places
+from op_test import OpTest, get_device_place, get_places
 
 import paddle
 from paddle.base import core
@@ -283,7 +283,7 @@ def static_adagrad_mp(self, mp, use_amp):
         paddle.enable_static()
         paddle.seed(100)
         np.random.seed(100)
-        exe = paddle.static.Executor('gpu')
+        exe = paddle.static.Executor(get_device_place())
         train_program = paddle.static.Program()
         startup_program = paddle.static.Program()
         optimizer = paddle.optimizer.Adagrad(0.1)

test/legacy_test/test_adamax_op.py (2 additions, 2 deletions)

@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, get_places
+from op_test import OpTest, get_device_place, get_places
 
 import paddle
 
@@ -319,7 +319,7 @@ def static_adamax_mp(self, mp, use_amp):
         paddle.enable_static()
         paddle.seed(100)
         np.random.seed(100)
-        exe = paddle.static.Executor('gpu')
+        exe = paddle.static.Executor(get_device_place())
         train_program = paddle.static.Program()
         startup_program = paddle.static.Program()
         optimizer = paddle.optimizer.Adamax(0.1)
