
Commit bd1a240

[cherry-pick]Fix cuda12 test (#54622)
* [AMP Prim OP]support some prim ops for bf16 dtype part3 (#54368)
* support some prim ops bf16 dtype
* fix cmake
* [AMP Prim OP]support some prim ops for bf16 dtype part5 (#54422)
* support some prim ops for bf16 dtype
* remove useless code
* support some prim ops bf16 dtype (#54399)
1 parent 37b4d7a commit bd1a240

17 files changed: +248 -83 lines
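
The diffs below all touch bf16 (bfloat16) OpTest cases, which carry their data as numpy uint16 through convert_float_to_uint16. A minimal sketch of that representation, assuming the usual truncate-to-upper-16-bits scheme; the helper below is a local illustration, not Paddle's implementation:

import numpy as np

def float_to_uint16_sketch(x: np.ndarray) -> np.ndarray:
    # Reinterpret the float32 bits and keep the upper 16 bits (truncation),
    # which is how bfloat16 values are commonly stored in a uint16 ndarray.
    return (x.astype(np.float32).view(np.uint32) >> 16).astype(np.uint16)

x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
x_bf16 = float_to_uint16_sketch(x)  # dtype uint16, same shape as x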

test/legacy_test/CMakeLists.txt

Lines changed: 6 additions & 1 deletion
@@ -1197,7 +1197,12 @@ set(TEST_CINN_OPS
     test_scatter_nd_op
     test_strided_slice_op
     test_instance_norm_op
-    test_cumsum_op)
+    test_cumsum_op
+    test_pad_op
+    test_split_op
+    test_erf_op
+    test_assign_op
+    test_flatten_contiguous_range_op)
 
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)

test/legacy_test/test_assign_op.py

Lines changed: 0 additions & 1 deletion
@@ -80,7 +80,6 @@ def setUp(self):
         self.public_python_api = paddle.assign
         self.op_type = "assign"
         self.prim_op_type = "prim"
-        self.enable_cinn = False
         x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
         x = convert_float_to_uint16(x)
         self.inputs = {'X': x}

test/legacy_test/test_elementwise_min_op.py

Lines changed: 3 additions & 22 deletions
@@ -127,43 +127,30 @@ def setUp(self):
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMinFP16Op_ZeroDim1(TestElementwiseFP16Op):
     def init_data(self):
         self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
         self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMinOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
         self.public_python_api = paddle.minimum
         self.prim_op_type = "prim"
-        self.if_enable_cinn()
         x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         y = np.random.uniform(0.1, 1, []).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMinFP16Op_ZeroDim2(TestElementwiseFP16Op):
     def init_data(self):
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
         self.y = np.random.uniform(0.1, 1, []).astype("float16")
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMinOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
@@ -177,18 +164,12 @@ def setUp(self):
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMinFP16Op_ZeroDim3(TestElementwiseFP16Op):
     def init_data(self):
         self.x = np.random.uniform(0.1, 1, []).astype("float16")
         self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
@@ -388,7 +369,7 @@ def test_check_grad_normal(self):
     def test_check_grad_ingore_x(self):
         places = self._get_places()
         for place in places:
-            if type(place) is paddle.fluid.libpaddle.CPUPlace:
+            if isinstance(place, paddle.fluid.libpaddle.CPUPlace):
                 check_prim = False
             else:
                 check_prim = True
@@ -413,7 +394,7 @@ def test_check_grad_ingore_x(self):
     def test_check_grad_ingore_y(self):
         places = self._get_places()
        for place in places:
-            if type(place) is paddle.fluid.libpaddle.CPUPlace:
+            if isinstance(place, paddle.fluid.libpaddle.CPUPlace):
                 check_prim = False
             else:
                 check_prim = True
@@ -436,7 +417,7 @@ def test_check_grad_ingore_y(self):
         )
 
     def if_enable_cinn(self):
-        self.enable_cinn = False
+        pass
 
 
 class TestElementwiseMinBF16Op_ZeroDim1(TestElementwiseBF16Op):
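
The type(place) checks above were replaced with isinstance, the idiomatic form that also matches subclasses. A self-contained illustration of the difference (CPUPlace and CustomCPUPlace below are stand-in classes, not Paddle's):

class CPUPlace: ...
class CustomCPUPlace(CPUPlace): ...

place = CustomCPUPlace()
print(type(place) is CPUPlace)      # False: exact-type check misses subclasses
print(isinstance(place, CPUPlace))  # True: subclass-aware check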

test/legacy_test/test_erf_op.py

Lines changed: 9 additions & 7 deletions
@@ -57,15 +57,17 @@ def _test_case(self, place):
         np.testing.assert_allclose(y_ref, y_test, rtol=1e-05)
 
     def test_case(self):
-        self._test_case(fluid.CPUPlace())
-        if fluid.is_compiled_with_cuda():
-            self._test_case(fluid.CUDAPlace(0))
+        with paddle.fluid.framework._static_guard():
+            self._test_case(fluid.CPUPlace())
+            if fluid.is_compiled_with_cuda():
+                self._test_case(fluid.CUDAPlace(0))
 
     def test_name(self):
-        with fluid.program_guard(fluid.Program()):
-            x = paddle.static.data('x', [3, 4])
-            y = paddle.erf(x, name='erf')
-            self.assertTrue('erf' in y.name)
+        with paddle.fluid.framework._static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = paddle.static.data('x', [3, 4])
+                y = paddle.erf(x, name='erf')
+                self.assertTrue('erf' in y.name)
 
 
 class TestErfFP16OP(OpTest):
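
Both erf tests above are now wrapped in paddle.fluid.framework._static_guard(), the internal context manager used in the diff, presumably so the legacy static-graph assertions run in static mode regardless of the process default. A condensed sketch of the pattern, assuming paddle is importable and the guard keeps this name:

import paddle
from paddle import fluid

with paddle.fluid.framework._static_guard():
    with fluid.program_guard(fluid.Program()):
        x = paddle.static.data('x', [3, 4])
        y = paddle.erf(x, name='erf')
        assert 'erf' in y.name  # mirrors the assertion in test_name above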

test/legacy_test/test_fill_any_like_op.py

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ def test_check_output(self):
         self.check_output_with_place(place, check_prim=True)
 
     def if_enable_cinn(self):
-        self.enable_cinn = False
+        pass
 
 
 class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):

test/legacy_test/test_flatten_contiguous_range_op.py

Lines changed: 25 additions & 4 deletions
@@ -30,7 +30,7 @@ def setUp(self):
         self.prim_op_type = "comp"
         self.start_axis = 0
         self.stop_axis = -1
-        self.skip_cinn()
+        self.if_enable_cinn()
         self.init_test_case()
         self.init_test_dtype()
         self.init_input_data()
@@ -40,8 +40,8 @@ def setUp(self):
             "XShape": np.random.random(self.in_shape).astype("float32"),
         }
 
-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_enable_cinn(self):
+        pass
 
     def test_check_output(self):
         if str(self.dtype) in {"float16", "uint16"}:
@@ -104,6 +104,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op(TestFlattenOp):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
@@ -142,6 +145,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_1(TestFlattenOp_1):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
@@ -180,6 +186,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_2(TestFlattenOp_2):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
@@ -218,6 +227,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_3(TestFlattenOp_3):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
@@ -256,6 +268,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_4(TestFlattenOp_4):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
@@ -294,6 +309,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_5(TestFlattenOp_5):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
@@ -305,7 +323,7 @@ def init_test_case(self):
         self.stop_axis = -1
         self.new_shape = (1,)
 
-    def skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
 
     def init_attrs(self):
@@ -363,6 +381,9 @@ def init_test_dtype(self):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16OpSixDims(TestFlattenOpSixDims):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
 
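
The rename from skip_cinn to if_enable_cinn above follows the hook pattern used throughout these OpTests: setUp() calls a no-op hook, and only the cases that cannot run under CINN override it. A stripped-down sketch with stand-in class names (the assumed default is shown in a comment):

class FlattenOpSketch:
    def setUp(self):
        self.enable_cinn = True   # assumed default before the hook runs
        self.if_enable_cinn()     # hook point called once during setup

    def if_enable_cinn(self):
        pass                      # default: leave CINN enabled

class FlattenZeroDimSketch(FlattenOpSketch):
    def if_enable_cinn(self):
        self.enable_cinn = False  # this case opts out of CINN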

test/legacy_test/test_index_select_op.py

Lines changed: 11 additions & 3 deletions
@@ -19,7 +19,7 @@
 
 import paddle
 from paddle import fluid
-from paddle.fluid import Program, program_guard
+from paddle.fluid import Program, core, program_guard
 
 np.random.seed(1024)
 
@@ -102,8 +102,11 @@ def init_dtype_type(self):
 class TestIndexSelectBF16Op(OpTest):
     def setUp(self):
         self.python_api = paddle.index_select
+        self.public_python_api = paddle.index_select
+        self.prim_op_type = "comp"
         self.op_type = "index_select"
         self.init_dtype_type()
+        self.if_skip_cinn()
         index_np = np.random.randint(
             low=0, high=self.x_shape[self.dim], size=self.index_size
         )
@@ -124,6 +127,9 @@ def setUp(self):
         out = np.reshape(out_list, self.out_shape)
         self.outputs = {'Out': convert_float_to_uint16(out)}
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
     def init_dtype_type(self):
         self.dim = 1
         self.x_type = np.uint16
@@ -132,10 +138,12 @@ def init_dtype_type(self):
         self.index_size = 100
 
     def test_check_output(self):
-        self.check_output()
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
 
 
 class TestIndexSelectAPI(unittest.TestCase):
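
For reference, the expected output that the bf16 index_select test builds element by element is just a gather along one axis; a plain-NumPy equivalent (shapes here are illustrative, not the test's 100-element index):

import numpy as np

x = np.random.rand(4, 3).astype(np.float32)
index = np.array([0, 2, 2])
out = np.take(x, index, axis=1)  # dim = 1, as in TestIndexSelectBF16Op
assert out.shape == (4, 3)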

test/legacy_test/test_pad_op.py

Lines changed: 5 additions & 2 deletions
@@ -100,7 +100,7 @@ def get_dtype(self):
             return np.float16
 
         def test_check_grad_normal(self):
-            self.check_grad(['X'], 'Out')
+            self.check_grad(['X'], 'Out', check_prim=True)
 
     cls_name = "{}_{}".format(parent.__name__, "Fp16")
     TestPadFp16.__name__ = cls_name
@@ -238,9 +238,12 @@ def setUp(self):
         )
         self.inputs = {'X': convert_float_to_uint16(x)}
         self.outputs = {'Out': convert_float_to_uint16(out)}
-        self.enable_cinn = False
         self.prim_op_type = "prim"
         self.public_python_api = pad_wrapper
+        self.if_enable_cinn()
+
+    def if_enable_cinn(self):
+        pass
 
     def initTestCase(self):
         self.shape = (16, 16)
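
The expected output in the bf16 pad test above comes from NumPy-style constant padding with per-dimension (before, after) widths; a small hedged example (the concrete paddings are illustrative only):

import numpy as np

x = np.random.rand(16, 16).astype(np.float32)  # matches initTestCase's shape
out = np.pad(x, pad_width=((0, 1), (2, 3)), mode='constant', constant_values=0.0)
assert out.shape == (17, 21)  # 16+0+1 rows, 16+2+3 columns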

test/legacy_test/test_roll_op.py

Lines changed: 0 additions & 3 deletions
@@ -53,9 +53,6 @@ def test_check_output(self):
     def test_check_grad_normal(self):
         self.check_grad(['X'], 'Out', check_prim=True)
 
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True)
-
 
 class TestRollOpCase2(TestRollOp):
     def init_dtype_type(self):
