
Commit e2af9d5

【New IR】New ir op test v1.1(sum passed) (#56756)
* add reference of lbfgs
* new ir op test v1.0
* fix new ir optest bug1.0
* modify two testcase bug
* add new ir white list & pass test_mean_op.py
* rename white list
* add new_ir_guard
* new ir sum op test all pass
* rename backward.grad as ir_backward.grad
* check place for new ir
* fix test_build_model env bug
* fix test_prim_program backward bug
* change backward to ir_backward in check_appr
* add check_new_ir flag for mkldnn
* clean

Co-authored-by: wangruting <[email protected]>
1 parent d74bfef commit e2af9d5

6 files changed: 23 additions, 6 deletions

python/paddle/tensor/math.py

Lines changed: 5 additions & 2 deletions

@@ -1510,8 +1510,11 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):

     dtype_flag = False
     if dtype is not None:
-        dtype_flag = True
-        dtype = convert_np_dtype_to_dtype_(dtype)
+        if paddle.ir.core._use_new_ir_api():
+            dtype = paddle.ir.core.convert_np_dtype_to_dtype_(dtype)
+        else:
+            dtype_flag = True
+            dtype = convert_np_dtype_to_dtype_(dtype)

     if in_dynamic_mode():
         return _C_ops.sum(x, axis, dtype, keepdim)
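The rewritten branch leaves eager execution untouched: `in_dynamic_mode()` still returns early through `_C_ops.sum`, and the new-IR path only changes how `dtype` is normalized while a static program is being built. A minimal sketch of the same dispatch pattern, assuming `paddle.ir.core._use_new_ir_api()` and `paddle._ir_ops.sum` as shown in this commit; `legacy_static_sum` stands in for the old Program/Block lowering and is not a real Paddle helper:

import paddle

def sum_dispatch(x, axis=None, dtype=None, keepdim=False):
    # Eager mode: call the C++ kernel directly, as before.
    if paddle.in_dynamic_mode():
        return paddle._C_ops.sum(x, axis, dtype, keepdim)
    # Static mode: route to the new IR builder when it is switched on.
    if paddle.ir.core._use_new_ir_api():
        return paddle._ir_ops.sum(x, axis, dtype, keepdim)
    # Fall back to the legacy static-graph path (hypothetical helper).
    return legacy_static_sum(x, axis, dtype, keepdim)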

test/legacy_test/eager_op_test.py

Lines changed: 8 additions & 0 deletions
@@ -1927,6 +1927,7 @@ def check_output_with_place(
         only_check_prim=False,
         inplace_atol=None,
         check_cinn=False,
+        check_new_ir=True,
     ):
         core._set_prim_all_enabled(False)
         core.set_prim_eager_enabled(False)
@@ -2455,6 +2456,7 @@ def _is_skip_name(self, name):
         if (
             self.op_type
             in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
+            and check_new_ir
         ):
             if (
                 type(place) is paddle.fluid.libpaddle.CPUPlace
@@ -2576,6 +2578,7 @@ def check_output(
         inplace_atol=None,
         check_cinn=False,
         only_check_prim=False,
+        check_new_ir=True,
     ):
         self.__class__.op_type = self.op_type
         if self.is_mkldnn_op():
@@ -2600,6 +2603,7 @@ def check_output(
                 only_check_prim=only_check_prim,
                 inplace_atol=inplace_atol,
                 check_cinn=check_cinn,
+                check_new_ir=check_new_ir,
             )
             if not res and only_check_prim:
                 continue
@@ -2766,6 +2770,7 @@ def check_grad(
         only_check_prim=False,
         atol=1e-5,
         check_cinn=False,
+        check_new_ir=True,
     ):
         if hasattr(self, "use_custom_device") and self.use_custom_device:
             check_dygraph = False
@@ -2788,6 +2793,7 @@ def check_grad(
                 only_check_prim=only_check_prim,
                 atol=atol,
                 check_cinn=check_cinn,
+                check_new_ir=check_new_ir,
             )

     def check_grad_with_place(
@@ -2807,6 +2813,7 @@ def check_grad_with_place(
         numeric_place=None,
         atol=1e-5,
         check_cinn=False,
+        check_new_ir=True,
     ):
         if hasattr(self, "use_custom_device") and self.use_custom_device:
             check_dygraph = False
@@ -3007,6 +3014,7 @@ def check_grad_with_place(
         if (
             self.op_type
             in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
+            and check_new_ir
         ):
             if (
                 type(place) is paddle.fluid.libpaddle.CPUPlace
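Taken together, these hooks make the new-IR comparison opt-out rather than opt-in: `check_output`, `check_grad`, and their `*_with_place` variants all default to `check_new_ir=True`, and the extra check only fires for op types listed in `new_ir_python_api_grad_white_list`, on `CPUPlace`. A condensed sketch of that gate, assuming `paddle` and the whitelist module are already imported at the top of eager_op_test.py as the diff's usage implies; the helper name `_should_check_new_ir` is illustrative, not part of OpTest:

def _should_check_new_ir(self, place, check_new_ir):
    # Run the new-IR comparison only for whitelisted op types, on CPU,
    # and only when the test has not opted out via check_new_ir=False.
    return (
        self.op_type
        in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
        and check_new_ir
        and type(place) is paddle.fluid.libpaddle.CPUPlace
    )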

test/legacy_test/test_reduce_op.py

Lines changed: 5 additions & 1 deletion
@@ -1180,7 +1180,11 @@ def setUp(self):


 def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
-    return paddle._C_ops.sum(x, axis, dtype, keepdim)
+    if paddle.in_dynamic_mode():
+        return paddle._C_ops.sum(x, axis, dtype, keepdim)
+    else:
+        if paddle.ir.core._use_new_ir_api():
+            return paddle._ir_ops.sum(x, axis, dtype, keepdim)


 class Test8DReduce0(Test1DReduce):
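`reduce_sum_wrapper2` is the `python_api` that OpTest traces, so the eager branch keeps exercising `paddle._C_ops.sum` while the new-IR branch builds the op through `paddle._ir_ops.sum`; in legacy static mode the wrapper simply falls through (returns `None`), since that path does not go through the Python API. Roughly how such a wrapper is wired into a test in this file; the class name and input shape below are illustrative, not part of this commit:

import numpy as np

class TestSumWithNewIRWrapper(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"             # key used by the white list
        self.python_api = reduce_sum_wrapper2   # traced for eager and new-IR checks
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()                     # check_new_ir defaults to True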

test/mkldnn/test_reduce_bf16_mkldnn_op.py

Lines changed: 2 additions & 1 deletion
@@ -40,7 +40,7 @@ def setUp(self):
         self.attrs = {'use_mkldnn': self.use_mkldnn}

     def test_check_output(self):
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_new_ir=False)

     def calculate_grads(self):
         tmp_tensor = np.zeros(self.x_fp32.shape).astype("float32")
@@ -84,6 +84,7 @@ def test_check_grad(self):
             check_dygraph=False,
             user_defined_grads=[self.grad_X],
             user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)],
+            check_new_ir=False,
         )

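Since "reduce_sum" is now on the white list, these oneDNN bf16 tests would otherwise trigger the new-IR comparison; they opt out with `check_new_ir=False`, mirroring their existing `check_dygraph=False`, so the oneDNN kernels continue to be validated only through the legacy static path (this is the "add check_new_ir flag for mkldnn" item in the commit message). The same opt-out is applied in test/mkldnn/test_reduce_mkldnn_op.py below.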

test/mkldnn/test_reduce_mkldnn_op.py

Lines changed: 2 additions & 2 deletions
@@ -29,12 +29,12 @@ def setUp(self):
         self.attrs = {'use_mkldnn': self.use_mkldnn}

     def test_check_output(self):
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_new_ir=False)


 class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp):
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_dygraph=False)
+        self.check_grad(['X'], 'Out', check_dygraph=False, check_new_ir=False)


 class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp):

test/white_list/new_ir_python_api_grad_white_list.py

Lines changed: 1 addition & 0 deletions
@@ -14,4 +14,5 @@

 new_ir_python_api_grad_white_list = [
     "mean",
+    "reduce_sum",
 ]
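The white list is keyed by the legacy operator name that each test sets as `self.op_type`, which is why the new entry is "reduce_sum" rather than the Python-level `sum`. Extending coverage to another reduce op would be a one-line addition here plus a new-IR-aware `python_api` wrapper in its test, roughly as follows (the commented entry is a hypothetical example, not part of this commit):

new_ir_python_api_grad_white_list = [
    "mean",
    "reduce_sum",
    # "reduce_max",  # hypothetical next candidate: add only once its
    #                # python_api dispatches to paddle._ir_ops as above
]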
