
Commit dd1c7ee

lidanqing-intel authored and luotao1 committed
UT for conv2d_mkldnn_op with fuse_bias and fuse_residual (#16016)
test=develop
1 parent 9aaea38 commit dd1c7ee
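
In outline, the new tests model MKL-DNN's post-op fusion in pure NumPy: the expected output starts from the unfused conv2d reference output and then has the bias, the residual input, and (when enabled) ReLU applied on top. Below is a minimal standalone sketch of that expected-output computation; the two helpers mirror those added in the diff, while `conv_out`, `expected`, and the array shapes are hypothetical, chosen to match the TestWithFuse case (pad [1, 1] keeps the 5x5 spatial size).

import numpy as np


def conv2d_bias_naive(out, bias):
    # Broadcast a per-output-channel bias over the N, H, W axes (NCHW layout).
    _, out_c, _, _ = out.shape
    for c in range(out_c):
        out[:, c, :, :] = out[:, c, :, :] + bias[c]
    return out


def conv2d_residual_naive(out, residual):
    # Elementwise sum with a residual input of identical shape.
    assert out.shape == residual.shape
    return np.add(out, residual)


# Hypothetical stand-in for the unfused conv2d reference output:
# N=2, C_out=6, 5x5 spatial size, float32 as in the base test.
conv_out = np.random.random((2, 6, 5, 5)).astype(np.float32)
bias = np.random.random(6).astype(np.float32)
residual = np.random.random((2, 6, 5, 5)).astype(np.float32)

expected = conv2d_bias_naive(conv_out, bias)
expected = conv2d_residual_naive(expected, residual)
expected = np.maximum(expected, 0)  # applied only when fuse_relu is set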

File tree

1 file changed: +118 −23 lines


python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py

Lines changed: 118 additions & 23 deletions
@@ -15,44 +15,139 @@
 from __future__ import print_function
 
 import unittest
+import numpy as np
 
-from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp
 
 
-class TestMKLDNN(TestConv2dOp):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
+def conv2d_bias_naive(out, bias):
+    _, out_c, _, _ = out.shape
 
+    for l in range(out_c):
+        out[:, l, :, :] = out[:, l, :, :] + bias[l]
+    return out
 
-class TestMKLDNNWithPad(TestWithPad):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
 
+def conv2d_residual_naive(out, residual):
+    assert out.shape == residual.shape
+    out = np.add(out, residual)
+    return out
 
-class TestMKLDNNWithStride(TestWithStride):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
 
+class TestConv2dMKLDNNOp(TestConv2dOp):
+    def init_group(self):
+        self.groups = 1
 
-class TestMKLDNNWithGroup(TestWithGroup):
     def init_kernel_type(self):
-        self.use_mkldnn = True
         self.data_format = "NCHW"
+        self.use_mkldnn = True
+        self._cpu_only = True
 
+    def init_test_case(self):
+        self.pad = [0, 0]
+        self.stride = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 3]
 
-class TestMKLDNNWith1x1(TestWith1x1):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
+    def setUp(self):
+        self.fuse_bias = False
+        self.bias_size = None
+        self.fuse_relu = False
+        self.fuse_residual_connection = False
+        self.input_residual_size = None
+        TestConv2dOp.setUp(self)
 
+        output = self.outputs['Output']
 
-class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
+        # MKL-DNN only supports either conv-sum-relu or conv-relu fusion.
+        if self.fuse_bias and self.bias_size is not None:
+            bias = np.random.random(self.bias_size).astype(self.dtype)
+            output = conv2d_bias_naive(output, bias)
+            output = output.astype(self.dtype)
+            self.attrs['fuse_bias'] = self.fuse_bias
+            self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias)
+
+        if self.fuse_residual_connection and self.input_residual_size is not None:
+            input_residual = np.random.random(self.input_residual_size).astype(
+                self.dtype)
+            output = conv2d_residual_naive(output, input_residual)
+
+            self.attrs[
+                'fuse_residual_connection'] = self.fuse_residual_connection
+            self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype(
+                input_residual)
+
+        if self.fuse_relu:
+            output = np.maximum(output, 0).astype(self.dtype)
+
+        output = output.astype(self.dtype)
+
+        self.attrs['fuse_bias'] = self.fuse_bias
+        self.attrs['fuse_relu'] = self.fuse_relu
+        self.attrs['fuse_residual_connection'] = self.fuse_residual_connection
+
+        self.outputs['Output'] = output
+
+
+class TestWithFuse(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.fuse_bias = True
+        self.bias_size = [6]
+        self.fuse_residual_connection = True
+        self.input_residual_size = [2, 6, 5, 5]
+
+    def test_check_grad(self):
+        pass
+
+    def test_check_grad_no_filter(self):
+        pass
+
+    def test_check_grad_no_input(self):
+        pass
+
+
+class TestWithPadWithBias(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.input_size = [2, 3, 6, 6]
+
+
+class TestWithStride(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.stride = [2, 2]
+        self.input_size = [2, 3, 6, 6]
+
+
+class TestWithGroup(TestConv2dMKLDNNOp):
+    def init_group(self):
+        self.groups = 3
+
+
+class TestWith1x1(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.filter_size = [6, 3, 1, 1]
+
+
+class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.input_size = [2, 3, 1, 1]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 1, 1]
+
+    def init_group(self):
+        self.groups = 3
 
 
 if __name__ == '__main__':
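
Note that TestWithFuse stubs out all three gradient checks, presumably because the base class's numeric gradient checks do not model the fused bias and residual terms, so only the forward output is compared. A further case could be layered on the same base in the established subclass pattern; the sketch below is a hypothetical extension for illustration, not part of this commit, and whether the kernel accepts this exact attribute combination would need verifying.

# Hypothetical extension following the commit's subclass pattern:
# init_test_case() runs inside TestConv2dOp.setUp(), so flags set here
# take effect before the expected output is assembled.
class TestWithFuseRelu(TestConv2dMKLDNNOp):
    def init_test_case(self):
        TestConv2dMKLDNNOp.init_test_case(self)
        self.pad = [1, 1]
        self.fuse_relu = True

    def test_check_grad(self):
        # As with TestWithFuse, no reference gradient exists for the
        # fused forward pass, so the gradient check is skipped.
        pass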
