Skip to content

Commit 3eb42bf

Browse files
committed
move test_CompareMKLDNNandCPU to test_MKLDNN and remove unused code
1 parent 56f6e23 commit 3eb42bf

12 files changed

+197
-384
lines changed

paddle/gserver/tests/MKLDNNTester.cpp

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -521,12 +521,16 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
521521
gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN);
522522
// save forward result
523523
for (size_t k = 0; k < outArgs.size(); k++) {
524-
MatrixPtr value = Matrix::create(outArgs[k].value->getHeight(),
525-
outArgs[k].value->getWidth(),
526-
false,
527-
false);
528-
value->copyFrom(*outArgs[k].value);
529-
out.outValues.push_back(value);
524+
const MatrixPtr& src = outArgs[k].value;
525+
MatrixPtr dst =
526+
Matrix::create(src->getHeight(), src->getWidth(), false, false);
527+
if (typeid(*src) == typeid(MKLDNNMatrix)) {
528+
MKLDNNMatrixPtr dnnSrc = std::dynamic_pointer_cast<MKLDNNMatrix>(src);
529+
dnnSrc->copyTo(*dst);
530+
} else {
531+
dst->copyFrom(*src);
532+
}
533+
out.outValues.push_back(dst);
530534
}
531535

532536
// random backward input
@@ -559,9 +563,9 @@ void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
559563
}
560564
}
561565

562-
void MKLDNNTester::runBranchesTest(const std::string& configPath,
563-
size_t iter,
564-
float eps) {
566+
void MKLDNNTester::runNetTest(const std::string& configPath,
567+
size_t iter,
568+
float eps) {
565569
DataIn in;
566570
initArgument(in, configPath, iter);
567571
DataOut outCpu, outDnn;

paddle/gserver/tests/MKLDNNTester.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -85,17 +85,17 @@ class MKLDNNTester {
8585
bool printDetails = false,
8686
size_t iter = 3,
8787
float epsilon = 1e-4);
88-
static void runBranchesTest(const std::string& configPath,
89-
size_t iter = 3,
90-
float eps = 1e-4);
88+
static void runNetTest(const std::string& configPath,
89+
size_t iter = 2,
90+
float eps = 1e-4);
9191
static void initArgument(DataIn& data,
9292
const std::string& configPath,
93-
size_t iter = 3);
93+
size_t iter = 2);
9494
static void getOutResult(const std::string& configPath,
9595
DataIn& in,
9696
DataOut& out,
9797
bool use_mkldnn,
98-
size_t iter = 3);
98+
size_t iter = 2);
9999

100100
private:
101101
void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from paddle.trainer_config_helpers import *
16+
17+
settings(batch_size=16)
18+
channels = get_config_arg("channels", int, 2)
19+
20+
def two_conv(input, group_name):
21+
out1 = img_conv_layer(input=input,
22+
name=group_name+'_conv1_',
23+
filter_size=1,
24+
num_filters=channels,
25+
padding=0,
26+
shared_biases=True,
27+
act=ReluActivation())
28+
29+
out2 = img_conv_layer(input=input,
30+
name=group_name+'_conv2_',
31+
filter_size=3,
32+
num_filters=channels,
33+
padding=1,
34+
shared_biases=True,
35+
act=ReluActivation())
36+
return out1, out2
37+
38+
def two_conv_bn(input, group_name):
39+
out1, out2 = two_conv(input, group_name)
40+
out1 = batch_norm_layer(input=out1,
41+
name=group_name+'_bn1_',
42+
use_global_stats=False,
43+
act=ReluActivation())
44+
45+
out2 = batch_norm_layer(input=out2,
46+
name=group_name+'_bn2_',
47+
use_global_stats=False,
48+
act=ReluActivation())
49+
return out1, out2
50+
51+
def two_conv_pool(input, group_name):
52+
out1, out2 = two_conv(input, group_name)
53+
out1 = img_pool_layer(input=out1,
54+
name=group_name+'_pool1_',
55+
pool_size=3,
56+
stride=2,
57+
padding=0,
58+
pool_type=MaxPooling())
59+
60+
out2 = img_pool_layer(input=out2,
61+
name=group_name+'_pool2_',
62+
pool_size=5,
63+
stride=2,
64+
padding=1,
65+
pool_type=MaxPooling())
66+
return out1, out2
67+
68+
def two_fc(input, group_name):
69+
out1 = fc_layer(input=input,
70+
name=group_name+'_fc1_',
71+
size=channels,
72+
bias_attr=False,
73+
act=LinearActivation())
74+
75+
out2 = fc_layer(input=input,
76+
name=group_name+'_fc2_',
77+
size=channels,
78+
bias_attr=False,
79+
act=LinearActivation())
80+
return out1, out2
81+
82+
data = data_layer(name ="input", size=channels*16*16)
83+
84+
tmp = img_conv_layer(input=data,
85+
num_channels=channels,
86+
filter_size=3,
87+
num_filters=channels,
88+
padding=1,
89+
shared_biases=True,
90+
act=ReluActivation())
91+
92+
a1, a2 = two_conv(tmp, 'conv_branch')
93+
tmp = addto_layer(input=[a1, a2],
94+
act=ReluActivation(),
95+
bias_attr=False)
96+
97+
tmp = img_pool_layer(input=tmp,
98+
pool_size=3,
99+
stride=2,
100+
padding=1,
101+
pool_type=AvgPooling())
102+
103+
b1, b2 = two_conv_pool(tmp, 'pool_branch')
104+
tmp = concat_layer(input=[b1, b2])
105+
106+
tmp = img_pool_layer(input=tmp,
107+
num_channels=channels*2,
108+
pool_size=3,
109+
stride=2,
110+
padding=1,
111+
pool_type=MaxPooling())
112+
113+
tmp = img_conv_layer(input=tmp,
114+
filter_size=3,
115+
num_filters=channels,
116+
padding=1,
117+
stride=2,
118+
shared_biases=True,
119+
act=LinearActivation(),
120+
bias_attr=False)
121+
122+
tmp = batch_norm_layer(input=tmp,
123+
use_global_stats=False,
124+
act=ReluActivation())
125+
126+
c1, c2 = two_conv_bn(tmp, 'bn_branch')
127+
tmp = addto_layer(input=[c1, c2],
128+
act=ReluActivation(),
129+
bias_attr=False)
130+
131+
tmp = fc_layer(input=tmp, size=channels,
132+
bias_attr=True,
133+
act=ReluActivation())
134+
135+
d1, d2 = two_fc(tmp, 'fc_branch')
136+
tmp = addto_layer(input=[d1, d2])
137+
138+
out = fc_layer(input=tmp, size=10,
139+
bias_attr=True,
140+
act=SoftmaxActivation())
141+
142+
outputs(out)

paddle/gserver/tests/mkldnn_branches_fc.conf

Lines changed: 0 additions & 58 deletions
This file was deleted.

paddle/gserver/tests/mkldnn_branches_pool.conf

Lines changed: 0 additions & 60 deletions
This file was deleted.

paddle/gserver/tests/mkldnn_branches_conv.conf renamed to paddle/gserver/tests/mkldnn_simple_net.conf

Lines changed: 28 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -17,40 +17,48 @@ from paddle.trainer_config_helpers import *
1717
settings(batch_size=16)
1818
channels = get_config_arg("channels", int, 2)
1919

20-
def two_conv(input, group_name):
21-
out1 = img_conv_layer(input=input,
22-
name=group_name+'_conv1',
23-
filter_size=1,
24-
num_filters=channels,
25-
padding=0,
26-
shared_biases=True,
27-
act=ReluActivation())
20+
data = data_layer(name ="input", size=channels*16*16)
2821

29-
out2 = img_conv_layer(input=input,
30-
name=group_name+'_conv2',
22+
tmp = img_conv_layer(input=data,
23+
num_channels=channels,
3124
filter_size=3,
3225
num_filters=channels,
3326
padding=1,
3427
shared_biases=True,
3528
act=ReluActivation())
36-
return out1, out2
3729

38-
data = data_layer(name ="input", size=channels*16*16)
30+
tmp = img_pool_layer(input=tmp,
31+
pool_size=3,
32+
stride=1,
33+
padding=0,
34+
pool_type=AvgPooling())
3935

40-
conv = img_conv_layer(input=data,
41-
num_channels=channels,
36+
tmp = img_conv_layer(input=tmp,
4237
filter_size=3,
4338
num_filters=channels,
4439
padding=1,
4540
shared_biases=True,
46-
act=ReluActivation())
41+
act=LinearActivation(),
42+
bias_attr=False)
4743

48-
a1, a2 = two_conv(input=conv, group_name='a')
44+
tmp = batch_norm_layer(input=tmp,
45+
use_global_stats=False,
46+
act=ReluActivation())
4947

50-
concat = concat_layer(input=[a1, a2])
48+
tmp = img_pool_layer(input=tmp,
49+
pool_size=3,
50+
stride=2,
51+
padding=1,
52+
pool_type=MaxPooling())
5153

52-
b1, b2 = two_conv(input=conv, group_name='b')
54+
tmp = fc_layer(input=tmp,
55+
size=channels,
56+
bias_attr=False,
57+
act=ReluActivation())
5358

54-
addto = addto_layer(input=[b1, b2])
59+
out = fc_layer(input=tmp,
60+
size=10,
61+
bias_attr=True,
62+
act=SoftmaxActivation())
5563

56-
outputs([concat, addto])
64+
outputs(out)

0 commit comments

Comments (0)