
Commit 2e74cf4

Merge pull request #5273 from tensor-tang/merge
refine mkldnn unit test
2 parents: 8401039 + 1e8300f

12 files changed: 220 additions & 402 deletions

paddle/gserver/tests/MKLDNNTester.cpp

Lines changed: 30 additions & 20 deletions
@@ -273,31 +273,37 @@ void MKLDNNTester::printVector(const VectorPtr& v) {
   VLOG(MKLDNN_ALL) << std::endl << ostr.str();
 }
 
-double MKLDNNTester::getDelta(const real* d1,
-                              const real* d2,
+double MKLDNNTester::getDelta(const real* refer,
+                              const real* value,
                               size_t len,
                               const float failRate,
                               const float thres) {
   double delta = 0, sum = 0;
   int failCnt = 0;
   const double eps = 1e-5;
-  double maxOut = 0;
+  double maxRatio = 0;
   for (size_t i = 0; i < len; ++i) {
-    double ref = fabs(d2[i]);
-    double diff = fabs(d1[i] - d2[i]);
+    double ref = fabs(refer[i]);
+    double val = fabs(value[i]);
+    double diff = fabs(refer[i] - value[i]);
     delta += diff;
     sum += ref;
-    if (ref > eps && fabs(d1[i]) > eps && diff / ref > thres) {
-      maxOut = std::max(maxOut, diff / ref);
+    if (ref < eps && val < eps) {  // both values are very small
+      continue;
+    }
+    double ratio = diff / ref;
+    if (ratio > thres) {
+      maxRatio = std::max(maxRatio, ratio);
       failCnt++;
     }
   }
-  EXPECT_TRUE(std::isnormal(sum));
   EXPECT_FALSE(std::isinf(sum));
+  EXPECT_FALSE(std::isnan(sum));
   EXPECT_FALSE(std::isnan(delta));
   VLOG(MKLDNN_ALL) << "reference avg data: " << sum / len
                    << ", delta: " << delta / sum << ", failCnt:" << failCnt;
-  return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
+  double res = sum > eps ? delta / sum : eps;
+  return (failCnt / (float)len) > failRate ? maxRatio : res;
 }
 
 double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
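
For readers who want to poke at the refined metric outside the test harness, here is a self-contained sketch that mirrors the logic above (plain doubles instead of Paddle's real type, gtest checks dropped, and the driver values are made up):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>

// Illustration-only reimplementation of the refined metric: skip element
// pairs where both sides are near zero, count elements whose relative error
// exceeds `thres`, and return the worst relative error when too many
// elements fail, otherwise the aggregate relative delta.
double getDelta(const double* refer, const double* value, size_t len,
                double failRate = 1e-3, double thres = 0.1) {
  const double eps = 1e-5;
  double delta = 0, sum = 0, maxRatio = 0;
  size_t failCnt = 0;
  for (size_t i = 0; i < len; ++i) {
    double ref = std::fabs(refer[i]);
    double val = std::fabs(value[i]);
    double diff = std::fabs(refer[i] - value[i]);
    delta += diff;
    sum += ref;
    if (ref < eps && val < eps) continue;  // both values are very small
    double ratio = diff / ref;
    if (ratio > thres) {
      maxRatio = std::max(maxRatio, ratio);
      ++failCnt;
    }
  }
  double res = sum > eps ? delta / sum : eps;
  return failCnt / static_cast<double>(len) > failRate ? maxRatio : res;
}

int main() {
  double ref[] = {1.0, 2.0, 0.0};
  double val[] = {1.001, 1.998, 1e-7};  // last pair is tiny on both sides
  std::printf("%g\n", getDelta(ref, val, 3));  // no failures -> ~0.001
}
```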
@@ -515,12 +521,16 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
     gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN);
     // save forward result
     for (size_t k = 0; k < outArgs.size(); k++) {
-      MatrixPtr value = Matrix::create(outArgs[k].value->getHeight(),
-                                       outArgs[k].value->getWidth(),
-                                       false,
-                                       false);
-      value->copyFrom(*outArgs[k].value);
-      out.outValues.push_back(value);
+      const MatrixPtr& src = outArgs[k].value;
+      MatrixPtr dst =
+          Matrix::create(src->getHeight(), src->getWidth(), false, false);
+      if (typeid(*src) == typeid(MKLDNNMatrix)) {
+        MKLDNNMatrixPtr dnnSrc = std::dynamic_pointer_cast<MKLDNNMatrix>(src);
+        dnnSrc->copyTo(*dst);
+      } else {
+        dst->copyFrom(*src);
+      }
+      out.outValues.push_back(dst);
     }
 
     // random backward input
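
The new branch matters because a forward output may be an MKLDNNMatrix, which presumably keeps its data in a library-specific memory layout that copyTo converts back to the plain layout before comparison. A toy model of the typeid dispatch (stand-in class names, not Paddle's):

```cpp
#include <iostream>
#include <memory>
#include <typeinfo>

// Toy model of the copy dispatch above: a matrix subtype with its own
// internal layout exports itself via copyTo(), while a plain matrix is
// copied with the generic copyFrom().
struct Matrix {
  virtual ~Matrix() = default;  // polymorphic, so typeid sees dynamic type
  void copyFrom(const Matrix&) { std::cout << "plain copyFrom\n"; }
};

struct DnnMatrix : Matrix {
  void copyTo(Matrix&) const { std::cout << "reorder, then copyTo\n"; }
};

void snapshot(const std::shared_ptr<Matrix>& src, Matrix& dst) {
  if (typeid(*src) == typeid(DnnMatrix)) {  // exact dynamic-type check
    std::static_pointer_cast<DnnMatrix>(src)->copyTo(dst);
  } else {
    dst.copyFrom(*src);
  }
}

int main() {
  Matrix dst;
  snapshot(std::make_shared<Matrix>(), dst);     // prints: plain copyFrom
  snapshot(std::make_shared<DnnMatrix>(), dst);  // prints: reorder, then copyTo
}
```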
@@ -543,19 +553,19 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
 void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
   CHECK_EQ(ref.outValues.size(), dnn.outValues.size());
   CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size());
-  VLOG(MKLDNN_TESTS) << "compare value size: " << ref.outValues.size();
   for (size_t i = 0; i < ref.outValues.size(); i++) {
+    VLOG(MKLDNN_TESTS) << "compare value index: " << i;
     EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps);
   }
-  VLOG(MKLDNN_TESTS) << "compare param size: " << ref.outValues.size();
   for (size_t i = 0; i < ref.paraValues.size(); i++) {
+    VLOG(MKLDNN_TESTS) << "compare param index: " << i;
     EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps);
   }
 }
 
-void MKLDNNTester::runBranchesTest(const std::string& configPath,
-                                   size_t iter,
-                                   float eps) {
+void MKLDNNTester::runNetTest(const std::string& configPath,
+                              size_t iter,
+                              float eps) {
   DataIn in;
   initArgument(in, configPath, iter);
   DataOut outCpu, outDnn;

paddle/gserver/tests/MKLDNNTester.h

Lines changed: 10 additions & 10 deletions
@@ -85,17 +85,17 @@ class MKLDNNTester {
            bool printDetails = false,
            size_t iter = 3,
            float epsilon = 1e-4);
-  static void runBranchesTest(const std::string& configPath,
-                              size_t iter = 3,
-                              float eps = 1e-4);
+  static void runNetTest(const std::string& configPath,
+                         size_t iter = 2,
+                         float eps = 1e-4);
   static void initArgument(DataIn& data,
                            const std::string& configPath,
-                           size_t iter = 3);
+                           size_t iter = 2);
   static void getOutResult(const std::string& configPath,
                            DataIn& in,
                            DataOut& out,
                            bool use_mkldnn,
-                           size_t iter = 3);
+                           size_t iter = 2);
 
  private:
   void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
@@ -128,13 +128,13 @@ class MKLDNNTester {
 
   /**
    * Get delta percent
-   * if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the
-   * max(diff/ref)
-   * else return sum(abs(a-b)) / sum(abs(b))
+   * if many(>failRate) wrong(abs(val-ref)/abs(ref) > thres) points
+   * return the max(diff/ref)
+   * else return sum(abs(diff)) / sum(abs(ref))
    * The return value should be smaller than eps when passing.
    */
-  static double getDelta(const real* d1,
-                         const real* d2,
+  static double getDelta(const real* refer,
+                         const real* value,
                          size_t len,
                          const float failRate = 1e-3,
                          const float thres = 0.1);
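
Restated as a formula (my notation, not from the source): writing the per-element relative error as r_i = |value_i - refer_i| / |refer_i|, the documented contract is

```latex
\mathrm{getDelta}(\mathrm{refer},\mathrm{value}) =
\begin{cases}
  \max_i r_i,
    & \text{if } \dfrac{\#\{\, i : r_i > \mathrm{thres} \,\}}{\mathrm{len}} > \mathrm{failRate}, \\[6pt]
  \dfrac{\sum_i |\mathrm{value}_i - \mathrm{refer}_i|}{\sum_i |\mathrm{refer}_i|},
    & \text{otherwise,}
\end{cases}
```

and a comparison passes when the returned value is below eps.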
Lines changed: 142 additions & 0 deletions
@@ -0,0 +1,142 @@
+# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle.trainer_config_helpers import *
+
+settings(batch_size=16)
+channels = get_config_arg("channels", int, 2)
+
+def two_conv(input, group_name):
+    out1 = img_conv_layer(input=input,
+                          name=group_name+'_conv1_',
+                          filter_size=1,
+                          num_filters=channels,
+                          padding=0,
+                          shared_biases=True,
+                          act=ReluActivation())
+
+    out2 = img_conv_layer(input=input,
+                          name=group_name+'_conv2_',
+                          filter_size=3,
+                          num_filters=channels,
+                          padding=1,
+                          shared_biases=True,
+                          act=ReluActivation())
+    return out1, out2
+
+def two_conv_bn(input, group_name):
+    out1, out2 = two_conv(input, group_name)
+    out1 = batch_norm_layer(input=out1,
+                            name=group_name+'_bn1_',
+                            use_global_stats=False,
+                            act=ReluActivation())
+
+    out2 = batch_norm_layer(input=out2,
+                            name=group_name+'_bn2_',
+                            use_global_stats=False,
+                            act=ReluActivation())
+    return out1, out2
+
+def two_conv_pool(input, group_name):
+    out1, out2 = two_conv(input, group_name)
+    out1 = img_pool_layer(input=out1,
+                          name=group_name+'_pool1_',
+                          pool_size=3,
+                          stride=2,
+                          padding=0,
+                          pool_type=MaxPooling())
+
+    out2 = img_pool_layer(input=out2,
+                          name=group_name+'_pool2_',
+                          pool_size=5,
+                          stride=2,
+                          padding=1,
+                          pool_type=MaxPooling())
+    return out1, out2
+
+def two_fc(input, group_name):
+    out1 = fc_layer(input=input,
+                    name=group_name+'_fc1_',
+                    size=channels,
+                    bias_attr=False,
+                    act=LinearActivation())
+
+    out2 = fc_layer(input=input,
+                    name=group_name+'_fc2_',
+                    size=channels,
+                    bias_attr=False,
+                    act=LinearActivation())
+    return out1, out2
+
+data = data_layer(name="input", size=channels*16*16)
+
+tmp = img_conv_layer(input=data,
+                     num_channels=channels,
+                     filter_size=3,
+                     num_filters=channels,
+                     padding=1,
+                     shared_biases=True,
+                     act=ReluActivation())
+
+a1, a2 = two_conv(tmp, 'conv_branch')
+tmp = addto_layer(input=[a1, a2],
+                  act=ReluActivation(),
+                  bias_attr=False)
+
+tmp = img_pool_layer(input=tmp,
+                     pool_size=3,
+                     stride=2,
+                     padding=1,
+                     pool_type=AvgPooling())
+
+b1, b2 = two_conv_pool(tmp, 'pool_branch')
+tmp = concat_layer(input=[b1, b2])
+
+tmp = img_pool_layer(input=tmp,
+                     num_channels=channels*2,
+                     pool_size=3,
+                     stride=2,
+                     padding=1,
+                     pool_type=MaxPooling())
+
+tmp = img_conv_layer(input=tmp,
+                     filter_size=3,
+                     num_filters=channels,
+                     padding=1,
+                     stride=2,
+                     shared_biases=True,
+                     act=LinearActivation(),
+                     bias_attr=False)
+
+tmp = batch_norm_layer(input=tmp,
+                       use_global_stats=False,
+                       act=ReluActivation())
+
+c1, c2 = two_conv_bn(tmp, 'bn_branch')
+tmp = addto_layer(input=[c1, c2],
+                  act=ReluActivation(),
+                  bias_attr=False)
+
+tmp = fc_layer(input=tmp, size=channels,
+               bias_attr=True,
+               act=ReluActivation())
+
+d1, d2 = two_fc(tmp, 'fc_branch')
+tmp = addto_layer(input=[d1, d2])
+
+out = fc_layer(input=tmp, size=10,
+               bias_attr=True,
+               act=SoftmaxActivation())
+
+outputs(out)
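
A config like this is meant to be driven end to end through MKLDNNTester::runNetTest, which runs the net once on the CPU reference path and once with MKL-DNN layers enabled, then compares all outputs and parameters. A hedged gtest sketch (the test name and config path are placeholders, not taken from this page):

```cpp
#include <gtest/gtest.h>
#include "MKLDNNTester.h"

// Hypothetical driver: run the whole net on both backends and require every
// output value and parameter to match within eps (defaults after this
// commit: iter = 2, eps = 1e-4).
TEST(MKLDNNNet, SimpleNet) {
  MKLDNNTester::runNetTest("path/to/this/config.conf", /*iter=*/2,
                           /*eps=*/1e-4);
}
```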

paddle/gserver/tests/mkldnn_branches_fc.conf

Lines changed: 0 additions & 58 deletions
This file was deleted.

paddle/gserver/tests/mkldnn_branches_pool.conf

Lines changed: 0 additions & 60 deletions
This file was deleted.
