Skip to content

Commit cad7bd1

Browse files
committed
Merge branch 'develop' of https://github.com/baidu/Paddle into inference
2 parents 91d24c5 + b5901a3 commit cad7bd1

File tree

420 files changed

+18946
-5697
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

420 files changed

+18946
-5697
lines changed

.travis.yml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ addons:
3030
- automake
3131
- libtool
3232
- ccache
33+
ssh_known_hosts: 52.76.173.135
3334
before_install:
3435
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
3536
# Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
@@ -42,6 +43,14 @@ script:
4243
- |
4344
timeout 2580 paddle/scripts/travis/${JOB}.sh # 43min timeout
4445
RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else false; fi;
46+
- |
47+
if [[ "$JOB" != "build_doc" ]]; then exit 0; fi;
48+
if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi;
49+
if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi;
50+
export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh
51+
export DOCS_DIR=`pwd`
52+
cd ..
53+
curl $DEPLOY_DOCS_SH | bash -s $CONTENT_DEC_PASSWD $TRAVIS_BRANCH $DOCS_DIR $DOCS_DIR/build/doc
4554
notifications:
4655
email:
4756
on_success: change

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ include(external/swig) # download, build, install swig
126126
include(external/warpctc) # download, build, install warpctc
127127
include(external/any) # download libn::any
128128
include(external/eigen) # download eigen3
129-
include(external/pybind11) # download pybind11
129+
include(external/pybind11) # download pybind11
130130
include(external/nccl)
131131

132132
include(cudnn) # set cudnn libraries, must before configure

benchmark/IntelOptimizedPaddle.md

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
# Benchmark
2+
3+
Machine:
4+
5+
- Server
6+
- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz, 2 Sockets, 20 Cores per socket
7+
- Laptop
8+
- DELL XPS15-9560-R1745: i7-7700HQ 8G 256GSSD
9+
- i5 MacBook Pro (Retina, 13-inch, Early 2015)
10+
- Desktop
11+
- i7-6700k
12+
13+
System: CentOS release 6.3 (Final), Docker 1.12.1.
14+
15+
PaddlePaddle: paddlepaddle/paddle:latest (TODO: will rerun after 0.11.0)
16+
17+
- MKL-DNN tag v0.10
18+
- MKLML 2018.0.20170720
19+
- OpenBLAS v0.2.20
20+
21+
On each machine, we will test and compare the performance of training on a single node using MKL-DNN, MKLML, and OpenBLAS respectively.
22+
23+
## Benchmark Model
24+
25+
### Server
26+
Test on batch size 64, 128, 256 on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz
27+
28+
Input image size - 3 * 224 * 224, Time: images/second
29+
30+
- VGG-19
31+
32+
| BatchSize | 64 | 128 | 256 |
33+
|--------------|-------| -----| --------|
34+
| OpenBLAS | 7.82 | 8.62 | 10.34 |
35+
| MKLML | 11.02 | 12.86 | 15.33 |
36+
| MKL-DNN | 27.69 | 28.8 | 29.27 |
37+
38+
39+
chart on batch size 128
40+
TBD
41+
42+
- ResNet
43+
- GoogLeNet
44+
45+
### Laptop
46+
TBD
47+
### Desktop
48+
TBD

benchmark/paddle/image/resnet.py

Lines changed: 213 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,213 @@
1+
#!/usr/bin/env python
"""ResNet (50/101/152 layers) benchmark configuration for PaddlePaddle."""
from paddle.trainer_config_helpers import *

# Input geometry and label cardinality: ImageNet-style 224x224 RGB images,
# 1000 classes.
height = 224
width = 224
num_class = 1000

# Knobs overridable from the command line via config args.
batch_size = get_config_arg('batch_size', int, 64)
layer_num = get_config_arg("layer_num", int, 50)
is_test = get_config_arg("is_test", bool, False)

args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
define_py_data_sources2(
    "train.list", None, module="provider", obj="process", args=args)

settings(
    batch_size=batch_size,
    # Base learning rate is scaled down by the batch size.
    learning_rate=0.01 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))


####################### Network Configuration #############
23+
def conv_bn_layer(name,
                  input,
                  filter_size,
                  num_filters,
                  stride,
                  padding,
                  channels=None,
                  active_type=ReluActivation()):
    """Convolution followed by batch normalization.

    The convolution itself is linear and bias-free; the activation
    (``active_type``) is applied by the batch-norm layer instead.
    """
    conv = img_conv_layer(
        name=name + "_conv",
        input=input,
        filter_size=filter_size,
        num_channels=channels,
        num_filters=num_filters,
        stride=stride,
        padding=padding,
        act=LinearActivation(),
        bias_attr=False)
    # At inference time (is_test), use the accumulated global statistics
    # instead of per-batch statistics.
    return batch_norm_layer(
        name=name + "_bn",
        input=conv,
        act=active_type,
        use_global_stats=is_test)
49+
50+
51+
def bottleneck_block(name, input, num_filters1, num_filters2):
    """Identity-shortcut bottleneck building block of ResNet.

    The main path is 1x1 -> 3x3 -> 1x1 convolutions; its last conv is
    linear, and ReLU is applied after the element-wise addition with the
    (identity) shortcut.
    """
    path = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=1,
        padding=0)
    path = conv_bn_layer(
        name=name + '_branch2b',
        input=path,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    # Last conv is linear; the ReLU comes after the addition below.
    path = conv_bn_layer(
        name=name + '_branch2c',
        input=path,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())

    return addto_layer(
        name=name + "_addto", input=[input, path], act=ReluActivation())
82+
83+
84+
def mid_projection(name, input, num_filters1, num_filters2, stride=2):
    """Projection-shortcut block used at the start of each ResNet stage.

    Projection shortcuts are used where dimensions increase; elsewhere
    the shortcuts are identity (see ``bottleneck_block``). branch1 is a
    1x1 projection convolution (linear, no activation) matching the
    shortcut to the main path's dimensions; branch2a/2b/2c form the
    bottleneck path. ReLU follows the element-wise addition.
    """
    # Projection shortcut: increases dimensions, no activation.
    shortcut = conv_bn_layer(
        name=name + '_branch1',
        input=input,
        filter_size=1,
        num_filters=num_filters2,
        stride=stride,
        padding=0,
        active_type=LinearActivation())

    main = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=stride,
        padding=0)
    main = conv_bn_layer(
        name=name + '_branch2b',
        input=main,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    # Last conv of the main path is linear; ReLU is applied after addto.
    main = conv_bn_layer(
        name=name + '_branch2c',
        input=main,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())

    return addto_layer(
        name=name + "_addto", input=[shortcut, main], act=ReluActivation())
129+
130+
131+
img = data_layer(name='image', size=height * width * 3)


def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3):
    """Build a 50/101/152-layer ResNet for ImageNet.

    res2_num..res5_num are the numbers of bottleneck blocks stacked in
    stages conv2_x..conv5_x. Feature-map sizes on 224x224 input:
    conv1 112x112, conv2_x 56x56, conv3_x 28x28, conv4_x 14x14,
    conv5_x 7x7.
    """
    # Stem: 7x7/2 conv + 3x3/2 max pool.
    net = conv_bn_layer(
        "conv1",
        input=img,
        filter_size=7,
        channels=3,
        num_filters=64,
        stride=2,
        padding=3)
    net = img_pool_layer(name="pool1", input=net, pool_size=3, stride=2)

    # (stage prefix, block count, bottleneck width, output width,
    #  projection stride). conv2_x keeps the spatial size (stride 1);
    # each later stage halves it (stride 2, the mid_projection default).
    stages = [
        ("res2", res2_num, 64, 256, 1),
        ("res3", res3_num, 128, 512, 2),
        ("res4", res4_num, 256, 1024, 2),
        ("res5", res5_num, 512, 2048, 2),
    ]
    for prefix, count, width1, width2, proj_stride in stages:
        net = mid_projection(
            name=prefix + "_1",
            input=net,
            num_filters1=width1,
            num_filters2=width2,
            stride=proj_stride)
        for i in xrange(2, count + 1):
            net = bottleneck_block(
                name=prefix + "_" + str(i),
                input=net,
                num_filters1=width1,
                num_filters2=width2)

    # Global average pooling over the final 7x7 maps, then softmax classifier.
    net = img_pool_layer(
        name='avgpool',
        input=net,
        pool_size=7,
        stride=1,
        pool_type=AvgPooling())

    return fc_layer(input=net, size=num_class, act=SoftmaxActivation())
199+
200+
201+
# Map the requested depth to the standard ResNet block counts for
# stages conv2_x..conv5_x.
if layer_num == 50:
    resnet = deep_res_net(3, 4, 6, 3)
elif layer_num == 101:
    resnet = deep_res_net(3, 4, 23, 3)
elif layer_num == 152:
    resnet = deep_res_net(3, 8, 36, 3)
else:
    # Fail fast: the original only printed a warning here and then
    # crashed later with a NameError on the undefined `resnet`.
    raise ValueError(
        "Wrong layer number: %s. Supported values are 50, 101 and 152." %
        layer_num)

lbl = data_layer(name="label", size=num_class)
loss = cross_entropy(name='loss', input=resnet, label=lbl)
inputs(img, lbl)
outputs(loss)

benchmark/paddle/image/run_mkldnn.sh

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5,22 +5,23 @@ function train() {
55
export OMP_DYNAMIC="FALSE"
66
export KMP_AFFINITY="granularity=fine,compact,0,0"
77
topology=$1
8-
bs=$2
9-
use_mkldnn=$3
10-
if [ $3 == "True" ]; then
8+
layer_num=$2
9+
bs=$3
10+
use_mkldnn=$4
11+
if [ $4 == "True" ]; then
1112
thread=1
12-
log="logs/${topology}-mkldnn-${bs}.log"
13-
elif [ $3 == "False" ]; then
13+
log="logs/${topology}-${layer_num}-mkldnn-${bs}.log"
14+
elif [ $4 == "False" ]; then
1415
thread=`nproc`
1516
# each trainer_count use only 1 core to avoid conflict
1617
export OMP_NUM_THREADS=1
1718
export MKL_NUM_THREADS=1
18-
log="logs/${topology}-${thread}mklml-${bs}.log"
19+
log="logs/${topology}-${layer_num}-${thread}mklml-${bs}.log"
1920
else
2021
echo "Wrong input $3, use True or False."
2122
exit 0
2223
fi
23-
args="batch_size=${bs}"
24+
args="batch_size=${bs},layer_num=${layer_num}"
2425
config="${topology}.py"
2526
paddle train --job=time \
2627
--config=$config \
@@ -40,12 +41,9 @@ if [ ! -d "logs" ]; then
4041
mkdir logs
4142
fi
4243

43-
#========== mkldnn ==========#
44-
train vgg 64 True
45-
train vgg 128 True
46-
train vgg 256 True
47-
48-
#========== mklml ===========#
49-
train vgg 64 False
50-
train vgg 128 False
51-
train vgg 256 False
44+
for use_mkldnn in True False; do
45+
for batchsize in 64 128 256; do
46+
train vgg 19 $batchsize $use_mkldnn
47+
train resnet 50 $batchsize $use_mkldnn
48+
done
49+
done

benchmark/paddle/image/vgg.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
settings(
1515
batch_size=batch_size,
16-
learning_rate=0.01 / batch_size,
16+
learning_rate=0.001 / batch_size,
1717
learning_method=MomentumOptimizer(0.9),
1818
regularization=L2Regularization(0.0005 * batch_size))
1919

0 commit comments

Comments
 (0)