
Commit 2ac46d5

Add distributed implementation of image classification (#7687)
1 parent 2ad6c8b commit 2ac46d5

File tree

1 file changed: +173 -0 lines changed

@@ -0,0 +1,173 @@
#Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

from __future__ import print_function

import os
import sys

import paddle.v2 as paddle
import paddle.v2.fluid as fluid

# Cluster settings: the number of trainer nodes expected by the distribute
# transpiler, and the mini-batch size / number of passes run by each trainer.
TRAINERS = 5
BATCH_SIZE = 128
PASS_NUM = 100


# ResNet for CIFAR-10; (depth - 2) must be divisible by 6 so the three
# residual stages get an equal number of basic blocks.
def resnet_cifar10(input, depth=32):
    def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
        tmp = fluid.layers.conv2d(
            input=input,
            filter_size=filter_size,
            num_filters=ch_out,
            stride=stride,
            padding=padding,
            act=None,
            bias_attr=False)
        return fluid.layers.batch_norm(input=tmp, act=act)

    def shortcut(input, ch_in, ch_out, stride):
        # Project the input with a 1x1 convolution when the channel count changes.
        if ch_in != ch_out:
            return conv_bn_layer(input, ch_out, 1, stride, 0, None)
        else:
            return input

    def basicblock(input, ch_in, ch_out, stride):
        tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None)
        short = shortcut(input, ch_in, ch_out, stride)
        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')

    def layer_warp(block_func, input, ch_in, ch_out, count, stride):
        tmp = block_func(input, ch_in, ch_out, stride)
        for i in range(1, count):
            tmp = block_func(tmp, ch_out, ch_out, 1)
        return tmp

    assert (depth - 2) % 6 == 0
    n = (depth - 2) / 6
    conv1 = conv_bn_layer(
        input=input, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    pool = fluid.layers.pool2d(
        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
    return pool


# VGG-16 variant with batch normalization inside the conv blocks and dropout
# before the fully connected head.
def vgg16_bn_drop(input):
    def conv_block(input, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=input,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
    return fc2


classdim = 10
data_shape = [3, 32, 32]

# CIFAR-10 inputs: 3x32x32 images and an integer class label.
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# Pick the network from the first command line argument; default to VGG.
net_type = "vgg"
if len(sys.argv) >= 2:
    net_type = sys.argv[1]

if net_type == "vgg":
    print("train vgg net")
    net = vgg16_bn_drop(images)
elif net_type == "resnet":
    print("train resnet")
    net = resnet_cifar10(images, 32)
else:
    raise ValueError("%s network is not supported" % net_type)

predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)

# minimize() returns the optimize ops and the parameter/gradient pairs that
# the distribute transpiler below needs in order to split the program.
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimize_ops, params_grads = optimizer.minimize(avg_cost)

accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.cifar.train10(), buf_size=128 * 10),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)

# Transpile the single-node program into a distributed one; the role and
# endpoints are taken from environment variables.
t = fluid.DistributeTranspiler()
# all parameter server endpoints, used for splitting parameters
pserver_endpoints = os.getenv("PSERVERS")
# server endpoint for the current node
current_endpoint = os.getenv("SERVER_ENDPOINT")
# run as trainer or parameter server
training_role = os.getenv("TRAINING_ROLE",
                          "TRAINER")  # get the training role: trainer/pserver
t.transpile(
    optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS)

if training_role == "PSERVER":
    # Parameter server: run the program the transpiler generated for this
    # endpoint, which serves parameter updates to the trainers.
    if not current_endpoint:
        print("need env SERVER_ENDPOINT")
        exit(1)
    print("start pserver at:", current_endpoint)
    pserver_prog = t.get_pserver_program(current_endpoint)
    pserver_startup = t.get_startup_program(current_endpoint, pserver_prog)
    exe.run(pserver_startup)
    exe.run(pserver_prog)
    print("pserver run end")
elif training_role == "TRAINER":
    # Trainer: run the transpiled trainer program over the CIFAR-10 reader
    # and report per-batch loss/accuracy plus the running pass accuracy.
    print("start trainer")
    trainer_prog = t.get_trainer_program()
    feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
    exe.run(fluid.default_startup_program())
    for pass_id in range(PASS_NUM):
        accuracy.reset(exe)
        for data in train_reader():
            loss, acc = exe.run(trainer_prog,
                                feed=feeder.feed(data),
                                fetch_list=[avg_cost] + accuracy.metrics)
            pass_acc = accuracy.eval(exe)
            print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" +
                  str(pass_acc))
        # This model is slow, so if it can train two mini-batches we consider
        # the distributed setup to be working properly.
    print("trainer run end")
else:
    print("environment variable TRAINING_ROLE should be TRAINER or PSERVER")
    exit(1)
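
For readers who want to try the script, here is a minimal, hypothetical launch sketch based only on what the file itself reads: the PSERVERS, SERVER_ENDPOINT, and TRAINING_ROLE environment variables and the optional vgg/resnet command line argument. The script name, endpoint address, and the use of subprocess are illustrative assumptions, not part of the commit; real deployments would use their own cluster tooling.

# Hypothetical single-machine smoke test: one pserver process plus trainer
# processes matching the TRAINERS constant in the script. Script name and
# endpoint are made up for illustration.
import os
import subprocess

SCRIPT = "dist_image_classification.py"   # assumed file name
ENDPOINT = "127.0.0.1:6174"               # assumed pserver endpoint

pserver_env = dict(os.environ,
                   PSERVERS=ENDPOINT,
                   SERVER_ENDPOINT=ENDPOINT,
                   TRAINING_ROLE="PSERVER")
trainer_env = dict(os.environ,
                   PSERVERS=ENDPOINT,
                   TRAINING_ROLE="TRAINER")

# Start the parameter server first so the trainers have something to talk to.
pserver = subprocess.Popen(["python", SCRIPT, "vgg"], env=pserver_env)

# Launch as many trainer processes as the script's TRAINERS constant (5).
trainers = [subprocess.Popen(["python", SCRIPT, "vgg"], env=trainer_env)
            for _ in range(5)]
for p in trainers:
    p.wait()
pserver.terminate()

The pserver process blocks while serving, so it is started with Popen and terminated once the trainers finish; swapping "vgg" for "resnet" selects the other network defined in the file.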
