Commit 7905e36

Implement Book chapter 02 in distributed framework (#7629)
1 parent 2360dd2 commit 7905e36

File tree: 1 file changed (+87 −0 lines)

@@ -0,0 +1,87 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import os
BATCH_SIZE = 128
PASS_NUM = 100
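# The network below is the MLP from Book chapter 02 (recognize digits):
# a 784-dimensional MNIST image feeds two ReLU hidden layers (128 and 64
# units) and a 10-way softmax output.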
images = fluid.layers.data(name='x', shape=[784], dtype='float32')

# TODO(aroraabhinav) Add regularization and error clipping after
# Issue 7432 (https://github.com/PaddlePaddle/Paddle/issues/7432) is resolved.
hidden1 = fluid.layers.fc(input=images, size=128, act='relu')
hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
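# Labels are class indices in [0, 9]; cross-entropy against the softmax
# output, averaged over the batch, is the training objective.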
label = fluid.layers.data(name='y', shape=[1], dtype='int64')

cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)

optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
optimize_ops, params_grads = optimizer.minimize(avg_cost)
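# minimize() returns the optimization ops and the (parameter, gradient)
# pairs; the DistributeTranspiler below uses them to decide what runs on
# the parameter servers versus the trainers.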
accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=8192),
    batch_size=BATCH_SIZE)
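# MNIST training samples are shuffled within an 8192-sample buffer and
# grouped into batches of BATCH_SIZE.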
place = fluid.CPUPlace()
exe = fluid.Executor(place)
t = fluid.DistributeTranspiler()
# list of all parameter server endpoints, used for splitting parameters
pserver_endpoints = os.getenv("PSERVERS")
# server endpoint for the current node
current_endpoint = os.getenv("SERVER_ENDPOINT")
# run as trainer or parameter server
training_role = os.getenv("TRAINING_ROLE",
                          "TRAINER")  # get the training role: trainer/pserver
t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
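# transpile() rewrites the single-machine program into a trainer program and
# per-pserver programs; trainers=2 tells it to expect two trainer processes.
# Illustrative environment for a local run (values are examples only):
#   PSERVERS=127.0.0.1:6174 SERVER_ENDPOINT=127.0.0.1:6174 TRAINING_ROLE=PSERVER
#   PSERVERS=127.0.0.1:6174 TRAINING_ROLE=TRAINER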
if training_role == "PSERVER":
    if not current_endpoint:
        print("need env SERVER_ENDPOINT")
        exit(1)
    pserver_prog = t.get_pserver_program(current_endpoint, optimize_ops)
    exe.run(fluid.default_startup_program())
    exe.run(pserver_prog)
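    # The pserver program blocks here, listening for gradients from trainers
    # and applying the optimizer updates to its shard of the parameters.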
elif training_role == "TRAINER":
    trainer_prog = t.get_trainer_program()
    feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
    exe.run(fluid.default_startup_program())

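    # Training loop: each pass streams the shuffled MNIST batches through the
    # transpiled trainer program; gradients are sent to the pservers and the
    # updated parameters are pulled back before the next batch.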
    for pass_id in range(PASS_NUM):
        accuracy.reset(exe)
        batch_id = 0
        for data in train_reader():
            loss, acc = exe.run(trainer_prog,
                                feed=feeder.feed(data),
                                fetch_list=[avg_cost] + accuracy.metrics)
            pass_acc = accuracy.eval(exe)
            if batch_id % 100 == 0:
                print("batch_id %d, loss: %f, acc: %f" %
                      (batch_id, loss, pass_acc))
            batch_id += 1

        pass_acc = accuracy.eval(exe)
        print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc))
else:
    print("environment var TRAINING_ROLE should be TRAINER or PSERVER")
