Skip to content

Commit c02f773

Browse files
Author: Yancey

Fix dist demo var type error (#8600)

* Fix dist demo error
* Revert trainer_id
1 parent decaad5 commit c02f773

10 files changed: +12 lines added, -8 lines removed

python/paddle/fluid/framework.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -784,6 +784,7 @@ def rename_var(self, name, new_name):
784784
elif type(v) == Variable:
785785
var = Variable(
786786
self,
787+
type=v.type,
787788
name=new_name,
788789
error_clip=error_clip,
789790
stop_gradient=stop_gradient)

python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
# run as trainer or parameter server
4949
training_role = os.getenv("TRAINING_ROLE",
5050
"TRAINER") # get the training role: trainer/pserver
51+
5152
t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
5253

5354
if training_role == "PSERVER":
@@ -65,8 +66,6 @@
6566

6667
PASS_NUM = 100
6768
for pass_id in range(PASS_NUM):
68-
fluid.io.save_persistables(exe, "./fit_a_line.model/")
69-
fluid.io.load_persistables(exe, "./fit_a_line.model/")
7069
for data in train_reader():
7170
avg_loss_value = exe.run(trainer_prog,
7271
feed=feeder.feed(data),

python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,7 @@ def conv_block(input, num_filter, groups, dropouts):
138138
# run as trainer or parameter server
139139
training_role = os.getenv("TRAINING_ROLE",
140140
"TRAINER") # get the training role: trainer/pserver
141+
141142
t.transpile(
142143
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS)
143144

python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -191,6 +191,7 @@ def main():
191191
# run as trainer or parameter server
192192
training_role = os.getenv(
193193
"TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver
194+
194195
t.transpile(
195196
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
196197

python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,7 @@
8282
# run as trainer or parameter server
8383
training_role = os.getenv("TRAINING_ROLE",
8484
"TRAINER") # get the training role: trainer/pserver
85+
8586
t.transpile(
8687
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS)
8788
if training_role == "PSERVER":
@@ -97,9 +98,10 @@
9798
feed_list=[first_word, second_word, third_word, forth_word, next_word],
9899
place=place)
99100
exe.run(fluid.default_startup_program())
101+
trainer_prog = t.get_trainer_program()
100102
for pass_id in range(PASS_NUM):
101103
for data in train_reader():
102-
avg_cost_np = exe.run(t.get_trainer_program(),
104+
avg_cost_np = exe.run(trainer_prog,
103105
feed=feeder.feed(data),
104106
fetch_list=[avg_cost])
105107
print("avg_cost_np", avg_cost_np)

python/paddle/fluid/tests/book_distribute/notest_machine_translation.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ def main():
115115
# run as trainer or parameter server
116116
training_role = os.getenv(
117117
"TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver
118+
118119
t.transpile(
119120
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
120121

python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -64,11 +64,7 @@
6464

6565
t = fluid.DistributeTranspiler()
6666
t.transpile(
67-
optimize_ops,
68-
params_grads,
69-
0,
70-
pservers=pserver_endpoints,
71-
trainers=trainers)
67+
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=trainers)
7268

7369
if training_role == "PSERVER":
7470
pserver_prog = t.get_pserver_program(current_endpoint)

python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,7 @@ def main():
171171
current_endpoint = os.getenv("SERVER_ENDPOINT")
172172
# run as trainer or parameter server
173173
training_role = os.getenv("TRAINING_ROLE", "TRAINER")
174+
174175
t.transpile(
175176
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
176177

python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ def main():
9090
# run as trainer or parameter server
9191
training_role = os.getenv(
9292
"TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver
93+
9394
t.transpile(
9495
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
9596

python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@ def main():
102102
# run as trainer or parameter server
103103
training_role = os.getenv(
104104
"TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver
105+
105106
t.transpile(
106107
optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2)
107108

Comments (0)