Commit 0acc93c
Author: Yancey

add dist transpiler unit test (#10485)

* add dist transpiler unit test
* update by comment
* update by comment
* fix ci
* fix ci

Parent: a94aa4d

File tree: 3 files changed, +116 −2 lines
Lines changed: 113 additions & 0 deletions (new test file, shown in full)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid.transpiler.distribute_transpiler import delete_ops
import numpy


class TestDistTranspiler(unittest.TestCase):
    def setUp(self):
        self.trainer_id = 0
        self.trainers = 2
        self.pservers = 2
        self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175"
        self.current_pserver_ep = "127.0.0.1:6174"

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')

        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'))

        y = fluid.layers.data(name='y', shape=[1], dtype='float32')

        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)

        optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
        return optimize_ops, params_grads

    def test_transpiler(self):
        trainer = self.get_trainer()
        pserver, startup = self.get_pserver(self.current_pserver_ep)

        self.assertEqual([op.type for op in trainer.global_block().ops],
                         self.get_expect_trainer_ops())

        self.assertEqual(len(pserver.blocks), 3)
        # block 0: listen_and_serv
        self.assertEqual([op.type for op in pserver.blocks[0].ops],
                         ["listen_and_serv"])
        # block 1: optimize pass
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "sgd"])

        # confirm the startup program
        self.assertEqual([op.type for op in startup.global_block().ops], [
            "fill_constant", "fill_constant", "uniform_random", "uniform_random"
        ])

        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))

    def get_main_program(self):
        main = fluid.Program()

        with fluid.program_guard(main):
            self.net_conf()

        return main

    def get_expect_trainer_ops(self):
        trainer = fluid.Program()

        with fluid.program_guard(trainer):
            optimize_ops, params_grads = self.net_conf()

        delete_ops(trainer.global_block(), optimize_ops)
        return [op.type for op in trainer.global_block().ops
                ] + ["split_byref", "send", "concat"]

    def get_trainer(self):
        return self._transpiler_instance().get_trainer_program()

    def get_pserver(self, ep):
        t = self._transpiler_instance()
        pserver = t.get_pserver_program(ep)
        startup = t.get_startup_program(ep, pserver)
        return pserver, startup

    def _transpiler_instance(self):
        main = self.get_main_program()
        t = fluid.DistributeTranspiler()
        t.transpile(
            self.trainer_id,
            program=main,
            pservers=self.pserver_eps,
            trainers=self.trainers)
        return t


if __name__ == "__main__":
    unittest.main()
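The pivotal assertion in test_transpiler is the final shape check: the fc layer maps a 1000-d input to size=1000, so fc_w has shape (1000, 1000), and splitting it evenly across the two pservers yields (500, 1000) blocks. Below is a minimal sketch of that arithmetic, assuming an even row-wise split; split_rowwise is an illustrative helper, not a PaddlePaddle API:

# Illustrative sketch only; split_rowwise is hypothetical, not a
# PaddlePaddle function.
def split_rowwise(shape, num_pservers):
    """Split a 2-D parameter evenly by rows, one block per pserver."""
    rows, cols = shape
    assert rows % num_pservers == 0, "sketch assumes an even split"
    return [(rows // num_pservers, cols)] * num_pservers

# fc maps a 1000-d input to size=1000, so fc_w is (1000, 1000).
print(split_rowwise((1000, 1000), num_pservers=2))
# -> [(500, 1000), (500, 1000)], matching the asserted fc_w.block1 shape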

python/paddle/fluid/transpiler/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from distribute_transpiler import DistributeTranspiler
 from inference_transpiler import InferenceTranspiler
 from memory_optimization_transpiler import memory_optimize, release_memory

python/paddle/fluid/transpiler/distribute_transpiler.py

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,7 @@
 import math
 
 import distributed_splitter as splitter
-from .. import core
+from .. import core, framework
 from ..framework import Program, default_main_program, \
                         default_startup_program, \
                         Variable, Parameter, grad_var_name

@@ -417,7 +417,7 @@ def get_pserver_program(self, endpoint):
         def __append_optimize_op__(op, block, grad_to_block_id):
             if self._is_opt_op(op):
                 self._append_pserver_ops(block, op, endpoint, grad_to_block_id,
-                                         default_main_program())
+                                         self.origin_program)
             else:
                 self._append_pserver_non_opt_ops(block, op)
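The second hunk is the substantive fix: the pserver-side optimize ops are now built from self.origin_program, the program actually passed to transpile(), rather than from the global default_main_program(), which may point at a different program by the time get_pserver_program() runs. A minimal sketch of that failure mode, using hypothetical stand-ins rather than the real PaddlePaddle objects:

# Hypothetical stand-ins for illustration; not PaddlePaddle APIs.
class FakeProgram(object):
    def __init__(self, name):
        self.name = name

default_program = FakeProgram("main-A")  # stands in for default_main_program()

class Transpiler(object):
    def transpile(self, program):
        self.origin_program = program    # remember the program we transpiled

    def pserver_source_buggy(self):
        return default_program           # global state: may have been rebound

    def pserver_source_fixed(self):
        return self.origin_program       # always the transpiled program

t = Transpiler()
t.transpile(default_program)
default_program = FakeProgram("main-B")  # caller switches the global default
assert t.pserver_source_fixed().name == "main-A"
assert t.pserver_source_buggy().name == "main-B"  # the bug: wrong program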
