Skip to content

Commit 12e9bf6

Browse files
committed
clean up
1 parent ab72d28 commit 12e9bf6

File tree

3 files changed

+19
-121
lines changed

3 files changed

+19
-121
lines changed

paddle/fluid/operators/distributed/send_recv.proto

Lines changed: 0 additions & 97 deletions
This file was deleted.

python/paddle/fluid/tests/unittests/parallel_executor_test_base.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,6 @@ def run_executor(exe, feed, fetch_list, program=None):
7171
exec_strategy.allow_op_delay = allow_op_delay
7272

7373
build_strategy = fluid.BuildStrategy()
74-
build_strategy.debug_graphviz_path = "/tmp/graphviz"
7574
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \
7675
if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce
7776

python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py

Lines changed: 19 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,16 @@ def check_simple_fc_convergence(self, use_cuda, use_reduce=False):
152152
use_cuda=use_cuda,
153153
use_reduce=use_reduce)
154154

155+
def test_simple_fc(self):
156+
# use_cuda
157+
self.check_simple_fc_convergence(True)
158+
self.check_simple_fc_convergence(False)
159+
160+
def test_simple_fc_with_new_strategy(self):
161+
# use_cuda, use_reduce
162+
self._compare_reduce_and_allreduce(simple_fc_net, True)
163+
self._compare_reduce_and_allreduce(simple_fc_net, False)
164+
155165
def check_simple_fc_parallel_accuracy(self, use_cuda):
156166
if use_cuda and not core.is_compiled_with_cuda():
157167
return
@@ -178,6 +188,10 @@ def check_simple_fc_parallel_accuracy(self, use_cuda):
178188
for p_l in parallel_last_loss:
179189
self.assertAlmostEquals(p_l, single_last_loss[0], delta=1e-6)
180190

191+
def test_simple_fc_parallel_accuracy(self):
192+
self.check_simple_fc_parallel_accuracy(True)
193+
self.check_simple_fc_parallel_accuracy(False)
194+
181195
def check_batchnorm_fc_convergence(self, use_cuda):
182196
if use_cuda and not core.is_compiled_with_cuda():
183197
return
@@ -192,31 +206,13 @@ def check_batchnorm_fc_convergence(self, use_cuda):
192206
"label": label},
193207
use_cuda=use_cuda)
194208

195-
def check_batchnorm_fc_convergence_use_reduce(self, use_cuda):
196-
if use_cuda and not core.is_compiled_with_cuda():
197-
return
198-
self.check_network_convergence(
199-
fc_with_batchnorm, use_cuda=use_cuda, use_reduce=False)
200-
"""
201-
img, label = self._init_data()
202-
203-
all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
204-
fc_with_batchnorm,
205-
feed_dict={"image": img,
206-
"label": label},
207-
use_cuda=use_cuda,
208-
use_reduce=False)
209-
reduce_first_loss, reduce_last_loss = self.check_network_convergence(
210-
fc_with_batchnorm,
211-
feed_dict={"image": img,
212-
"label": label},
213-
use_cuda=use_cuda,
214-
use_reduce=True)
215-
"""
209+
def test_batchnorm_fc(self):
210+
self.check_batchnorm_fc_convergence(True)
211+
self.check_batchnorm_fc_convergence(False)
216212

217213
def test_batchnorm_fc_with_new_strategy(self):
218-
self.check_batchnorm_fc_convergence_use_reduce(True)
219-
# self.check_batchnorm_fc_convergence_use_reduce(False)
214+
self._compare_reduce_and_allreduce(fc_with_batchnorm, True)
215+
self._compare_reduce_and_allreduce(fc_with_batchnorm, False)
220216

221217

222218
if __name__ == '__main__':

0 commit comments

Comments (0)