
Commit 437debf
typhoonzero authored and gongweibao committed
Fix mac ci dist (#13393)
1 parent 3c5c6e7

File tree: 4 files changed (+31, -19 lines)

  paddle/fluid/operators/distributed/grpc_client.cc
  python/paddle/fluid/tests/unittests/dist_transformer.py
  python/paddle/fluid/tests/unittests/test_dist_base.py
  python/paddle/fluid/tests/unittests/test_dist_transformer.py

paddle/fluid/operators/distributed/grpc_client.cc
Lines changed: 2 additions & 2 deletions

@@ -125,7 +125,7 @@ VarHandlePtr GRPCClient::AsyncGetVar(const std::string& ep,
   VarHandlePtr h(new VarHandle(ep, "Get", var_name_val, p_ctx, p_scope));
   s->Prepare(h, time_out);

-  framework::AsyncIO([var_name_val, p_scope, p_ctx, s, this] {
+  framework::AsyncIO([var_name_val, s, this] {
     // prepare input
     sendrecv::VariableMessage req;
     req.set_varname(var_name_val);
@@ -166,7 +166,7 @@ VarHandlePtr GRPCClient::AsyncPrefetchVar(const std::string& ep,
   s->Prepare(h, time_out);

   framework::AsyncIO([in_var_name_val, out_var_name_val, ep_val, p_scope, p_ctx,
-                      time_out, s, this] {
+                      s, this] {
     auto* var = p_scope->FindVar(in_var_name_val);

     ::grpc::ByteBuffer req;

python/paddle/fluid/tests/unittests/dist_transformer.py
Lines changed: 10 additions & 8 deletions

@@ -92,7 +92,7 @@ class TrainTaskConfig(object):
     src_vocab_fpath = data_path + "vocab.bpe.32000"
     trg_vocab_fpath = data_path + "vocab.bpe.32000"
     train_file_pattern = data_path + "train.tok.clean.bpe.32000.en-de"
-    val_file_pattern = data_path + "newstest2013.tok.bpe.32000.en-de"
+    val_file_pattern = data_path + "newstest2013.tok.bpe.32000.en-de.cut"
     pool_size = 2000
     sort_type = None
     local = True
@@ -624,11 +624,12 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
                 init = True

             # Validate and save the model for inference.
-            if TrainTaskConfig.val_file_pattern is not None:
-                val_avg_cost, val_ppl = test()
-                print("[%f]" % val_avg_cost)
-            else:
-                assert (False)
+            if batch_id == 0 or batch_id == 4:
+                if TrainTaskConfig.val_file_pattern is not None:
+                    val_avg_cost, val_ppl = test()
+                    print("[%f]" % val_avg_cost)
+                else:
+                    assert (False)


 #import transformer_reader as reader
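Note: the hunk above runs validation only at batch_id 0 and 4, so the CI job still exercises the validation path without paying its full cost on every step. A minimal sketch of the pattern; the names train_step, validate, and VALIDATE_AT are illustrative, not from the commit:

    # Run an expensive validation step only at a few fixed batch ids,
    # keeping test runtime bounded while still covering the code path.
    VALIDATE_AT = {0, 4}

    def train(batches, train_step, validate):
        for batch_id, batch in enumerate(batches):
            train_step(batch)
            if batch_id in VALIDATE_AT:
                val_cost = validate()
                print("[%f]" % val_cost)
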
@@ -1701,8 +1702,9 @@ def run_pserver(self, args):
         exe.run(startup_prog)
         exe.run(pserver_prog)

-    def run_trainer(self, place, args):
-
+    def run_trainer(self, use_cuda, args):
+        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+        TrainTaskConfig.use_gpu = use_cuda
         sum_cost, avg_cost, predict, token_num, local_lr_scheduler = get_model(
             args.is_dist, not args.sync_mode)
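Note: this hunk and the matching one in test_dist_base.py change run_trainer to take a plain use_cuda boolean and build the Place inside the trainer, so the same test falls back to CPU when Paddle is built without CUDA, as on the mac CI. A sketch of the selection, using only the fluid calls that appear in this diff:

    # Derive the device from the build, then construct the Place in the
    # process that will actually use it.
    import paddle.fluid as fluid
    import paddle.fluid.core as core

    use_cuda = core.is_compiled_with_cuda()
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
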

python/paddle/fluid/tests/unittests/test_dist_base.py
Lines changed: 10 additions & 9 deletions

@@ -61,9 +61,10 @@ def run_pserver(self, args):
         exe.run(startup_prog)
         exe.run(pserver_prog)

-    def run_trainer(self, place, args):
+    def run_trainer(self, use_cuda, args):
         import paddle
         import paddle.fluid as fluid
+        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
         test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
             self.get_model(batch_size=2)
         if args.mem_opt:
@@ -91,7 +92,7 @@ def run_trainer(self, place, args):
             build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

         exe = fluid.ParallelExecutor(
-            True,
+            use_cuda,
             loss_name=avg_cost.name,
             exec_strategy=strategy,
             build_strategy=build_stra)
@@ -142,9 +143,8 @@ def runtime_main(test_class):
     if args.role == "pserver" and args.is_dist:
         model.run_pserver(args)
     else:
-        p = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        model.run_trainer(p, args)
+        use_cuda = True if core.is_compiled_with_cuda() else False
+        model.run_trainer(use_cuda, args)


 import paddle.compat as cpt
@@ -225,11 +225,12 @@ def _wait_ps_ready(self, pid):
     def check_with_place(self, model_file, delta=1e-3, check_error_log=False):
         # TODO(typhoonzero): should auto adapt GPU count on the machine.
         required_envs = {
-            "PATH": os.getenv("PATH"),
-            "PYTHONPATH": os.getenv("PYTHONPATH"),
-            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH"),
+            "PATH": os.getenv("PATH", ""),
+            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
+            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
             "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
-            "FLAGS_cudnn_deterministic": "1"
+            "FLAGS_cudnn_deterministic": "1",
+            "CPU_NUM": "1"
         }

         if check_error_log:
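Note: the getenv defaults and the new CPU_NUM entry matter because this harness starts pservers and trainers as child processes. Every value in an env dict handed to subprocess must be a string; an unset variable would put None in the dict and raise a TypeError. CPU_NUM tells ParallelExecutor how many CPU places to create when use_cuda is false. A minimal sketch of the failure mode, assuming PYTHONPATH may be unset in the CI shell:

    # Build a child environment from possibly-unset variables; the ""
    # defaults keep every value a string, which subprocess requires.
    import os
    import subprocess
    import sys

    env = {
        "PATH": os.getenv("PATH", ""),
        "PYTHONPATH": os.getenv("PYTHONPATH", ""),  # None here would raise
        "CPU_NUM": "1",
    }
    subprocess.check_call([sys.executable, "-c", "print('ok')"], env=env)
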

python/paddle/fluid/tests/unittests/test_dist_transformer.py
Lines changed: 9 additions & 0 deletions

@@ -14,6 +14,7 @@

 from __future__ import print_function

+import os
 import unittest
 import paddle
 from test_dist_base import TestDistBase
@@ -44,6 +45,14 @@ def download_files():
     test_url = url_prefix + 'newstest2013.tok.bpe.32000.en-de'
     test_md5 = '9dd74a266dbdb25314183899f269b4a2'
     paddle.dataset.common.download(test_url, 'test_dist_transformer', test_md5)
+    # cut test data for faster CI
+    orig_path = os.path.join(paddle.dataset.common.DATA_HOME,
+                             "test_dist_transformer",
+                             "newstest2013.tok.bpe.32000.en-de")
+    head_path = os.path.join(paddle.dataset.common.DATA_HOME,
+                             "test_dist_transformer",
+                             "newstest2013.tok.bpe.32000.en-de.cut")
+    os.system("head -n10 %s > %s" % (orig_path, head_path))


 class TestDistTransformer2x2Sync(TestDistBase):
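Note: os.system("head -n10 ...") works on the Linux and mac CI hosts but is shell-dependent and ignores failures. A pure-Python equivalent, sketched with a hypothetical helper name (cut_file is not part of the commit):

    # Copy the first num_lines lines of orig_path into head_path,
    # mirroring `head -n10 orig > cut` without shelling out.
    import itertools

    def cut_file(orig_path, head_path, num_lines=10):
        with open(orig_path) as src, open(head_path, "w") as dst:
            dst.writelines(itertools.islice(src, num_lines))
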
