Commit 5856062

Merge pull request #11854 from JiayiFeng/dev_data_balance
Data balance for the ParallelExecutor
2 parents 87dd01d + ff4317c commit 5856062

13 files changed: +469 -7 lines changed

paddle/fluid/framework/details/CMakeLists.txt

Lines changed: 2 additions & 1 deletion
@@ -25,11 +25,12 @@ else()
   cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor)
 endif()
 
+cc_library(data_balance_op_handle SRCS data_balance_op_handle.cc DEPS op_handle_base scope lod_tensor)
 cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor)
 cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope)
 
 cc_library(multi_devices_graph_builder SRCS multi_devices_graph_builder.cc DEPS ssa_graph_builder computation_op_handle
-        scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle)
+        scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle)
 
 
 cc_library(ssa_graph_builder_factory SRCS ssa_graph_builder_factory.cc DEPS multi_devices_graph_builder ssa_graph_printer ssa_graph_checker)

paddle/fluid/framework/details/build_strategy.h

Lines changed: 2 additions & 0 deletions
@@ -33,6 +33,8 @@ struct BuildStrategy {
   GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice};
 
   std::string debug_graphviz_path_{""};
+
+  bool enable_data_balance_{true};
 };
 
 }  // namespace details
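
The new flag defaults to true, so multi-device graphs pick up the balancing pass automatically. A minimal sketch of opting out (editor's illustration; the surrounding ParallelExecutor setup is assumed and is not part of this commit):

// Disable the balancing pass so that no DataBalanceOpHandle is inserted
// after the read op; the strategy is then handed to the graph builder
// exactly as before this commit.
paddle::framework::details::BuildStrategy strategy;
strategy.enable_data_balance_ = false;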
paddle/fluid/framework/details/data_balance_op_handle.cc

Lines changed: 154 additions & 0 deletions
@@ -0,0 +1,154 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/details/data_balance_op_handle.h"
+#include <algorithm>
+#include "paddle/fluid/framework/details/container_cast.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+#ifdef PADDLE_WITH_CUDA
+DataBalanceOpHandle::DataBalanceOpHandle(
+    const std::vector<Scope *> &local_scopes,
+    const std::vector<platform::Place> &places,
+    const platform::NCCLContextMap *ctxs)
+    : local_scopes_(local_scopes), places_(places) {
+  if (ctxs) {
+    for (auto &p : places_) {
+      this->dev_ctxes_[p] = ctxs->DevCtx(p);
+    }
+  }
+}
+#else
+DataBalanceOpHandle::DataBalanceOpHandle(
+    const std::vector<Scope *> &local_scopes,
+    const std::vector<platform::Place> &places)
+    : local_scopes_(local_scopes), places_(places) {}
+#endif
+
+std::string DataBalanceOpHandle::Name() const { return "data balance"; }
+
+std::vector<std::array<int, 3>> DataBalanceOpHandle::GetBalancePlan(
+    const std::vector<int> &device_sizes) {
+  int device_num = device_sizes.size();
+  int total_size = 0;
+  int empty_num = 0;
+  std::vector<std::array<int, 2>> size_device_vec;
+  size_device_vec.reserve(device_num);
+  for (int i = 0; i < device_num; ++i) {
+    if (device_sizes[i] == 0) {
+      ++empty_num;
+    }
+    total_size += device_sizes[i];
+    size_device_vec.push_back({{device_sizes[i], i}});
+  }
+  std::vector<std::array<int, 3>> res;
+  if (empty_num == 0) {
+    // No need to do data balance.
+    return res;
+  }
+  if (total_size < device_num) {
+    // Not enough data.
+    PADDLE_THROW("There is no next data.");
+  }
+  std::sort(size_device_vec.begin(), size_device_vec.end(),
+            [](const std::array<int, 2> &a, const std::array<int, 2> &b) {
+              return a[0] > b[0];
+            });
+  int expected_device_size = total_size / device_num;
+  int src_idx = 0;
+  for (int dst_idx = device_num - empty_num; dst_idx < device_num; ++dst_idx) {
+    if (size_device_vec[src_idx][0] <= expected_device_size) {
+      ++src_idx;
+      PADDLE_ENFORCE_LT(
+          src_idx, device_num - empty_num,
+          "In the current strategy an empty tensor should not be a copy source.");
+    }
+    size_device_vec[src_idx][0] -= expected_device_size;
+    size_device_vec[dst_idx][0] += expected_device_size;
+    res.push_back({{size_device_vec[src_idx][1], size_device_vec[dst_idx][1],
+                    expected_device_size}});
+  }
+  return res;
+}
+
+void DataBalanceOpHandle::RunImpl() {
+  if (places_.size() == 1) {
+    return;
+  }
+  auto in_var_handles = DynamicCast<VarHandle>(inputs_);
+  auto out_var_handles = DynamicCast<VarHandle>(outputs_);
+  PADDLE_ENFORCE(in_var_handles.size() % places_.size() == 0);
+  PADDLE_ENFORCE_EQ(
+      in_var_handles.size(), out_var_handles.size(),
+      "The NoDummyInputSize and NoDummyOutputSize should be equal.");
+  int data_num = in_var_handles.size() / places_.size();
+  WaitInputVarGenerated();
+  std::vector<std::vector<LoDTensor *>> lod_tensors(data_num);
+  std::vector<int> device_sizes;
+  for (int i = 0; i < static_cast<int>(in_var_handles.size()); ++i) {
+    PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_,
+                      "The name of input and output should be equal.");
+    int place_idx = i / data_num;
+    int data_idx = i % data_num;
+    auto *local_scope =
+        local_scopes_[place_idx]->FindVar(kLocalExecScopeName)->Get<Scope *>();
+    auto *tensor_var = local_scope->FindVar(in_var_handles[i]->name_);
+    PADDLE_ENFORCE(tensor_var->IsType<LoDTensor>());
+    auto *tensor = tensor_var->GetMutable<LoDTensor>();
+    lod_tensors[data_idx].push_back(tensor);
+    int ins_size =
+        tensor->lod().empty() ? tensor->dims()[0] : tensor->NumElements();
+    if (data_idx == 0) {
+      device_sizes.emplace_back(ins_size);
+    } else {
+      PADDLE_ENFORCE_EQ(
+          ins_size, device_sizes.at(place_idx),
+          "All data on the same device shall have the same batch size.");
+    }
+  }
+  const auto &balance_plan = GetBalancePlan(device_sizes);
+
+  for (const auto &trans : balance_plan) {
+    for (int data_idx = 0; data_idx < data_num; ++data_idx) {
+      LoDTensor *src_tensor = lod_tensors[data_idx][trans[0]];
+      LoDTensor *dst_tensor = lod_tensors[data_idx][trans[1]];
+      int trans_ins_size = trans[2];
+      LoD src_lod = src_tensor->lod();
+      int src_ins_size =
+          src_lod.empty() ? src_tensor->dims()[0] : src_tensor->NumElements();
+      int cut_point = src_ins_size - trans_ins_size;
+      if (!src_lod.empty()) {
+        for (auto &level : src_lod) {
+          cut_point = level[cut_point];
+        }
+      }
+      TensorCopySync(src_tensor->Slice(cut_point, src_tensor->dims()[0]),
+                     dst_tensor->place(), dst_tensor);
+      src_tensor->ShareDataWith(src_tensor->Slice(0, cut_point));
+      if (!src_lod.empty()) {
+        dst_tensor->set_lod(SliceInLevel(
+            src_lod, 0, src_ins_size - trans_ins_size, src_ins_size));
+        src_tensor->set_lod(
+            SliceInLevel(src_lod, 0, 0, src_ins_size - trans_ins_size));
+      }
+    }
+  }
+}
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
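
For intuition, the balancing policy above can be exercised in isolation. The following self-contained sketch (editor's illustration, not part of this commit) reimplements GetBalancePlan without the Paddle dependencies and prints the plan for an end-of-epoch batch; main, PrintPlan-style output, and the sample sizes are hypothetical:

// Standalone sketch of the GetBalancePlan policy above.
// Build with: g++ -std=c++11 balance_sketch.cc
#include <algorithm>
#include <array>
#include <cstdio>
#include <stdexcept>
#include <vector>

// Returns {src_dev_id, dst_dev_id, trans_size} triples, as in the op handle.
std::vector<std::array<int, 3>> GetBalancePlan(
    const std::vector<int> &device_sizes) {
  int device_num = static_cast<int>(device_sizes.size());
  int total_size = 0;
  int empty_num = 0;
  std::vector<std::array<int, 2>> size_device_vec;
  for (int i = 0; i < device_num; ++i) {
    if (device_sizes[i] == 0) ++empty_num;
    total_size += device_sizes[i];
    size_device_vec.push_back({{device_sizes[i], i}});
  }
  std::vector<std::array<int, 3>> res;
  if (empty_num == 0) return res;  // every device has data; nothing to do
  if (total_size < device_num) throw std::runtime_error("no next data");
  std::sort(size_device_vec.begin(), size_device_vec.end(),
            [](const std::array<int, 2> &a, const std::array<int, 2> &b) {
              return a[0] > b[0];
            });
  int expected = total_size / device_num;
  int src_idx = 0;
  for (int dst_idx = device_num - empty_num; dst_idx < device_num; ++dst_idx) {
    // Advance to the next donor once the current one is drained to the
    // expected size; an empty device can never become a source.
    if (size_device_vec[src_idx][0] <= expected) ++src_idx;
    size_device_vec[src_idx][0] -= expected;
    size_device_vec[dst_idx][0] += expected;
    res.push_back({{size_device_vec[src_idx][1], size_device_vec[dst_idx][1],
                    expected}});
  }
  return res;
}

int main() {
  // Tail of an epoch: devices 0 and 1 still read 3 and 1 instances,
  // devices 2 and 3 read nothing.
  for (const auto &t : GetBalancePlan({3, 1, 0, 0})) {
    std::printf("move %d instance(s) from device %d to device %d\n",
                t[2], t[0], t[1]);
  }
  // Expected output (tie order between the two empty devices may vary):
  //   move 1 instance(s) from device 0 to device 2
  //   move 1 instance(s) from device 0 to device 3
  return 0;
}

With sizes {3, 1, 0, 0}, total_size is 4 over 4 devices, so expected_device_size is 1 and the largest device donates one instance to each empty device; sources are always devices that still hold at least the expected size, which is what the PADDLE_ENFORCE_LT in the committed code asserts.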
paddle/fluid/framework/details/data_balance_op_handle.h

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include "paddle/fluid/framework/details/op_handle_base.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/scope.h"
+#ifdef PADDLE_WITH_CUDA
+#include "paddle/fluid/platform/nccl_helper.h"
+#endif
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+struct DataBalanceOpHandle : public OpHandleBase {
+ public:
+#ifdef PADDLE_WITH_CUDA
+  DataBalanceOpHandle(const std::vector<Scope *> &local_scopes,
+                      const std::vector<platform::Place> &places,
+                      const platform::NCCLContextMap *ctxs);
+#else
+  DataBalanceOpHandle(const std::vector<Scope *> &local_scopes,
+                      const std::vector<platform::Place> &places);
+#endif
+
+  std::string Name() const override;
+
+  bool IsMultiDeviceTransfer() override { return false; };
+
+ protected:
+  void RunImpl() override;
+
+ private:
+  // std::vector<(src_dev_id, dst_dev_id, trans_size)>
+  std::vector<std::array<int, 3>> GetBalancePlan(
+      const std::vector<int> &batch_size_per_device);
+
+  const std::vector<Scope *> local_scopes_;
+  const std::vector<platform::Place> places_;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle

paddle/fluid/framework/details/fetch_op_handle.cc

Lines changed: 1 addition & 1 deletion
@@ -67,8 +67,8 @@ void FetchOpHandle::RunImpl() {
 #endif
     } else {
       tensors_[i].ShareDataWith(t);
-      tensors_[i].set_lod(t.lod());
     }
+    tensors_[i].set_lod(t.lod());
   }
 
   this->WaitAndMergeCPUTensors();

paddle/fluid/framework/details/multi_devices_graph_builder.cc

Lines changed: 34 additions & 2 deletions
@@ -20,6 +20,7 @@
 #include "paddle/fluid/framework/details/all_reduce_op_handle.h"
 #include "paddle/fluid/framework/details/broadcast_op_handle.h"
 #include "paddle/fluid/framework/details/computation_op_handle.h"
+#include "paddle/fluid/framework/details/data_balance_op_handle.h"
 #include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
 #include "paddle/fluid/framework/details/reduce_op_handle.h"
 #include "paddle/fluid/framework/details/rpc_op_handle.h"
@@ -215,7 +216,14 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
     } else {
       // This op runs on all devices, and its output may have parameter's
       // gradients.
-      CreateComputationalOps(&result, *op, places_.size());
+      if (op->Type() == "read" && strategy_.enable_data_balance_) {
+        op->SetAttr("throw_eof_exp", false);
+        CreateComputationalOps(&result, *op, places_.size());
+        const auto &data_var_names = op->Output("Out");
+        InsertDataBalanceOp(&result, data_var_names);
+      } else {
+        CreateComputationalOps(&result, *op, places_.size());
+      }
 
       if (!is_forwarding && places_.size() > 1) {
         // Currently, we assume that once gradient is generated, it can be
@@ -360,6 +368,29 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(SSAGraph *result,
   }
 }
 
+void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
+    SSAGraph *result, const std::vector<std::string> &datas) const {
+#ifdef PADDLE_WITH_CUDA
+  result->ops_.emplace_back(
+      new DataBalanceOpHandle(local_scopes_, places_, nccl_ctxs_));
+#else
+  result->ops_.emplace_back(new DataBalanceOpHandle(local_scopes_, places_));
+#endif
+  auto *op_handle = result->ops_.back().get();
+  for (size_t i = 0; i < places_.size(); ++i) {
+    auto &p = places_[i];
+    SetCommunicationContext(op_handle, p);
+    for (const std::string &d_name : datas) {
+      auto &vars = result->vars_[i][d_name];
+      PADDLE_ENFORCE(!vars.empty());
+      op_handle->AddInput(vars.back().get());
+      auto var = new VarHandle(vars.size(), i, d_name, p);
+      vars.emplace_back(var);
+      op_handle->AddOutput(var);
+    }
+  }
+}
+
 bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
     const std::string &og,
     std::unordered_set<std::string> *og_has_been_broadcast) const {
@@ -512,7 +543,8 @@ void MultiDevSSAGraphBuilder::CreateRPCOp(SSAGraph *result,
     op_dev_id = GetVarDeviceID(op.InputArgumentNames()[0]);
     // the variable name which contains .block means it was splited by
     // split_byref op
-    // so that we can balance the variable blocks to all the pserver instances.
+    // so that we can balance the variable blocks to all the pserver
+    // instances.
     if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
         op.InputArgumentNames()[0].find(".block") == std::string::npos) {
       op_dev_id = GetAppropriateDeviceID(op.InputArgumentNames());

paddle/fluid/framework/details/multi_devices_graph_builder.h

Lines changed: 3 additions & 0 deletions
@@ -101,6 +101,9 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder {
 
   void InsertAllReduceOp(SSAGraph *result, const std::string &og) const;
 
+  void InsertDataBalanceOp(SSAGraph *result,
+                           const std::vector<std::string> &datas) const;
+
   void CreateBroadcastOp(SSAGraph *result, const std::string &p_name,
                          size_t src_dev_id) const;
 

paddle/fluid/framework/details/op_handle_base.cc

Lines changed: 2 additions & 0 deletions
@@ -58,8 +58,10 @@ void OpHandleBase::Run(bool use_cuda) {
 
 void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
 #ifdef PADDLE_WITH_CUDA
+  PADDLE_ENFORCE_NOT_NULL(waited_ctx);
   if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) {
     for (auto &dev_ctx : dev_ctxes_) {
+      PADDLE_ENFORCE_NOT_NULL(dev_ctx.second);
      dev_ctx.second->Wait();
     }
   } else {

paddle/fluid/framework/lod_tensor.cc

Lines changed: 2 additions & 0 deletions
@@ -90,6 +90,7 @@ std::string LoDToString(const LoD &lod) {
 LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                  size_t elem_end) {
   PADDLE_ENFORCE_LT(level, in.size());
+  PADDLE_ENFORCE_LT(elem_begin, elem_end);
   PADDLE_ENFORCE_LT(elem_end, in[level].size());
 
   LoD res;
@@ -393,6 +394,7 @@ void LoDTensor::MergeLoDTensor(
     new_dim[0] += t->dims()[0];
 
     auto &lod = t->lod();
+    PADDLE_ENFORCE_EQ(new_lod.size(), lod.size());
     for (size_t j = 0; j < lod.size(); ++j) {
       auto &sub_lod = new_lod[j];
       auto &offset = sub_lod.back();
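
The strengthened SliceInLevel checks are exercised by DataBalanceOpHandle::RunImpl above, which first converts an instance-level cut point into a row offset by walking the LoD. A small self-contained sketch of that conversion (editor's illustration with hypothetical values; plain nested vectors stand in for paddle::framework::LoD):

#include <cstdio>
#include <vector>

int main() {
  // A one-level LoD over 3 instances occupying 9 rows: instance i spans
  // rows [lod[0][i], lod[0][i+1]). Moving the last instance to another
  // device means cutting at instance offset 2, i.e. at row lod[0][2].
  std::vector<std::vector<size_t>> lod = {{0, 2, 5, 9}};
  int src_ins_size = 3;    // instances currently on the source device
  int trans_ins_size = 1;  // instances to move to the empty device
  size_t cut_point = src_ins_size - trans_ins_size;  // = 2, in instances
  for (const auto &level : lod) {
    cut_point = level[cut_point];  // instance offset -> row offset
  }
  // Rows [cut_point, 9) are copied to the destination, rows [0, cut_point)
  // stay on the source; both LoDs are then rebuilt with SliceInLevel.
  std::printf("cut at row %zu\n", cut_point);  // prints: cut at row 5
  return 0;
}

With a multi-level LoD, each pass of the loop maps the offset one level deeper, which is exactly the chain RunImpl performs before calling Slice and SliceInLevel.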

paddle/fluid/operators/read_op.cc

Lines changed: 16 additions & 2 deletions
@@ -66,9 +66,19 @@ class ReadOp : public framework::OperatorBase {
     std::vector<std::string> out_arg_names = Outputs("Out");
     std::vector<framework::LoDTensor> ins;
     reader->ReadNext(&ins);
-    PADDLE_ENFORCE(!ins.empty(), "There is no next data.");
+    if (ins.empty()) {
+      if (Attr<bool>("throw_eof_exp")) {
+        PADDLE_THROW("There is no next data.");
+      } else {
+        ins.resize(out_arg_names.size());
+        for (auto& tensor : ins) {
+          // data type is not important for subsequent DataBalanceOpHandle
+          tensor.mutable_data<float>(framework::make_ddim({0}), dev_place);
+        }
+      }
+    }
     PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size());
-    for (size_t i = 0; i < ins.size(); ++i) {
+    for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto* out =
           scope.FindVar(out_arg_names[i])->GetMutable<framework::LoDTensor>();
       out->ShareDataWith(ins[i]);
@@ -82,6 +92,10 @@ class ReadOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("Reader", "(ReaderHolder) The executed reader.");
     AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable();
+    AddAttr<bool>("throw_eof_exp",
+                  "If set true, an exception will be thrown when the Reader "
+                  "yields empty (which means there is no next data).")
+        .SetDefault(true);
     AddComment(R"DOC(
 Read Operator
 
