Commit 29b048b

[Lod][fluid_ops] LegacyLoD (#69822)
1 parent 8464c04 commit 29b048b

File tree

136 files changed: +354 −347 lines


paddle/common/ddim.h

Lines changed: 2 additions & 1 deletion

@@ -243,7 +243,8 @@ TEST_API DDim ComputeCompatibleDim(const DDim& dim1, const DDim& dim2);
 
 namespace pir {
 using DDim = common::DDim;
-using LoD = std::vector<std::vector<size_t>>;
+using LegacyLoD = std::vector<std::vector<size_t>>;
+using LoD = LegacyLoD;
 }  // namespace pir
 
 namespace std {
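
The hunk above only renames the nested offset type; LoD is kept as an alias of LegacyLoD, so code that still spells pir::LoD keeps compiling during the migration. A minimal standalone sketch (plain C++, not Paddle code) of how the alias behaves:

#include <cstddef>
#include <iostream>
#include <vector>

namespace pir {
// Same shape as the hunk above: LegacyLoD is the nested offset vector,
// and LoD is a backward-compatible alias for it.
using LegacyLoD = std::vector<std::vector<size_t>>;
using LoD = LegacyLoD;
}  // namespace pir

int main() {
  // One LoD level with cumulative offsets {0, 3, 8}: two sequences of
  // lengths 3 and 5 (illustrative values, not taken from the commit).
  pir::LegacyLoD lod{{0, 3, 8}};
  pir::LoD& same = lod;                 // the alias denotes the exact same type
  std::cout << same[0].back() << "\n";  // prints 8
  return 0;
}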

paddle/fluid/distributed/fleet_executor/dist_model.cc

Lines changed: 1 addition & 1 deletion

@@ -125,7 +125,7 @@ bool LoadDataFromDistModelTensor(const DistModelTensor &input_data,
         "DistModel only supports CPU and GPU and XPU and CustomDevice."));
   }
 
-  phi::LoD dst_lod;
+  phi::LegacyLoD dst_lod;
   for (auto &src_lod : input_data.lod) {
     dst_lod.emplace_back(src_lod);
   }

paddle/fluid/distributed/ps/service/brpc_utils.cc

Lines changed: 2 additions & 2 deletions

@@ -91,7 +91,7 @@ void SerializeDenseTensor(framework::Variable* var,
                           butil::IOBuf* iobuf) {
   auto* tensor = var->GetMutable<phi::DenseTensor>();
   var_msg->set_type(::paddle::distributed::DENSE_TENSOR);
-  const phi::LoD lod = tensor->lod();
+  const phi::LegacyLoD lod = tensor->lod();
   if (!lod.empty()) {
     var_msg->set_lod_level(lod.size());
     for (auto& each : lod) {
@@ -231,7 +231,7 @@ void DeserializeDenseTensor(framework::Variable* var,
   }
   tensor->Resize(common::make_ddim(vec_dim));
 
-  phi::LoD lod;
+  phi::LegacyLoD lod;
   for (int i = 0; i < msg.lod_level(); ++i) {
     phi::Vector<size_t> v;
     for (int j = 0; j < msg.lod(i).lod_data_size(); ++j) {
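
Both hunks only change the declared type; the surrounding code still walks the LoD level by level when serializing and rebuilding it. A standalone sketch of that round trip over a LegacyLoD, using a plain size_t buffer instead of the butil/protobuf API (the buffer layout here is an assumption for illustration, not the wire format used above):

#include <cassert>
#include <cstddef>
#include <vector>

using LegacyLoD = std::vector<std::vector<size_t>>;

// Write the number of levels, then each level's size followed by its offsets.
std::vector<size_t> Serialize(const LegacyLoD& lod) {
  std::vector<size_t> buf{lod.size()};  // level count first
  for (const auto& level : lod) {
    buf.push_back(level.size());
    buf.insert(buf.end(), level.begin(), level.end());
  }
  return buf;
}

// Read the levels back in the same order they were written.
LegacyLoD Deserialize(const std::vector<size_t>& buf) {
  LegacyLoD lod;
  size_t pos = 1;
  for (size_t i = 0; i < buf[0]; ++i) {
    size_t n = buf[pos++];
    lod.emplace_back(buf.begin() + pos, buf.begin() + pos + n);
    pos += n;
  }
  return lod;
}

int main() {
  LegacyLoD lod{{0, 3, 8}, {0, 1, 2, 3, 4, 5, 6, 7, 8}};
  assert(Deserialize(Serialize(lod)) == lod);  // round trip preserves the LoD
  return 0;
}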

paddle/fluid/distributed/ps/wrapper/fleet.cc

Lines changed: 1 addition & 1 deletion

@@ -1016,7 +1016,7 @@ std::default_random_engine& FleetWrapper::LocalRandomEngine() {
 size_t FleetWrapper::GetAbsoluteSum(size_t start,
                                     size_t end,
                                     size_t level,
-                                    const phi::LoD& lod) {
+                                    const phi::LegacyLoD& lod) {
   if (level >= lod.size() - 1) {
     return end - start;
   }
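
Only the parameter type changes here; the visible base case returns the raw span length once the last offset level is reached. As a rough illustration of how a recursion over nested LegacyLoD levels can compute an absolute count (an assumed sketch, not FleetWrapper's actual implementation):

#include <cstddef>
#include <iostream>
#include <vector>

using LegacyLoD = std::vector<std::vector<size_t>>;

// Assumed sketch: expand [start, end) through each offset level until the
// last one is reached, then count the remaining span directly (the base case
// visible in the hunk above).
size_t GetAbsoluteSum(size_t start, size_t end, size_t level,
                      const LegacyLoD& lod) {
  if (level >= lod.size() - 1) {
    return end - start;
  }
  size_t total = 0;
  for (size_t i = start; i < end; ++i) {
    total += GetAbsoluteSum(lod[level][i], lod[level][i + 1], level + 1, lod);
  }
  return total;
}

int main() {
  // Two offset levels: level 0 holds two sequences ({0,3} and {3,8}), which
  // expand to 3 + 5 = 8 entries at the last level.
  LegacyLoD lod{{0, 3, 8}, {0, 1, 2, 3, 4, 5, 6, 7, 8}};
  std::cout << GetAbsoluteSum(0, 2, 0, lod) << "\n";  // prints 8
  return 0;
}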

paddle/fluid/distributed/ps/wrapper/fleet.h

Lines changed: 1 addition & 1 deletion

@@ -335,7 +335,7 @@ class FleetWrapper {
   size_t GetAbsoluteSum(size_t start,
                         size_t end,
                         size_t level,
-                        const phi::LoD& lod);
+                        const phi::LegacyLoD& lod);
 
  protected:
   static bool is_initialized_;

paddle/fluid/distributed/test/brpc_utils_test.cc

Lines changed: 2 additions & 2 deletions

@@ -33,7 +33,7 @@ void CreateVarsOnScope(framework::Scope* scope,
   framework::Variable* var1 = scope->Var("x1");
   auto* tensor1 = var1->GetMutable<phi::DenseTensor>();
   tensor1->Resize(common::make_ddim({512, 8, 4, 2}));
-  phi::LoD lod1;
+  phi::LegacyLoD lod1;
   lod1.push_back(phi::Vector<size_t>({1, 3, 8}));
   tensor1->set_lod(lod1);
   tensor1->mutable_data<float>(*place);
@@ -43,7 +43,7 @@ void CreateVarsOnScope(framework::Scope* scope,
   framework::Variable* var2 = scope->Var("x2");
   auto* tensor2 = var2->GetMutable<phi::DenseTensor>();
   tensor2->Resize(common::make_ddim({1000, 64}));
-  phi::LoD lod2;
+  phi::LegacyLoD lod2;
   lod2.push_back(phi::Vector<size_t>({1, 1}));
   tensor2->set_lod(lod2);
   tensor2->mutable_data<int>(*place);

paddle/fluid/framework/data_feed.cc

Lines changed: 9 additions & 9 deletions

@@ -1053,7 +1053,7 @@ void MultiSlotDataFeed::PutToFeedVec(
     }
 
     if (!use_slots_is_dense_[i]) {
-      LoD data_lod{offset};
+      LegacyLoD data_lod{offset};
       feed_vec_[i]->set_lod(data_lod);
     }
     if (use_slots_is_dense_[i]) {
@@ -1446,7 +1446,7 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec(const Record* ins_vec, int num) {
     }
     auto& slot_offset = offset_[i];
     if (this->input_type_ == 0) {
-      LoD data_lod{slot_offset};
+      LegacyLoD data_lod{slot_offset};
       feed_vec_[i]->set_lod(data_lod);
     } else if (this->input_type_ == 1) {
       if (!use_slots_is_dense_[i]) {
@@ -1463,7 +1463,7 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec(const Record* ins_vec, int num) {
           tmp_offset.emplace_back(k);
         }
         slot_offset = tmp_offset;
-        LoD data_lod{slot_offset};
+        LegacyLoD data_lod{slot_offset};
         feed_vec_[i]->set_lod(data_lod);
       }
     }
@@ -1545,7 +1545,7 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec(
     auto& slot_offset = offset_[i];
     if (this->input_type_ == 0) {
       if (!use_slots_is_dense_[i]) {
-        LoD data_lod{slot_offset};
+        LegacyLoD data_lod{slot_offset};
         feed_vec_[i]->set_lod(data_lod);
       }
     } else if (this->input_type_ == 1) {
@@ -1563,7 +1563,7 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec(
           tmp_offset.emplace_back(k);
         }
         slot_offset = tmp_offset;
-        LoD data_lod{slot_offset};
+        LegacyLoD data_lod{slot_offset};
         feed_vec_[i]->set_lod(data_lod);
       }
     }
@@ -1600,7 +1600,7 @@ void PrivateInstantDataFeed<T>::PutToFeedVec() {
           tensor_ptr, &feasign[0], total_instance * sizeof(int64_t));
     }
 
-    LoD data_lod{offset};
+    LegacyLoD data_lod{offset};
     feed_vec_[i]->set_lod(data_lod);
     if (use_slots_is_dense_[i]) {
       int64_t total_dims = 1;
@@ -2048,7 +2048,7 @@ void PaddleBoxDataFeed::PutToFeedVec(const std::vector<Record*>& ins_vec) {
      CopyToFeedTensor(tensor_ptr, feasign, total_instance * sizeof(int64_t));
    }
    auto& slot_offset = offset_[i];
-    LoD data_lod{slot_offset};
+    LegacyLoD data_lod{slot_offset};
    feed_vec_[i]->set_lod(data_lod);
    if (use_slots_is_dense_[i]) {
      if (inductive_shape_index_[i] != -1) {
@@ -2704,7 +2704,7 @@ void SlotRecordInMemoryDataFeed::PutToFeedVec(const SlotRecord* ins_vec,
      }
      feed->Resize(common::make_ddim(info.local_shape));
    } else {
-      LoD data_lod{slot_offset};
+      LegacyLoD data_lod{slot_offset};
      feed_vec_[j]->set_lod(data_lod);
    }
  }
@@ -3117,7 +3117,7 @@ void SlotRecordInMemoryDataFeed::PackToScope(MiniBatchGpuPack* pack,
      }
      feed->Resize(common::make_ddim(info.local_shape));
    } else {
-      LoD& lod = (*feed->mutable_lod());
+      LegacyLoD& lod = (*feed->mutable_lod());
      lod.resize(1);
      lod[0].resize(offset_cols_size);
      phi::MixVector<size_t> mixv_lod(&lod[0]);
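
Every one of the nine hunks in this file is the same mechanical rename around one recurring pattern: wrap a per-slot offset vector in a single-level LoD and install it on the feed tensor via set_lod. A standalone sketch of that pattern, where FakeFeedTensor is a hypothetical stand-in for phi::DenseTensor used only so the snippet compiles on its own:

#include <cstddef>
#include <vector>

using LegacyLoD = std::vector<std::vector<size_t>>;

// Hypothetical stand-in for phi::DenseTensor; only set_lod matters here.
struct FakeFeedTensor {
  LegacyLoD lod_;
  void set_lod(const LegacyLoD& lod) { lod_ = lod; }
};

int main() {
  // Cumulative offsets for three variable-length instances of lengths 2, 4, 1
  // (illustrative values).
  std::vector<size_t> slot_offset{0, 2, 6, 7};
  LegacyLoD data_lod{slot_offset};  // single-level LoD, as in PutToFeedVec
  FakeFeedTensor feed;
  feed.set_lod(data_lod);
  return 0;
}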

paddle/fluid/framework/data_feed.cu

Lines changed: 2 additions & 2 deletions

@@ -1285,14 +1285,14 @@ int GraphDataGenerator::GenerateBatch() {
     }
     sage_batch_count_ += 1;
   }
-  LoD lod{offset_};
+  LegacyLoD lod{offset_};
 
   if (conf_.accumulate_num >= 2) {
     offset_.clear();
     offset_.push_back(0);
     offset_.push_back(uniq_instance_vec_[sage_batch_count_ * 2]);
   }
-  LoD lod2{offset_};
+  LegacyLoD lod2{offset_};
 
   if (conf_.accumulate_num == 1) {
     for (int tensor_pair_idx = 0; tensor_pair_idx < conf_.tensor_pair_num;

paddle/fluid/framework/downpour_worker.cc

Lines changed: 1 addition & 1 deletion

@@ -232,7 +232,7 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
         phi::CPUPlace());
     memset(ptr, 0, sizeof(float) * len * table.emb_dim());
     auto& tensor_lod = tensor->lod()[0];
-    LoD data_lod{tensor_lod};
+    LegacyLoD data_lod{tensor_lod};
     tensor_emb->set_lod(data_lod);
 
     bool is_nid = (adjust_ins_weight_config_.need_adjust() &&

paddle/fluid/framework/fleet/fleet_wrapper.cc

Lines changed: 1 addition & 1 deletion

@@ -1983,7 +1983,7 @@ int32_t FleetWrapper::CopyTableByFeasign(
 size_t FleetWrapper::GetAbsoluteSum(size_t start,
                                     size_t end,
                                     size_t level,
-                                    const phi::LoD& lod) {
+                                    const phi::LegacyLoD& lod) {
   if (level >= lod.size() - 1) {
     return end - start;
   }
