
Commit 31a2c87

fea/lightly support lod (#12451)

1 parent 38863a2

File tree

6 files changed: +40 −27 lines

paddle/fluid/inference/api/api_anakin_engine_tester.cc
paddle/fluid/inference/api/api_impl.cc
paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc
paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc
paddle/fluid/inference/api/demo_ci/vis_demo.cc
paddle/fluid/inference/api/paddle_inference_api.h

paddle/fluid/inference/api/api_anakin_engine_tester.cc

Lines changed: 10 additions & 8 deletions
@@ -37,19 +37,21 @@ TEST(inference, anakin) {
 
   float data[1 * 3 * 224 * 224] = {1.0f};
 
-  PaddleTensor tensor{.name = "input_0",
-                      .shape = std::vector<int>({1, 3, 224, 224}),
-                      .data = PaddleBuf(data, sizeof(data)),
-                      .dtype = PaddleDType::FLOAT32};
+  PaddleTensor tensor;
+  tensor.name = "input_0";
+  tensor.shape = std::vector<int>({1, 3, 224, 224});
+  tensor.data = PaddleBuf(data, sizeof(data));
+  tensor.dtype = PaddleDType::FLOAT32;
 
   // For simplicity, we set all the slots with the same data.
   std::vector<PaddleTensor> paddle_tensor_feeds;
   paddle_tensor_feeds.emplace_back(std::move(tensor));
 
-  PaddleTensor tensor_out{.name = "prob_out",
-                          .shape = std::vector<int>({1000, 1}),
-                          .data = PaddleBuf(),
-                          .dtype = PaddleDType::FLOAT32};
+  PaddleTensor tensor_out;
+  tensor_out.name = "prob_out";
+  tensor_out.shape = std::vector<int>({1000, 1});
+  tensor_out.data = PaddleBuf();
+  tensor_out.dtype = PaddleDType::FLOAT32;
 
   std::vector<PaddleTensor> outputs;
   outputs.emplace_back(std::move(tensor_out));
paddle/fluid/inference/api/api_impl.cc

Lines changed: 11 additions & 0 deletions
@@ -183,6 +183,13 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
     // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
     std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                 inputs[i].data.length());
+    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
+    framework::LoD lod;
+    for (auto &level : inputs[i].lod) {
+      lod.emplace_back(level);
+    }
+    input.set_lod(lod);
+
     feeds->push_back(input);
   }
   return true;
@@ -248,6 +255,10 @@ bool NativePaddlePredictor::GetFetch(
       buffer.Resize(sizeof(float) * data.size());
     }
     std::memcpy(buffer.data(), data.data(), buffer.length());
+    // copy LoD
+    for (const auto &level : fetchs[i].lod()) {
+      outputs->at(i).lod.emplace_back(level);
+    }
     outputs->at(i).dtype = PaddleDType::FLOAT32;
     // TODO(panyx0718): support other types? fill tensor name? avoid a copy.
   }
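
Together the two hunks round-trip LoD through the native predictor: SetFeed copies each level of PaddleTensor::lod into a framework::LoD before pushing the feed, and GetFetch copies the fetched tensor's lod() back into the output. A minimal caller-side sketch (hypothetical data; Paddle LoD levels are offset vectors, so {0, 3, 4} marks two sequences occupying rows [0, 3) and [3, 4)):

int64_t data[4] = {1, 2, 3, 4};

PaddleTensor tensor;
tensor.shape = std::vector<int>({4, 1});
tensor.data = PaddleBuf(data, sizeof(data));
tensor.dtype = PaddleDType::INT64;
tensor.lod = {{0, 3, 4}};  // one level: sequence lengths 3 and 1

std::vector<PaddleTensor> inputs(1, tensor), outputs;
predictor->Run(inputs, &outputs);
// outputs[0].lod now carries whatever LoD the fetched LoDTensor had.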

paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc

Lines changed: 4 additions & 5 deletions
@@ -49,11 +49,10 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) {
   std::vector<int64_t> data(20);
   for (int i = 0; i < 20; i++) data[i] = i;
 
-  PaddleTensor tensor{
-      .name = "",
-      .shape = std::vector<int>({10, 1}),
-      .data = PaddleBuf(data.data(), data.size() * sizeof(int64_t)),
-      .dtype = PaddleDType::INT64};
+  PaddleTensor tensor;
+  tensor.shape = std::vector<int>({10, 1});
+  tensor.data = PaddleBuf(data.data(), data.size() * sizeof(int64_t));
+  tensor.dtype = PaddleDType::INT64;
 
   // For simplicity, we set all the slots with the same data.
   std::vector<PaddleTensor> slots(4, tensor);

paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc

Lines changed: 9 additions & 8 deletions
@@ -47,10 +47,10 @@ void Main(bool use_gpu) {
   //# 2. Prepare input.
   int64_t data[4] = {1, 2, 3, 4};
 
-  PaddleTensor tensor{.name = "",
-                      .shape = std::vector<int>({4, 1}),
-                      .data = PaddleBuf(data, sizeof(data)),
-                      .dtype = PaddleDType::INT64};
+  PaddleTensor tensor;
+  tensor.shape = std::vector<int>({4, 1});
+  tensor.data = PaddleBuf(data, sizeof(data));
+  tensor.dtype = PaddleDType::INT64;
 
   // For simplicity, we set all the slots with the same data.
   std::vector<PaddleTensor> slots(4, tensor);
@@ -94,10 +94,11 @@ void MainThreads(int num_threads, bool use_gpu) {
     for (int batch_id = 0; batch_id < num_batches; ++batch_id) {
       // 2. Dummy Input Data
       int64_t data[4] = {1, 2, 3, 4};
-      PaddleTensor tensor{.name = "",
-                          .shape = std::vector<int>({4, 1}),
-                          .data = PaddleBuf(data, sizeof(data)),
-                          .dtype = PaddleDType::INT64};
+      PaddleTensor tensor;
+      tensor.shape = std::vector<int>({4, 1});
+      tensor.data = PaddleBuf(data, sizeof(data));
+      tensor.dtype = PaddleDType::INT64;
+
       std::vector<PaddleTensor> inputs(4, tensor);
       std::vector<PaddleTensor> outputs;
       // 3. Run

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 5 additions & 5 deletions
@@ -123,11 +123,11 @@ void Main(bool use_gpu) {
   file.close();
 
   // Inference.
-  PaddleTensor input{
-      .name = "xx",
-      .shape = record.shape,
-      .data = PaddleBuf(record.data.data(), record.data.size() * sizeof(float)),
-      .dtype = PaddleDType::FLOAT32};
+  PaddleTensor input;
+  input.shape = record.shape;
+  input.data =
+      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
+  input.dtype = PaddleDType::FLOAT32;
 
   VLOG(3) << "run executor";
   std::vector<PaddleTensor> output;

paddle/fluid/inference/api/paddle_inference_api.h

Lines changed: 1 addition & 1 deletion
@@ -67,9 +67,9 @@ struct PaddleTensor {
   PaddleTensor() = default;
   std::string name;  // variable name.
   std::vector<int> shape;
-  // TODO(Superjomn) for LoD support, add a vector<vector<int>> field if needed.
   PaddleBuf data;  // blob of data.
   PaddleDType dtype;
+  std::vector<std::vector<uint64_t>> lod;  // lod data
 };
 
 enum class PaddleEngineKind {
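
The new field's type mirrors framework::LoD, which is essentially a vector of offset vectors; that one-to-one shape is what lets SetFeed copy it across level by level. A hypothetical two-level LoD for nested sequences (illustration only):

std::vector<std::vector<uint64_t>> lod = {
    {0, 2, 4},        // level 0: two chunks, each holding two sub-sequences
    {0, 3, 5, 8, 9},  // level 1: row offsets of the four sub-sequences
};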
