
Commit b7026f7

Fix a bug related to dispensable inputs and refine the inference unittest (#10527)
* Fix a bug related to dispensable inputs and refine the inference unittest.
* Fix the use of dispensable inputs in reshape_op.
* Polish the enforce statements.
* Fix an English writing typo.
1 parent 9fad436 commit b7026f7

5 files changed, +79 -37 lines


paddle/fluid/framework/operator.h

Lines changed: 4 additions & 0 deletions
@@ -192,6 +192,10 @@ class ExecutionContext {
     return op_.Attr<T>(name);
   }
 
+  bool HasInput(const std::string& name) const { return op_.HasInputs(name); }
+
+  bool HasOutput(const std::string& name) const { return op_.HasOutputs(name); }
+
   size_t InputSize(const std::string& name) const {
     return op_.Inputs(name).size();
   }
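
These accessors let kernel code check whether a dispensable (optional) input or output was actually provided before dereferencing it. A minimal usage sketch inside a hypothetical kernel (the `Bias` input name is illustrative, not part of this commit):

    // Guard a dispensable input: HasInput() returns false when the optional
    // variable was not fed, so we fall back to nullptr instead of
    // dereferencing a missing variable.
    auto* bias = ctx.HasInput("Bias")
                     ? ctx.Input<framework::LoDTensor>("Bias")
                     : nullptr;
    if (bias != nullptr) {
      // ... apply the bias term ...
    }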

paddle/fluid/inference/tests/book/test_inference_image_classification.cc

Lines changed: 12 additions & 13 deletions
@@ -16,7 +16,6 @@ limitations under the License. */
 #include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
-DEFINE_string(data_set, "cifar10", "Data set to test");
 DEFINE_string(dirname, "", "Directory of the inference model.");
 DEFINE_string(fp16_dirname, "", "Directory of the float16 inference model.");
 DEFINE_int32(batch_size, 1, "Batch size of input data");
@@ -35,19 +34,19 @@ TEST(inference, image_classification) {
   // 0. Call `paddle::framework::InitDevices()` initialize all the devices
   // In unittests, this is done in paddle/testing/paddle_gtest_main.cc
 
+  const bool is_combined = false;
+  std::vector<std::vector<int64_t>> feed_target_shapes =
+      GetFeedTargetShapes(dirname, is_combined);
+
   paddle::framework::LoDTensor input;
   // Use normalized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  if (FLAGS_data_set == "cifar10") {
-    SetupTensor<float>(&input, {FLAGS_batch_size, 3, 32, 32},
-                       static_cast<float>(0), static_cast<float>(1));
-  } else if (FLAGS_data_set == "imagenet") {
-    SetupTensor<float>(&input, {FLAGS_batch_size, 3, 224, 224},
-                       static_cast<float>(0), static_cast<float>(1));
-  } else {
-    LOG(FATAL) << "Only cifar10 or imagenet is supported.";
-  }
-
+  feed_target_shapes[0][0] = FLAGS_batch_size;
+  paddle::framework::DDim input_dims =
+      paddle::framework::make_ddim(feed_target_shapes[0]);
+  LOG(INFO) << input_dims;
+  SetupTensor<float>(&input, input_dims, static_cast<float>(0),
+                     static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
 
@@ -60,7 +59,7 @@ TEST(inference, image_classification) {
   LOG(INFO) << "--- CPU Runs: ---";
   LOG(INFO) << "Batch size is " << FLAGS_batch_size;
   TestInference<paddle::platform::CPUPlace, false, true>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat);
+      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
   LOG(INFO) << output1.dims();
 }
 
@@ -73,7 +72,7 @@ TEST(inference, image_classification) {
   LOG(INFO) << "--- GPU Runs: ---";
   LOG(INFO) << "Batch size is " << FLAGS_batch_size;
   TestInference<paddle::platform::CUDAPlace, false, true>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat);
+      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined);
   LOG(INFO) << output2.dims();
 
   if (!FLAGS_skip_cpu) {
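
The rewritten test derives the input shape from the saved model instead of hard-coding CIFAR-10 or ImageNet dimensions, which is why the `data_set` flag could be removed. A condensed sketch of the pattern (the shape values are illustrative):

    // Suppose GetFeedTargetShapes() reported {1, 3, 32, 32} for the first
    // feed target; only the batch dimension needs to be overridden.
    std::vector<int64_t> shape = feed_target_shapes[0];
    shape[0] = FLAGS_batch_size;
    paddle::framework::DDim dims = paddle::framework::make_ddim(shape);
    SetupTensor<float>(&input, dims, 0.0f, 1.0f);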

paddle/fluid/inference/tests/test_helper.h

Lines changed: 45 additions & 16 deletions
@@ -89,6 +89,50 @@ void CheckError(const paddle::framework::LoDTensor& output1,
   EXPECT_EQ(count, 0U) << "There are " << count << " different elements.";
 }
 
+std::unique_ptr<paddle::framework::ProgramDesc> InitProgram(
+    paddle::framework::Executor* executor, paddle::framework::Scope* scope,
+    const std::string& dirname, const bool is_combined = false) {
+  std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
+  if (is_combined) {
+    // All parameters are saved in a single file.
+    // Hard-coding the file names of program and parameters in unittest.
+    // The file names should be consistent with those used in the Python API
+    // `fluid.io.save_inference_model`.
+    std::string prog_filename = "__model_combined__";
+    std::string param_filename = "__params_combined__";
+    inference_program =
+        paddle::inference::Load(executor, scope, dirname + "/" + prog_filename,
+                                dirname + "/" + param_filename);
+  } else {
+    // Parameters are saved in separate files located in the specified
+    // `dirname`.
+    inference_program = paddle::inference::Load(executor, scope, dirname);
+  }
+  return inference_program;
+}
+
+std::vector<std::vector<int64_t>> GetFeedTargetShapes(
+    const std::string& dirname, const bool is_combined = false) {
+  auto place = paddle::platform::CPUPlace();
+  auto executor = paddle::framework::Executor(place);
+  auto* scope = new paddle::framework::Scope();
+
+  auto inference_program = InitProgram(&executor, scope, dirname, is_combined);
+  auto& global_block = inference_program->Block(0);
+
+  const std::vector<std::string>& feed_target_names =
+      inference_program->GetFeedTargetNames();
+  std::vector<std::vector<int64_t>> feed_target_shapes;
+  for (size_t i = 0; i < feed_target_names.size(); ++i) {
+    auto* var = global_block.FindVar(feed_target_names[i]);
+    std::vector<int64_t> var_shape = var->GetShape();
+    feed_target_shapes.push_back(var_shape);
+  }
+
+  delete scope;
+  return feed_target_shapes;
+}
+
 template <typename Place, bool CreateVars = true, bool PrepareContext = false>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
@@ -124,22 +168,7 @@ void TestInference(const std::string& dirname,
     paddle::platform::RecordEvent record_event(
         "init_program",
         paddle::platform::DeviceContextPool::Instance().Get(place));
-
-    if (is_combined) {
-      // All parameters are saved in a single file.
-      // Hard-coding the file names of program and parameters in unittest.
-      // The file names should be consistent with that used in Python API
-      // `fluid.io.save_inference_model`.
-      std::string prog_filename = "__model_combined__";
-      std::string param_filename = "__params_combined__";
-      inference_program = paddle::inference::Load(
-          &executor, scope, dirname + "/" + prog_filename,
-          dirname + "/" + param_filename);
-    } else {
-      // Parameters are saved in separate files sited in the specified
-      // `dirname`.
-      inference_program = paddle::inference::Load(&executor, scope, dirname);
-    }
+    inference_program = InitProgram(&executor, scope, dirname, is_combined);
   }
   // Disable the profiler and print the timing information
   paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault,
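
For a model saved by `fluid.io.save_inference_model` with all parameters in one file, the new `InitProgram` helper can be called with `is_combined = true`. A hedged usage sketch (the model directory path is hypothetical):

    paddle::platform::CPUPlace place;
    paddle::framework::Executor executor(place);
    paddle::framework::Scope scope;
    // Loads <dirname>/__model_combined__ and <dirname>/__params_combined__.
    auto program = InitProgram(&executor, &scope, "/path/to/model",
                               /*is_combined=*/true);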

paddle/fluid/operators/math/sequence2batch.h

Lines changed: 14 additions & 7 deletions
@@ -64,18 +64,22 @@ class LoDTensor2BatchFunctor {
                   bool is_reverse = false) const {
     if (!is_cal_batch_lod) {
       auto lods = batch->lod();
-      PADDLE_ENFORCE_GT(lods.size(), 2UL);
-      PADDLE_ENFORCE_EQ(lods[1].size(),
-                        static_cast<size_t>(lod_tensor.dims()[0]));
+      PADDLE_ENFORCE_GT(lods.size(), 2UL,
+                        "The LoD of LoDTensor should include at least 2-level "
+                        "sequence information.");
+      PADDLE_ENFORCE_EQ(
+          lods[1].size(), static_cast<size_t>(lod_tensor.dims()[0]),
+          "The LoD information should be consistent with the dims.");
       CopyMatrixRowsFunctor<DeviceContext, T> to_batch;
       to_batch(context, lod_tensor, lods[1], batch, true);
       return;
     }
 
     auto lods = lod_tensor.lod();
-    auto lod = lods[0];
     PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now.");
 
+    auto lod = lods[0];
+
     std::vector<SeqInfo> seq_info;
     for (size_t seq_id = 0; seq_id < lod.size() - 1; ++seq_id) {
       int length = lod[seq_id + 1] - lod[seq_id];
@@ -157,9 +161,12 @@ class Batch2LoDTensorFunctor {
                   const framework::LoDTensor& batch,
                   framework::LoDTensor* lod_tensor) const {
     auto in_lod = batch.lod();
-    PADDLE_ENFORCE_GT(in_lod.size(), 2UL);
-    PADDLE_ENFORCE_EQ(in_lod[1].size(),
-                      static_cast<size_t>(lod_tensor->dims()[0]));
+    PADDLE_ENFORCE_GT(in_lod.size(), 2UL,
+                      "The LoD of LoDTensor should include at least 2-level "
+                      "sequence information.");
+    PADDLE_ENFORCE_EQ(
+      in_lod[1].size(), static_cast<size_t>(lod_tensor->dims()[0]),
+      "The LoD information should be consistent with the dims.");
     CopyMatrixRowsFunctor<DeviceContext, T> to_seq;
     to_seq(context, batch, in_lod[1], lod_tensor, false);
   }
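
The change here only polishes the enforce statements: each check now carries a human-readable message, which the `PADDLE_ENFORCE_*` macros accept after the operands and attach to the exception on failure. The general form (operands are illustrative):

    // Throws if !(lods.size() > 2UL); the message is appended to the error,
    // making LoD/shape mismatches easier to diagnose than a bare condition.
    PADDLE_ENFORCE_GT(lods.size(), 2UL,
                      "The LoD of LoDTensor should include at least 2-level "
                      "sequence information.");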

paddle/fluid/operators/reshape_op.h

Lines changed: 4 additions & 1 deletion
@@ -124,7 +124,10 @@ class ReshapeKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext &ctx) const {
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     auto *in = ctx.Input<framework::LoDTensor>("X");
-    auto *shape_tensor = ctx.Input<framework::LoDTensor>("Shape");
+
+    auto *shape_tensor = ctx.HasInput("Shape")
+                             ? ctx.Input<framework::LoDTensor>("Shape")
+                             : nullptr;
 
     framework::DDim out_dims = out->dims();
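
`Shape` can legitimately be absent at runtime because it is declared dispensable in the op's proto. A sketch of what such a declaration looks like in an `OpProtoAndCheckerMaker` (the doc string is illustrative, not copied from the actual reshape op maker):

    AddInput("Shape",
             "(Tensor<int32>, optional) Target shape of the output tensor.")
        .AsDispensable();  // marks the input as optional ("dispensable")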
