
Commit 9f3ac22

Unify Fluid code to Google C++ style

1 parent d00bd9e
20 files changed: 52 additions, 109 deletions
File renamed without changes.
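
Every hunk in this commit is a pure formatting change: parameter and argument lists that were previously broken onto one line each are re-wrapped to fill the 80-column limit, with continuation lines aligned to the opening parenthesis, as Google C++ style prescribes. A minimal sketch of the pattern follows (the declaration is hypothetical and not taken from the diff; the repository presumably drives this through clang-format with a Google-based style file):

#include <string>

namespace framework {
class Executor;
class Scope;
}  // namespace framework

// Hypothetical declaration, for illustration only.
//
// Before: one parameter per line, regardless of available width.
//
//   void LoadModel(framework::Executor& executor,
//                  framework::Scope& scope,
//                  const std::string& dirname,
//                  const std::string& param_filename);
//
// After (Google C++ style): parameters are packed up to the 80-column limit
// and continuation lines align with the opening parenthesis.
void LoadModel(framework::Executor& executor, framework::Scope& scope,
               const std::string& dirname, const std::string& param_filename);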

paddle/fluid/inference/io.cc

Lines changed: 3 additions & 6 deletions
@@ -41,8 +41,7 @@ bool IsPersistable(const framework::VarDesc* var) {
   return false;
 }
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename) {

@@ -108,10 +107,8 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor& executor,
 }
 
 std::unique_ptr<framework::ProgramDesc> Load(
-    framework::Executor& executor,
-    framework::Scope& scope,
-    const std::string& prog_filename,
-    const std::string& param_filename) {
+    framework::Executor& executor, framework::Scope& scope,
+    const std::string& prog_filename, const std::string& param_filename) {
   std::string model_filename = prog_filename;
   std::string program_desc_str;
   ReadBinaryFile(model_filename, program_desc_str);

paddle/fluid/inference/io.h

Lines changed: 1 addition & 2 deletions
@@ -24,8 +24,7 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename);

paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc

Lines changed: 2 additions & 2 deletions
@@ -30,8 +30,8 @@ TEST(inference, fit_a_line) {
   // The second dim of the input tensor should be 13
   // The input data should be >= 0
   int64_t batch_size = 10;
-  SetupTensor<float>(
-      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
+                     static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);

paddle/fluid/inference/tests/book/test_inference_image_classification.cc

Lines changed: 6 additions & 8 deletions
@@ -35,10 +35,8 @@ TEST(inference, image_classification) {
   paddle::framework::LoDTensor input;
   // Use normilized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 3, 32, 32},
-                     static_cast<float>(0),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 3, 32, 32},
+                     static_cast<float>(0), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);

@@ -48,8 +46,8 @@ TEST(inference, image_classification) {
 
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA

@@ -59,8 +57,8 @@ TEST(inference, image_classification) {
 
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);

paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc

Lines changed: 8 additions & 24 deletions
@@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) {
   int64_t predicate_dict_len = 3162;
   int64_t mark_dict_len = 2;
 
-  SetupLoDTensor(word,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(word, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(predicate,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(predicate, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(predicate_dict_len - 1));
-  SetupLoDTensor(ctx_n2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_n1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_0,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(mark,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(mark, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;

paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc

Lines changed: 6 additions & 8 deletions
@@ -35,10 +35,8 @@ TEST(inference, recognize_digits) {
   paddle::framework::LoDTensor input;
   // Use normilized image pixels as input data,
   // which should be in the range [-1.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 1, 28, 28},
-                     static_cast<float>(-1),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 1, 28, 28},
+                     static_cast<float>(-1), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);

@@ -49,8 +47,8 @@ TEST(inference, recognize_digits) {
 
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat, is_combined);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA

@@ -60,8 +58,8 @@ TEST(inference, recognize_digits) {
 
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat, is_combined);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);

paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc

Lines changed: 4 additions & 4 deletions
@@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) {
   paddle::framework::LoDTensor word_data, trg_word;
   paddle::framework::LoD lod{{0, 4, 10}};
 
-  SetupLoDTensor(
-      word_data, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      trg_word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  SetupLoDTensor(word_data, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
+  SetupLoDTensor(trg_word, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word_data);

paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc

Lines changed: 1 addition & 3 deletions
@@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) {
   paddle::framework::LoD lod{{0, 4, 10}};
   int64_t word_dict_len = 5147;
 
-  SetupLoDTensor(words,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(words, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;

paddle/fluid/inference/tests/test_helper.h

Lines changed: 8 additions & 17 deletions
@@ -19,9 +19,7 @@ limitations under the License. */
 
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 T lower,
-                 T upper) {
+                 paddle::framework::DDim dims, T lower, T upper) {
   srand(time(0));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   for (int i = 0; i < input.numel(); ++i) {

@@ -33,27 +31,23 @@ void SetupTensor(paddle::framework::LoDTensor& input,
 
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 std::vector<T>& data) {
+                 paddle::framework::DDim dims, std::vector<T>& data) {
   CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   memcpy(input_ptr, data.data(), input.numel() * sizeof(T));
 }
 
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::LoD& lod,
-                    T lower,
-                    T upper) {
+                    paddle::framework::LoD& lod, T lower, T upper) {
   input.set_lod(lod);
   int dim = lod[0][lod[0].size() - 1];
   SetupTensor<T>(input, {dim, 1}, lower, upper);
 }
 
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::DDim dims,
-                    paddle::framework::LoD lod,
+                    paddle::framework::DDim dims, paddle::framework::LoD lod,
                     std::vector<T>& data) {
   const size_t level = lod.size() - 1;
   CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));

@@ -92,8 +86,7 @@ template <typename Place>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                    std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
-                   const int repeat = 1,
-                   const bool is_combined = false) {
+                   const int repeat = 1, const bool is_combined = false) {
   // 1. Define place, executor, scope
   auto place = Place();
   auto executor = paddle::framework::Executor(place);

@@ -132,11 +125,9 @@ void TestInference(const std::string& dirname,
     // `fluid.io.save_inference_model`.
     std::string prog_filename = "__model_combined__";
     std::string param_filename = "__params_combined__";
-    inference_program =
-        paddle::inference::Load(executor,
-                                *scope,
-                                dirname + "/" + prog_filename,
-                                dirname + "/" + param_filename);
+    inference_program = paddle::inference::Load(
+        executor, *scope, dirname + "/" + prog_filename,
+        dirname + "/" + param_filename);
   } else {
     // Parameters are saved in separate files sited in the specified
     // `dirname`.
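
For reference, the helpers reformatted above are used by the book tests roughly as follows. This is a condensed sketch assembled from test_inference_fit_a_line.cc and test_helper.h in this commit; the test name and the model path are illustrative only:

TEST(inference, fit_a_line_sketch) {
  // Illustrative path; the real tests take the model directory from a flag.
  std::string dirname = "/path/to/fit_a_line.inference.model";

  // Fill the input tensor with random values between 0 and 10.
  paddle::framework::LoDTensor input;
  int64_t batch_size = 10;
  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
                     static_cast<float>(10));

  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
  cpu_feeds.push_back(&input);

  paddle::framework::LoDTensor output;
  std::vector<paddle::framework::LoDTensor*> cpu_fetchs;
  cpu_fetchs.push_back(&output);

  // Loads the program with paddle::inference::Load / LoadPersistables and
  // runs the executor FLAGS_repeat times on CPU.
  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs,
                                            FLAGS_repeat);
  LOG(INFO) << output.dims();
}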
