
Commit 6402b59

Fix CPPLint issues in some tests in fluid/framework (#10068)
* Fix CPPLint in data_device_transform_test
* Fix compilation error
* Fix compilation error
* Fix CPPLint errors in data_layout_transform_test
* Fix CPPLint errors in data_type_transform_test
* Fix CPPLint errors in data_type_transform_test.cu
* Fix compilation error
* Fix CPPLint issues in threadpool_test
* Fix CPPLint issues in op_registry_test
* Fix CPPLint issues in operator_test
* Fix compilation error
* test
1 parent 12ae354 commit 6402b59
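Every diff on this page applies the same fix: cpplint's build/namespaces check ("Do not use namespace using-directives.") forbids using namespace, so the directives are deleted and each affected name is spelled out with its full namespace path. A minimal standalone sketch of the before/after pattern (the demo namespace and Tensor struct here are stand-ins, not Paddle code):

#include <iostream>

namespace demo {
namespace framework {
struct Tensor {};  // stand-in for a framework type
}  // namespace framework
}  // namespace demo

// Before (flagged by cpplint [build/namespaces]):
//   using namespace demo::framework;
//   Tensor t;

int main() {
  demo::framework::Tensor t;  // after: fully qualified, no using-directive
  (void)t;
  std::cout << "qualified names keep cpplint happy\n";
  return 0;
}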

7 files changed (+240, -174 lines)

paddle/fluid/framework/data_device_transform_test.cu

Lines changed: 10 additions & 9 deletions

@@ -103,9 +103,7 @@ static void BuildVar(const std::string& param_name,
 }
 
 TEST(Operator, CPUtoGPU) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-  InitDevices(true);
+  paddle::framework::InitDevices(true);
 
   paddle::framework::Scope scope;
   paddle::platform::CPUPlace cpu_place;
@@ -118,8 +116,9 @@ TEST(Operator, CPUtoGPU) {
 
   auto cpu_op = paddle::framework::OpRegistry::CreateOp(cpu_op_desc);
   // prepare input
-  auto* in_t = scope.Var("IN1")->GetMutable<LoDTensor>();
-  auto* src_ptr = in_t->mutable_data<float>({2, 3}, CPUPlace());
+  auto* in_t = scope.Var("IN1")->GetMutable<paddle::framework::LoDTensor>();
+  auto* src_ptr =
+      in_t->mutable_data<float>({2, 3}, paddle::platform::CPUPlace());
   for (int i = 0; i < 2 * 3; ++i) {
     src_ptr[i] = static_cast<float>(i);
   }
@@ -128,7 +127,7 @@ TEST(Operator, CPUtoGPU) {
   auto* output = scope.Var("OUT1");
   cpu_op->Run(scope, cpu_place);
 
-  auto* output_ptr = output->Get<LoDTensor>().data<float>();
+  auto* output_ptr = output->Get<paddle::framework::LoDTensor>().data<float>();
   for (int i = 0; i < 2 * 3; ++i) {
     ASSERT_EQ(output_ptr[i], static_cast<float>(i) * 2);
   }
@@ -153,12 +152,14 @@ TEST(Operator, CPUtoGPU) {
   VLOG(3) << "after gpu_op run";
 
   // auto* output2_ptr = output2->Get<LoDTensor>().data<float>();
-  DeviceContextPool& pool = DeviceContextPool::Instance();
+  paddle::platform::DeviceContextPool& pool =
+      paddle::platform::DeviceContextPool::Instance();
   auto dev_ctx = pool.Get(cuda_place);
 
   paddle::framework::Tensor output_tensor;
-  TensorCopy(output2->Get<LoDTensor>(), paddle::platform::CPUPlace(), *dev_ctx,
-             &output_tensor);
+  paddle::framework::TensorCopy(output2->Get<paddle::framework::LoDTensor>(),
+                                paddle::platform::CPUPlace(), *dev_ctx,
+                                &output_tensor);
 
   dev_ctx->Wait();
   float* output2_ptr = output_tensor.data<float>();
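One functional detail worth highlighting in this file: the device-to-host copy at the end goes through a device context, and the host must not read the buffer until that context has been synchronized. Below is a commented restatement of the new lines (the comments are additions for this writeup, not part of the commit, and the fragment builds only inside the Paddle source tree):

// Get the cached device context for the CUDA place.
paddle::platform::DeviceContextPool& pool =
    paddle::platform::DeviceContextPool::Instance();
auto dev_ctx = pool.Get(cuda_place);

// TensorCopy may run asynchronously on the device context's stream...
paddle::framework::Tensor output_tensor;
paddle::framework::TensorCopy(output2->Get<paddle::framework::LoDTensor>(),
                              paddle::platform::CPUPlace(), *dev_ctx,
                              &output_tensor);

// ...so Wait() before the CPU reads the copied bytes.
dev_ctx->Wait();
float* output2_ptr = output_tensor.data<float>();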

paddle/fluid/framework/data_layout_transform_test.cc

Lines changed: 21 additions & 20 deletions

@@ -18,27 +18,28 @@
 #include "paddle/fluid/platform/device_context.h"
 
 TEST(DataTransform, DataLayoutFunction) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-
-  auto place = CPUPlace();
-  Tensor in = Tensor();
-  Tensor out = Tensor();
-  in.mutable_data<double>(make_ddim({2, 3, 1, 2}), place);
-  in.set_layout(DataLayout::kNHWC);
-
-  auto kernel_nhwc = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kNHWC, LibraryType::kPlain);
-  auto kernel_ncwh = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kNCHW, LibraryType::kPlain);
-
-  TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out);
-
-  EXPECT_TRUE(out.layout() == DataLayout::kNCHW);
-  EXPECT_TRUE(out.dims() == make_ddim({2, 2, 3, 1}));
+  auto place = paddle::platform::CPUPlace();
+  paddle::framework::Tensor in = paddle::framework::Tensor();
+  paddle::framework::Tensor out = paddle::framework::Tensor();
+  in.mutable_data<double>(paddle::framework::make_ddim({2, 3, 1, 2}), place);
+  in.set_layout(paddle::framework::DataLayout::kNHWC);
+
+  auto kernel_nhwc = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kNHWC,
+      paddle::framework::LibraryType::kPlain);
+  auto kernel_ncwh = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kNCHW,
+      paddle::framework::LibraryType::kPlain);
+
+  paddle::framework::TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out);
+
+  EXPECT_TRUE(out.layout() == paddle::framework::DataLayout::kNCHW);
+  EXPECT_TRUE(out.dims() == paddle::framework::make_ddim({2, 2, 3, 1}));
 
   TransDataLayout(kernel_ncwh, kernel_nhwc, in, &out);
 
-  EXPECT_TRUE(in.layout() == DataLayout::kNHWC);
-  EXPECT_TRUE(in.dims() == make_ddim({2, 3, 1, 2}));
+  EXPECT_TRUE(in.layout() == paddle::framework::DataLayout::kNHWC);
+  EXPECT_TRUE(in.dims() == paddle::framework::make_ddim({2, 3, 1, 2}));
 }
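A side note on the expectations in this test: TransDataLayout permutes the axes from NHWC order (N, H, W, C) to NCHW order (N, C, H, W), which is why dims {2, 3, 1, 2} come out as {2, 2, 3, 1}. A standalone sketch of just the dimension permutation (NhwcToNchw is an illustrative helper, not a Paddle API):

#include <array>
#include <cassert>
#include <cstdint>

// NHWC -> NCHW keeps axis 0 (batch) and moves channels to axis 1.
std::array<int64_t, 4> NhwcToNchw(const std::array<int64_t, 4>& d) {
  return {d[0], d[3], d[1], d[2]};
}

int main() {
  const std::array<int64_t, 4> in = {2, 3, 1, 2};  // N=2, H=3, W=1, C=2
  const std::array<int64_t, 4> out = NhwcToNchw(in);
  assert((out == std::array<int64_t, 4>{2, 2, 3, 1}));  // matches the test
  return 0;
}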

paddle/fluid/framework/data_type_transform_test.cc

Lines changed: 76 additions & 50 deletions

@@ -17,43 +17,58 @@ limitations under the License. */
 #include "gtest/gtest.h"
 
 TEST(DataTypeTransform, CPUTransform) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-
-  auto place = CPUPlace();
-
-  auto kernel_fp16 = OpKernelType(proto::VarType::FP16, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_fp32 = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_fp64 = OpKernelType(proto::VarType::FP64, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_int32 = OpKernelType(proto::VarType::INT32, place,
-                                   DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_int64 = OpKernelType(proto::VarType::INT64, place,
-                                   DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_bool = OpKernelType(proto::VarType::BOOL, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
+  auto place = paddle::platform::CPUPlace();
+
+  auto kernel_fp16 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP16, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_fp32 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_fp64 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP64, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_int32 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::INT32, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_int64 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::INT64, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_bool = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::BOOL, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
 
   // data type transform from float32
   {
-    Tensor in;
-    Tensor out;
+    paddle::framework::Tensor in;
+    paddle::framework::Tensor out;
 
-    float* ptr = in.mutable_data<float>(make_ddim({2, 3}), place);
+    float* ptr =
+        in.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
     int data_number = 2 * 3;
 
     for (int i = 0; i < data_number; ++i) {
      ptr[i] = i / 3;
    }
 
-    TransDataType(kernel_fp32, kernel_fp64, in, &out);
+    paddle::framework::TransDataType(kernel_fp32, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3));
     }
 
-    TransDataType(kernel_fp32, kernel_int32, in, &out);
+    paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
     int* out_data_int = out.data<int>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int[i], static_cast<int>(i / 3));
@@ -62,105 +77,116 @@ TEST(DataTypeTransform, CPUTransform) {
 
   // data type transform from/to float16
   {
-    Tensor in;
-    Tensor out;
+    paddle::framework::Tensor in;
+    paddle::framework::Tensor out;
 
-    float16* ptr = in.mutable_data<float16>(make_ddim({2, 3}), place);
+    paddle::platform::float16* ptr = in.mutable_data<paddle::platform::float16>(
+        paddle::framework::make_ddim({2, 3}), place);
     int data_number = 2 * 3;
 
     for (int i = 0; i < data_number; ++i) {
       ptr[i] = i;
     }
 
     // transform from float16 to other data types
-    TransDataType(kernel_fp16, kernel_fp32, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_fp32, in, &out);
     float* out_data_float = out.data<float>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_float[i], static_cast<float>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_fp64, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_double[i], static_cast<double>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_int32, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_int32, in, &out);
     int* out_data_int = out.data<int>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int[i], static_cast<int>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_int64, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_int64, in, &out);
     int64_t* out_data_int64 = out.data<int64_t>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int64[i], static_cast<int64_t>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_bool, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_bool, in, &out);
     bool* out_data_bool = out.data<bool>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_bool[i], static_cast<bool>(ptr[i]));
     }
 
     // transform float to float16
-    float* in_data_float = in.mutable_data<float>(make_ddim({2, 3}), place);
+    float* in_data_float =
+        in.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_float[i] = i;
     }
 
-    TransDataType(kernel_fp32, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_fp32, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_float[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_float[i]).x);
     }
 
     // transform double to float16
-    double* in_data_double = in.mutable_data<double>(make_ddim({2, 3}), place);
+    double* in_data_double =
+        in.mutable_data<double>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_double[i] = i;
     }
 
-    TransDataType(kernel_fp64, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_fp64, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
    for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_double[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_double[i]).x);
     }
 
     // transform int to float16
-    int* in_data_int = in.mutable_data<int>(make_ddim({2, 3}), place);
+    int* in_data_int =
+        in.mutable_data<int>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_int[i] = i;
     }
 
-    TransDataType(kernel_int32, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_int32, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_int[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_int[i]).x);
     }
 
     // transform int64 to float16
-    int64_t* in_data_int64 = in.mutable_data<int64_t>(make_ddim({2, 3}), place);
+    int64_t* in_data_int64 =
+        in.mutable_data<int64_t>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_int64[i] = i;
     }
 
-    TransDataType(kernel_int64, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_int64, kernel_fp16, in, &out);
    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_int64[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_int64[i]).x);
     }
 
     // transform bool to float16
-    bool* in_data_bool = in.mutable_data<bool>(make_ddim({2, 3}), place);
+    bool* in_data_bool =
+        in.mutable_data<bool>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_bool[i] = i;
     }
 
-    TransDataType(kernel_bool, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_bool, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_bool[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_bool[i]).x);
     }
   }
 }
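One detail of the float16 assertions above: they compare ptr[i].x rather than the values themselves. paddle::platform::float16 exposes its raw binary16 bit pattern through a public member x (the test's direct access to .x shows this), so EXPECT_EQ on .x checks bit-exact equality of the half representations instead of comparing after an implicit promotion back to float. A toy standalone illustration (Half is a mock, far simpler than Paddle's real float16):

#include <cassert>
#include <cstdint>

// Mock of a half-precision type that exposes its bit pattern, as
// paddle::platform::float16 does via its x member.
struct Half {
  uint16_t x;
};

int main() {
  Half a{0x4200};  // binary16 bit pattern of 3.0
  Half b{0x4200};
  Half c{0x4400};  // binary16 bit pattern of 4.0
  assert(a.x == b.x);  // bit-exact equality, as in EXPECT_EQ(ptr[i].x, ...)
  assert(a.x != c.x);
  return 0;
}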
