Skip to content

Commit 58fd4c0

Browse files
authored
Merge pull request #10111 from abhinavarora/cpplint_tensor_util_test
Fix CPPLint issues in tensor_util_test
2 parents a4d3de0 + 1c70600 commit 58fd4c0

File tree

2 files changed

+56
-60
lines changed

2 files changed

+56
-60
lines changed

paddle/fluid/framework/tensor_util_test.cc

Lines changed: 48 additions & 52 deletions
Original file line number | Diff line number | Diff line change
@@ -105,16 +105,14 @@ TEST(TensorCopy, Tensor) {
105105
}
106106

107107
TEST(TensorFromVector, Tensor) {
108-
using namespace paddle::framework;
109-
using namespace paddle::platform;
110108
{
111109
std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
112-
Tensor cpu_tensor;
110+
paddle::framework::Tensor cpu_tensor;
113111

114112
// Copy to CPU Tensor
115-
cpu_tensor.Resize(make_ddim({3, 3}));
113+
cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
116114
auto cpu_place = new paddle::platform::CPUPlace();
117-
TensorFromVector<int>(src_vec, &cpu_tensor);
115+
paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
118116

119117
// Compare Tensors
120118
const int* cpu_ptr = cpu_tensor.data<int>();
@@ -125,8 +123,8 @@ TEST(TensorFromVector, Tensor) {
125123
}
126124

127125
src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
128-
cpu_tensor.Resize(make_ddim({2, 2}));
129-
TensorFromVector<int>(src_vec, &cpu_tensor);
126+
cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
127+
paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
130128
cpu_ptr = cpu_tensor.data<int>();
131129
src_ptr = src_vec.data();
132130
ASSERT_NE(src_ptr, cpu_ptr);
@@ -140,23 +138,23 @@ TEST(TensorFromVector, Tensor) {
140138
#ifdef PADDLE_WITH_CUDA
141139
{
142140
std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
143-
Tensor cpu_tensor;
144-
Tensor gpu_tensor;
145-
Tensor dst_tensor;
141+
paddle::framework::Tensor cpu_tensor;
142+
paddle::framework::Tensor gpu_tensor;
143+
paddle::framework::Tensor dst_tensor;
146144

147145
// Copy to CPU Tensor
148146
cpu_tensor.Resize(make_ddim({3, 3}));
149147
auto cpu_place = new paddle::platform::CPUPlace();
150-
CPUDeviceContext cpu_ctx(*cpu_place);
151-
TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
148+
paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place);
149+
paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
152150

153151
// Copy to GPUTensor
154-
gpu_tensor.Resize(make_ddim({3, 3}));
152+
gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
155153
auto gpu_place = new paddle::platform::CUDAPlace();
156-
CUDADeviceContext gpu_ctx(*gpu_place);
157-
TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
154+
paddle::platform::CUDADeviceContext gpu_ctx(*gpu_place);
155+
paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
158156
// Copy from GPU to CPU tensor for comparison
159-
TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
157+
paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
160158

161159
// Sync before Compare Tensors
162160
gpu_ctx.Wait();
@@ -172,11 +170,11 @@ TEST(TensorFromVector, Tensor) {
172170

173171
src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
174172

175-
cpu_tensor.Resize(make_ddim({2, 2}));
176-
TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
177-
gpu_tensor.Resize(make_ddim({2, 2}));
178-
TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
179-
TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
173+
cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
174+
paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
175+
gpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
176+
paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
177+
paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
180178

181179
// Sync before Compare Tensors
182180
gpu_ctx.Wait();
@@ -197,18 +195,16 @@ TEST(TensorFromVector, Tensor) {
197195
}
198196

199197
TEST(TensorToVector, Tensor) {
200-
using namespace paddle::framework;
201-
using namespace paddle::platform;
202198
{
203-
Tensor src;
204-
int* src_ptr = src.mutable_data<int>({3, 3}, CPUPlace());
199+
paddle::framework::Tensor src;
200+
int* src_ptr = src.mutable_data<int>({3, 3}, paddle::platform::CPUPlace());
205201
for (int i = 0; i < 3 * 3; ++i) {
206202
src_ptr[i] = i;
207203
}
208204

209-
CPUPlace place;
205+
paddle::platform::CPUPlace place;
210206
std::vector<int> dst;
211-
TensorToVector<int>(src, &dst);
207+
paddle::framework::TensorToVector<int>(src, &dst);
212208

213209
for (int i = 0; i < 3 * 3; ++i) {
214210
EXPECT_EQ(src_ptr[i], dst[i]);
@@ -217,13 +213,13 @@ TEST(TensorToVector, Tensor) {
217213
#ifdef PADDLE_WITH_CUDA
218214
{
219215
std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
220-
Tensor gpu_tensor;
221-
CUDAPlace place;
222-
CUDADeviceContext gpu_ctx(place);
223-
TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
216+
paddle::framework::Tensor gpu_tensor;
217+
paddle::platform::CUDAPlace place;
218+
paddle::platform::CUDADeviceContext gpu_ctx(place);
219+
paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
224220

225221
std::vector<int> dst;
226-
TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);
222+
paddle::framework::TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);
227223

228224
for (int i = 0; i < 3 * 3; ++i) {
229225
EXPECT_EQ(src_vec[i], dst[i]);
@@ -233,54 +229,54 @@ TEST(TensorToVector, Tensor) {
233229
}
234230

235231
TEST(TensorContainsNAN, CPU) {
236-
using namespace paddle::framework;
237-
using namespace paddle::platform;
238232
{
239-
Tensor src;
240-
float* buf = src.mutable_data<float>({3}, CPUPlace());
233+
paddle::framework::Tensor src;
234+
float* buf = src.mutable_data<float>({3}, paddle::platform::CPUPlace());
241235
buf[0] = 0.0;
242236
buf[1] = NAN;
243237
buf[2] = 0.0;
244-
ASSERT_TRUE(TensorContainsNAN(src));
238+
ASSERT_TRUE(paddle::framework::TensorContainsNAN(src));
245239
buf[1] = 0.0;
246-
ASSERT_FALSE(TensorContainsNAN(src));
240+
ASSERT_FALSE(paddle::framework::TensorContainsNAN(src));
247241
}
248242

249243
{
250-
Tensor src;
251-
float16* buf = src.mutable_data<float16>({3}, CPUPlace());
244+
paddle::framework::Tensor src;
245+
paddle::platform::float16* buf =
246+
src.mutable_data<paddle::platform::float16>(
247+
{3}, paddle::platform::CPUPlace());
252248
buf[0] = 0.0;
253249
buf[1].x = 0x7fff;
254250
buf[2] = 0.0;
255-
ASSERT_TRUE(TensorContainsNAN(src));
251+
ASSERT_TRUE(paddle::framework::TensorContainsNAN(src));
256252
buf[1] = 0.0;
257-
ASSERT_FALSE(TensorContainsNAN(src));
253+
ASSERT_FALSE(paddle::framework::TensorContainsNAN(src));
258254
}
259255
}
260256

261257
TEST(TensorContainsInf, CPU) {
262-
using namespace paddle::framework;
263-
using namespace paddle::platform;
264258
{
265-
Tensor src;
266-
double* buf = src.mutable_data<double>({3}, CPUPlace());
259+
paddle::framework::Tensor src;
260+
double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
267261
buf[0] = 1.0;
268262
buf[1] = INFINITY;
269263
buf[2] = 0.0;
270-
ASSERT_TRUE(TensorContainsInf(src));
264+
ASSERT_TRUE(paddle::framework::TensorContainsInf(src));
271265
buf[1] = 1.0;
272-
ASSERT_FALSE(TensorContainsInf(src));
266+
ASSERT_FALSE(paddle::framework::TensorContainsInf(src));
273267
}
274268

275269
{
276-
Tensor src;
277-
float16* buf = src.mutable_data<float16>({3}, CPUPlace());
270+
paddle::framework::Tensor src;
271+
paddle::platform::float16* buf =
272+
src.mutable_data<paddle::platform::float16>(
273+
{3}, paddle::platform::CPUPlace());
278274
buf[0] = 1.0;
279275
buf[1].x = 0x7c00;
280276
buf[2] = 0.0;
281-
ASSERT_TRUE(TensorContainsInf(src));
277+
ASSERT_TRUE(paddle::framework::TensorContainsInf(src));
282278
buf[1] = 1.0;
283-
ASSERT_FALSE(TensorContainsInf(src));
279+
ASSERT_FALSE(paddle::framework::TensorContainsInf(src));
284280
}
285281
}
286282

paddle/fluid/framework/tensor_util_test.cu

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -45,9 +45,8 @@ static __global__ void FillInf(platform::float16* buf) {
4545
}
4646

4747
TEST(TensorContainsNAN, GPU) {
48-
using namespace paddle::platform;
49-
CUDAPlace gpu(0);
50-
auto& pool = DeviceContextPool::Instance();
48+
paddle::platform::CUDAPlace gpu(0);
49+
auto& pool = paddle::platform::DeviceContextPool::Instance();
5150
auto* cuda_ctx = pool.GetByPlace(gpu);
5251
{
5352
Tensor tensor;
@@ -58,17 +57,17 @@ TEST(TensorContainsNAN, GPU) {
5857
}
5958
{
6059
Tensor tensor;
61-
float16* buf = tensor.mutable_data<float16>({3}, gpu);
60+
paddle::platform::float16* buf =
61+
tensor.mutable_data<paddle::platform::float16>({3}, gpu);
6262
FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
6363
cuda_ctx->Wait();
6464
ASSERT_TRUE(TensorContainsNAN(tensor));
6565
}
6666
}
6767
6868
TEST(TensorContainsInf, GPU) {
69-
using namespace paddle::platform;
70-
CUDAPlace gpu(0);
71-
auto& pool = DeviceContextPool::Instance();
69+
paddle::platform::CUDAPlace gpu(0);
70+
auto& pool = paddle::platform::DeviceContextPool::Instance();
7271
auto* cuda_ctx = pool.GetByPlace(gpu);
7372
{
7473
Tensor tensor;
@@ -79,7 +78,8 @@ TEST(TensorContainsInf, GPU) {
7978
}
8079
{
8180
Tensor tensor;
82-
float16* buf = tensor.mutable_data<float16>({3}, gpu);
81+
paddle::platform::float16* buf =
82+
tensor.mutable_data<paddle::platform::float16>({3}, gpu);
8383
FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
8484
cuda_ctx->Wait();
8585
ASSERT_TRUE(TensorContainsInf(tensor));

0 commit comments

Comments (0)