Skip to content

Commit b6ec45d

Browse files
author
Abhinav Arora
committed
Fix CPPLint warnings in tensor_util_test
1 parent 12ae354 commit b6ec45d

File tree

2 files changed

+48
-58
lines changed

2 files changed

+48
-58
lines changed

paddle/fluid/framework/tensor_util_test.cc

Lines changed: 44 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -105,16 +105,14 @@ TEST(TensorCopy, Tensor) {
105105
}
106106

107107
TEST(TensorFromVector, Tensor) {
108-
using namespace paddle::framework;
109-
using namespace paddle::platform;
110108
{
111109
std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
112-
Tensor cpu_tensor;
110+
paddle::framework::Tensor cpu_tensor;
113111

114112
// Copy to CPU Tensor
115-
cpu_tensor.Resize(make_ddim({3, 3}));
113+
cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
116114
auto cpu_place = new paddle::platform::CPUPlace();
117-
TensorFromVector<int>(src_vec, &cpu_tensor);
115+
paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
118116

119117
// Compare Tensors
120118
const int* cpu_ptr = cpu_tensor.data<int>();
@@ -125,8 +123,8 @@ TEST(TensorFromVector, Tensor) {
125123
}
126124

127125
src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
128-
cpu_tensor.Resize(make_ddim({2, 2}));
129-
TensorFromVector<int>(src_vec, &cpu_tensor);
126+
cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
127+
paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
130128
cpu_ptr = cpu_tensor.data<int>();
131129
src_ptr = src_vec.data();
132130
ASSERT_NE(src_ptr, cpu_ptr);
@@ -140,23 +138,23 @@ TEST(TensorFromVector, Tensor) {
140138
#ifdef PADDLE_WITH_CUDA
141139
{
142140
std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
143-
Tensor cpu_tensor;
144-
Tensor gpu_tensor;
145-
Tensor dst_tensor;
141+
paddle::framework::Tensor cpu_tensor;
142+
paddle::framework::Tensor gpu_tensor;
143+
paddle::framework::Tensor dst_tensor;
146144

147145
// Copy to CPU Tensor
148146
cpu_tensor.Resize(make_ddim({3, 3}));
149147
auto cpu_place = new paddle::platform::CPUPlace();
150-
CPUDeviceContext cpu_ctx(*cpu_place);
151-
TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
148+
paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place);
149+
paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
152150

153151
// Copy to GPUTensor
154-
gpu_tensor.Resize(make_ddim({3, 3}));
152+
gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
155153
auto gpu_place = new paddle::platform::CUDAPlace();
156-
CUDADeviceContext gpu_ctx(*gpu_place);
157-
TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
154+
paddle::platform::CUDADeviceContext gpu_ctx(*gpu_place);
155+
paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
158156
// Copy from GPU to CPU tensor for comparison
159-
TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
157+
paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
160158

161159
// Sync before Compare Tensors
162160
gpu_ctx.Wait();
@@ -172,11 +170,11 @@ TEST(TensorFromVector, Tensor) {
172170

173171
src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
174172

175-
cpu_tensor.Resize(make_ddim({2, 2}));
176-
TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
177-
gpu_tensor.Resize(make_ddim({2, 2}));
178-
TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
179-
TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
173+
cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
174+
paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
175+
gpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
176+
paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
177+
paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
180178

181179
// Sync before Compare Tensors
182180
gpu_ctx.Wait();
@@ -197,18 +195,16 @@ TEST(TensorFromVector, Tensor) {
197195
}
198196

199197
TEST(TensorToVector, Tensor) {
200-
using namespace paddle::framework;
201-
using namespace paddle::platform;
202198
{
203-
Tensor src;
204-
int* src_ptr = src.mutable_data<int>({3, 3}, CPUPlace());
199+
paddle::framework::Tensor src;
200+
int* src_ptr = src.mutable_data<int>({3, 3}, paddle::platform::CPUPlace());
205201
for (int i = 0; i < 3 * 3; ++i) {
206202
src_ptr[i] = i;
207203
}
208204

209-
CPUPlace place;
205+
paddle::platform::CPUPlace place;
210206
std::vector<int> dst;
211-
TensorToVector<int>(src, &dst);
207+
paddle::framework::TensorToVector<int>(src, &dst);
212208

213209
for (int i = 0; i < 3 * 3; ++i) {
214210
EXPECT_EQ(src_ptr[i], dst[i]);
@@ -217,13 +213,13 @@ TEST(TensorToVector, Tensor) {
217213
#ifdef PADDLE_WITH_CUDA
218214
{
219215
std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
220-
Tensor gpu_tensor;
221-
CUDAPlace place;
222-
CUDADeviceContext gpu_ctx(place);
223-
TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
216+
paddle::framework::Tensor gpu_tensor;
217+
paddle::platform::CUDAPlace place;
218+
paddle::platform::CUDADeviceContext gpu_ctx(place);
219+
paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
224220

225221
std::vector<int> dst;
226-
TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);
222+
paddle::framework::TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);
227223

228224
for (int i = 0; i < 3 * 3; ++i) {
229225
EXPECT_EQ(src_vec[i], dst[i]);
@@ -233,54 +229,50 @@ TEST(TensorToVector, Tensor) {
233229
}
234230

235231
TEST(TensorContainsNAN, CPU) {
236-
using namespace paddle::framework;
237-
using namespace paddle::platform;
238232
{
239-
Tensor src;
240-
float* buf = src.mutable_data<float>({3}, CPUPlace());
233+
paddle::framework::Tensor src;
234+
float* buf = src.mutable_data<float>({3}, paddle::platform::CPUPlace());
241235
buf[0] = 0.0;
242236
buf[1] = NAN;
243237
buf[2] = 0.0;
244-
ASSERT_TRUE(TensorContainsNAN(src));
238+
ASSERT_TRUE(paddle::framework::TensorContainsNAN(src));
245239
buf[1] = 0.0;
246-
ASSERT_FALSE(TensorContainsNAN(src));
240+
ASSERT_FALSE(paddle::framework::TensorContainsNAN(src));
247241
}
248242

249243
{
250-
Tensor src;
251-
float16* buf = src.mutable_data<float16>({3}, CPUPlace());
244+
paddle::framework::Tensor src;
245+
float16* buf = src.mutable_data<float16>({3}, paddle::platform::CPUPlace());
252246
buf[0] = 0.0;
253247
buf[1].x = 0x7fff;
254248
buf[2] = 0.0;
255-
ASSERT_TRUE(TensorContainsNAN(src));
249+
ASSERT_TRUE(paddle::framework::TensorContainsNAN(src));
256250
buf[1] = 0.0;
257-
ASSERT_FALSE(TensorContainsNAN(src));
251+
ASSERT_FALSE(paddle::framework::TensorContainsNAN(src));
258252
}
259253
}
260254

261255
TEST(TensorContainsInf, CPU) {
262-
using namespace paddle::framework;
263-
using namespace paddle::platform;
264256
{
265-
Tensor src;
266-
double* buf = src.mutable_data<double>({3}, CPUPlace());
257+
paddle::framework::Tensor src;
258+
double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
267259
buf[0] = 1.0;
268260
buf[1] = INFINITY;
269261
buf[2] = 0.0;
270-
ASSERT_TRUE(TensorContainsInf(src));
262+
ASSERT_TRUE(paddle::framework::TensorContainsInf(src));
271263
buf[1] = 1.0;
272-
ASSERT_FALSE(TensorContainsInf(src));
264+
ASSERT_FALSE(paddle::framework::TensorContainsInf(src));
273265
}
274266

275267
{
276-
Tensor src;
277-
float16* buf = src.mutable_data<float16>({3}, CPUPlace());
268+
paddle::framework::Tensor src;
269+
float16* buf = src.mutable_data<float16>({3}, paddle::platform::CPUPlace());
278270
buf[0] = 1.0;
279271
buf[1].x = 0x7c00;
280272
buf[2] = 0.0;
281-
ASSERT_TRUE(TensorContainsInf(src));
273+
ASSERT_TRUE(paddle::framework::TensorContainsInf(src));
282274
buf[1] = 1.0;
283-
ASSERT_FALSE(TensorContainsInf(src));
275+
ASSERT_FALSE(paddle::framework::TensorContainsInf(src));
284276
}
285277
}
286278

paddle/fluid/framework/tensor_util_test.cu

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,9 +45,8 @@ static __global__ void FillInf(platform::float16* buf) {
4545
}
4646

4747
TEST(TensorContainsNAN, GPU) {
48-
using namespace paddle::platform;
49-
CUDAPlace gpu(0);
50-
auto& pool = DeviceContextPool::Instance();
48+
paddle::platform::CUDAPlace gpu(0);
49+
auto& pool = paddle::platform::DeviceContextPool::Instance();
5150
auto* cuda_ctx = pool.GetByPlace(gpu);
5251
{
5352
Tensor tensor;
@@ -66,9 +65,8 @@ TEST(TensorContainsNAN, GPU) {
6665
}
6766
6867
TEST(TensorContainsInf, GPU) {
69-
using namespace paddle::platform;
70-
CUDAPlace gpu(0);
71-
auto& pool = DeviceContextPool::Instance();
68+
paddle::platform::CUDAPlace gpu(0);
69+
auto& pool = paddle::platform::DeviceContextPool::Instance();
7270
auto* cuda_ctx = pool.GetByPlace(gpu);
7371
{
7472
Tensor tensor;

0 commit comments

Comments (0)