@@ -17,17 +17,14 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/tensor_util.h"

-using namespace paddle::framework;
-using namespace paddle::platform;
-
 template <typename DeviceContext, typename Place>
 void testConcat() {
-  Tensor input_a_cpu;
-  Tensor input_b_cpu;
-  Tensor out_cpu;
-  Tensor input_a;
-  Tensor input_b;
-  Tensor out;
+  paddle::framework::Tensor input_a_cpu;
+  paddle::framework::Tensor input_b_cpu;
+  paddle::framework::Tensor out_cpu;
+  paddle::framework::Tensor input_a;
+  paddle::framework::Tensor input_b;
+  paddle::framework::Tensor out;

   DeviceContext* context = new DeviceContext(Place());
   // DeviceContext context(Place());
@@ -40,18 +37,18 @@ void testConcat() {
    * output:
    * out.shape: [5, 3, 4]
    */
-  auto dim_a = make_ddim({2, 3, 4});
-  auto dim_b = make_ddim({3, 3, 4});
-  auto dim_out = make_ddim({5, 3, 4});
+  auto dim_a = paddle::framework::make_ddim({2, 3, 4});
+  auto dim_b = paddle::framework::make_ddim({3, 3, 4});
+  auto dim_out = paddle::framework::make_ddim({5, 3, 4});

   input_a.mutable_data<int>(dim_a, Place());
   input_b.mutable_data<int>(dim_b, Place());
   out.mutable_data<int>(dim_out, Place());

   if (paddle::platform::is_gpu_place(Place())) {
-    input_a_cpu.mutable_data<int>(dim_a, CPUPlace());
-    input_b_cpu.mutable_data<int>(dim_b, CPUPlace());
-    out_cpu.mutable_data<int>(dim_out, CPUPlace());
+    input_a_cpu.mutable_data<int>(dim_a, paddle::platform::CPUPlace());
+    input_b_cpu.mutable_data<int>(dim_b, paddle::platform::CPUPlace());
+    out_cpu.mutable_data<int>(dim_out, paddle::platform::CPUPlace());
   }

   int* a_ptr;
@@ -72,11 +69,11 @@ void testConcat() {
   }

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(input_a_cpu, Place(), &input_a);
-    TensorCopySync(input_b_cpu, Place(), &input_b);
+    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
+    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
   }

-  std::vector<Tensor> input;
+  std::vector<paddle::framework::Tensor> input;
   input.push_back(input_a);
   input.push_back(input_b);

@@ -89,7 +86,8 @@ void testConcat() {

   int* out_ptr;
   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(out, CPUPlace(), &out_cpu);
+    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
+                                  &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -115,9 +113,9 @@ void testConcat() {
    * output:
    * out.shape: [2, 7, 4]
    */
-  dim_a = make_ddim({2, 3, 4});
-  dim_b = make_ddim({2, 4, 4});
-  dim_out = make_ddim({2, 7, 4});
+  dim_a = paddle::framework::make_ddim({2, 3, 4});
+  dim_b = paddle::framework::make_ddim({2, 4, 4});
+  dim_out = paddle::framework::make_ddim({2, 7, 4});

   input_a.Resize(dim_a);
   input_b.Resize(dim_b);
@@ -144,8 +142,8 @@ void testConcat() {
   }

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(input_a_cpu, Place(), &input_a);
-    TensorCopySync(input_b_cpu, Place(), &input_b);
+    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
+    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
   }

   input.clear();
@@ -159,7 +157,8 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(out, CPUPlace(), &out_cpu);
+    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
+                                  &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -187,9 +186,9 @@ void testConcat() {
    * output:
    * out.shape: [2, 3, 9]
    */
-  dim_a = make_ddim({2, 3, 4});
-  dim_b = make_ddim({2, 3, 5});
-  dim_out = make_ddim({2, 3, 9});
+  dim_a = paddle::framework::make_ddim({2, 3, 4});
+  dim_b = paddle::framework::make_ddim({2, 3, 5});
+  dim_out = paddle::framework::make_ddim({2, 3, 9});

   input_a.Resize(dim_a);
   input_b.Resize(dim_b);
@@ -216,8 +215,8 @@ void testConcat() {
   }

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(input_a_cpu, Place(), &input_a);
-    TensorCopySync(input_b_cpu, Place(), &input_b);
+    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
+    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
   }

   input.clear();
@@ -231,7 +230,8 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(out, CPUPlace(), &out_cpu);
+    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
+                                  &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
@@ -261,9 +261,9 @@ void testConcat() {
    * output:
    * out.shape: [2, 6, 4]
    */
-  dim_a = make_ddim({2, 3, 4});
-  dim_b = make_ddim({2, 3, 4});
-  dim_out = make_ddim({2, 6, 4});
+  dim_a = paddle::framework::make_ddim({2, 3, 4});
+  dim_b = paddle::framework::make_ddim({2, 3, 4});
+  dim_out = paddle::framework::make_ddim({2, 6, 4});

   input_a.Resize(dim_a);
   input_b.Resize(dim_b);
@@ -290,8 +290,8 @@ void testConcat() {
   }

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(input_a_cpu, Place(), &input_a);
-    TensorCopySync(input_b_cpu, Place(), &input_b);
+    paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
+    paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
   }

   input.clear();
@@ -305,7 +305,8 @@ void testConcat() {
   PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);

   if (paddle::platform::is_gpu_place(Place())) {
-    TensorCopySync(out, CPUPlace(), &out_cpu);
+    paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
+                                  &out_cpu);
     out_ptr = out_cpu.data<int>();
   } else {
     out_ptr = out.data<int>();
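Note on how the changed helper is exercised: testConcat is a function template over DeviceContext/Place, so the test binary has to instantiate it once per backend. The following is a minimal sketch of such a gtest driver, assuming the usual Paddle test harness; the test name, the CPU/CUDA type pairs, and the PADDLE_WITH_CUDA guard are illustrative assumptions, not taken from this diff.

#include <gtest/gtest.h>

// Hypothetical driver: instantiate the templated test once per backend.
TEST(math, concat) {
  // CPU instantiation: the DeviceContext and Place types must correspond.
  testConcat<paddle::platform::CPUDeviceContext, paddle::platform::CPUPlace>();
#ifdef PADDLE_WITH_CUDA
  // GPU instantiation is what exercises the TensorCopy(..., *context, ...)
  // branches that this diff switches to from TensorCopySync.
  testConcat<paddle::platform::CUDADeviceContext,
             paddle::platform::CUDAPlace>();
#endif
}

Passing *context means each copy is issued on that device context's stream rather than blocking the way TensorCopySync does, which is presumably why every GPU branch above now threads the context through the call.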