@@ -18,11 +18,12 @@ limitations under the License. */
18
18
#include " paddle/fluid/platform/hostdevice.h"
19
19
#include " paddle/fluid/platform/transform.h"
20
20
21
+ namespace {
22
+
21
23
template <typename T>
22
24
class Scale {
23
25
public:
24
26
explicit Scale (const T& scale) : scale_(scale) {}
25
-
26
27
HOSTDEVICE T operator ()(const T& a) const { return a * scale_; }
27
28
28
29
private:
@@ -35,26 +36,36 @@ class Multiply {
35
36
HOSTDEVICE T operator ()(const T& a, const T& b) const { return a * b; }
36
37
};
37
38
39
+ } // namespace
40
+
41
+ using paddle::memory::Alloc;
42
+ using paddle::memory::Free;
43
+ using paddle::memory::Copy;
44
+
45
+ using paddle::platform::CPUPlace;
46
+ using paddle::platform::CUDAPlace;
47
+ using paddle::platform::CPUDeviceContext;
48
+ using paddle::platform::CUDADeviceContext;
49
+
50
+ using paddle::platform::Transform;
51
+
38
52
// Verifies that Transform applies a unary functor in place on the CPU:
// each element of the buffer is multiplied by 10 via Scale<float>, so the
// inputs {0.1, 0.2, 0.3, 0.4} must come out as {1, 2, 3, 4} (within 1e-5).
TEST(Transform, CPUUnary) {
  CPUDeviceContext ctx;
  float data[4] = {0.1, 0.2, 0.3, 0.4};
  Transform<CPUDeviceContext> transformer;
  // In-place transform: output range overlaps the input range.
  transformer(ctx, data, data + 4, data, Scale<float>(10));
  for (int idx = 0; idx < 4; ++idx) {
    ASSERT_NEAR(data[idx], static_cast<float>(idx + 1), 1e-5);
  }
}
48
61
49
62
TEST (Transform, GPUUnary) {
50
- using namespace paddle ::platform;
51
- using namespace paddle ::memory;
52
63
CUDAPlace gpu0 (0 );
53
64
CUDADeviceContext ctx (gpu0);
54
65
float cpu_buf[4 ] = {0.1 , 0.2 , 0.3 , 0.4 };
55
66
float * gpu_buf = static_cast <float *>(Alloc (gpu0, sizeof (float ) * 4 ));
56
67
Copy (gpu0, gpu_buf, CPUPlace (), cpu_buf, sizeof (cpu_buf), ctx.stream ());
57
- Transform<paddle::platform:: CUDADeviceContext> trans;
68
+ Transform<CUDADeviceContext> trans;
58
69
trans (ctx, gpu_buf, gpu_buf + 4 , gpu_buf, Scale<float >(10 ));
59
70
ctx.Wait ();
60
71
Copy (CPUPlace (), cpu_buf, gpu0, gpu_buf, sizeof (cpu_buf), ctx.stream ());
@@ -65,10 +76,8 @@ TEST(Transform, GPUUnary) {
65
76
}
66
77
67
78
TEST (Transform, CPUBinary) {
68
- using namespace paddle ::platform;
69
- using namespace paddle ::memory;
70
79
int buf[4 ] = {1 , 2 , 3 , 4 };
71
- Transform<paddle::platform:: CPUDeviceContext> trans;
80
+ Transform<CPUDeviceContext> trans;
72
81
CPUDeviceContext ctx;
73
82
trans (ctx, buf, buf + 4 , buf, buf, Multiply<int >());
74
83
for (int i = 0 ; i < 4 ; ++i) {
@@ -77,14 +86,12 @@ TEST(Transform, CPUBinary) {
77
86
}
78
87
79
88
TEST (Transform, GPUBinary) {
80
- using namespace paddle ::platform;
81
- using namespace paddle ::memory;
82
89
int buf[4 ] = {1 , 2 , 3 , 4 };
83
90
CUDAPlace gpu0 (0 );
84
91
CUDADeviceContext ctx (gpu0);
85
92
int * gpu_buf = static_cast <int *>(Alloc (gpu0, sizeof (buf)));
86
93
Copy (gpu0, gpu_buf, CPUPlace (), buf, sizeof (buf), ctx.stream ());
87
- Transform<paddle::platform:: CUDADeviceContext> trans;
94
+ Transform<CUDADeviceContext> trans;
88
95
trans (ctx, gpu_buf, gpu_buf + 4 , gpu_buf, gpu_buf, Multiply<int >());
89
96
ctx.Wait ();
90
97
Copy (CPUPlace (), buf, gpu0, gpu_buf, sizeof (buf), ctx.stream ());
0 commit comments