
Commit b1224da

chengduo authored and wangkuiyi committed
Move reduceSum to elementwise_op_function.h (#9773)
* add cuda_device_functions.h
* move reduceSum to elementwise_op_function.h
1 parent b44b6a4 · commit b1224da

File tree

2 files changed: +75 −73 lines

paddle/fluid/operators/elementwise_op_function.h

Lines changed: 75 additions & 25 deletions
@@ -13,14 +13,15 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#include <algorithm>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/transform.h"
 
 #ifdef __NVCC__
+#include <cuda.h>
 #include <thrust/iterator/iterator_adaptor.h>
-#include "paddle/fluid/platform/cuda_helper.h"
 constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024;
 #endif

@@ -43,35 +44,35 @@ namespace operators {
  */
 inline void get_mid_dims(const framework::DDim& x_dims,
                          const framework::DDim& y_dims, const int axis,
-                         int& pre, int& n, int& post) {
-  pre = 1;
-  n = 1;
-  post = 1;
+                         int* pre, int* n, int* post) {
+  *pre = 1;
+  *n = 1;
+  *post = 1;
   for (int i = 0; i < axis; ++i) {
-    pre *= x_dims[i];
+    (*pre) *= x_dims[i];
   }
 
   for (int i = 0; i < y_dims.size(); ++i) {
     PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i],
                       "Broadcast dimension mismatch.");
-    n *= y_dims[i];
+    (*n) *= y_dims[i];
   }
 
   for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
-    post *= x_dims[i];
+    (*post) *= x_dims[i];
   }
 }
 
-inline void trim_trailing_singular_dims(framework::DDim& dims) {
+inline void trim_trailing_singular_dims(framework::DDim* dims) {
   // Remove trailing dimensions of size 1 for y
-  auto actual_dims_size = dims.size();
+  auto actual_dims_size = dims->size();
   for (; actual_dims_size != 0; --actual_dims_size) {
-    if (dims[actual_dims_size - 1] != 1) break;
+    if ((*dims)[actual_dims_size - 1] != 1) break;
   }
-  if (actual_dims_size != dims.size()) {
-    auto actual_dims = framework::vectorize(dims);
+  if (actual_dims_size != dims->size()) {
+    auto actual_dims = framework::vectorize(*dims);
     actual_dims.resize(actual_dims_size);
-    dims = framework::make_ddim(actual_dims);
+    *dims = framework::make_ddim(actual_dims);
   }
 }
 
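For intuition, here is a minimal standalone sketch of the decomposition get_mid_dims performs, with std::vector<int> standing in for framework::DDim and made-up shapes (illustration only, not Paddle code): broadcasting a y of shape (3, 4) into an x of shape (2, 3, 4, 5) at axis = 1 flattens x into a (pre, n, post) = (2, 12, 5) view. Passing pre, n, and post as pointers rather than non-const references matches the Google C++ style rule for output parameters, which appears to be the point of this change.

#include <cassert>
#include <vector>

// Hypothetical re-implementation of get_mid_dims, for illustration only.
void get_mid_dims_sketch(const std::vector<int>& x_dims,
                         const std::vector<int>& y_dims, int axis,
                         int* pre, int* n, int* post) {
  *pre = *n = *post = 1;
  for (int i = 0; i < axis; ++i) *pre *= x_dims[i];  // dims before y begins
  for (size_t i = 0; i < y_dims.size(); ++i) {
    assert(x_dims[i + axis] == y_dims[i]);  // mirrors PADDLE_ENFORCE_EQ
    *n *= y_dims[i];                        // dims y spans
  }
  for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) {
    *post *= x_dims[i];                     // dims after y ends
  }
}

int main() {
  int pre, n, post;
  get_mid_dims_sketch({2, 3, 4, 5}, {3, 4}, /*axis=*/1, &pre, &n, &post);
  assert(pre == 2 && n == 12 && post == 5);
  return 0;
}
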
@@ -159,7 +160,7 @@ class RowwiseTransformIterator<T, platform::CUDADeviceContext>
       RowwiseTransformIterator<T, platform::CUDADeviceContext>, const T*>
       super_t;
   HOSTDEVICE RowwiseTransformIterator(const T* x, int n)
-      : super_t(x), begin_(x), n_(n){};
+      : super_t(x), begin_(x), n_(n) {}
   friend class thrust::iterator_core_access;
 
  private:
@@ -179,7 +180,7 @@ class MidWiseTransformIterator<T, platform::CUDADeviceContext>
       MidWiseTransformIterator<T, platform::CUDADeviceContext>, const T*>
       super_t;
   HOSTDEVICE MidWiseTransformIterator(const T* x, int n, int post)
-      : super_t(x), begin_(x), n_(n), post_(post){};
+      : super_t(x), begin_(x), n_(n), post_(post) {}
   friend class thrust::iterator_core_access;
 
  private:
@@ -333,6 +334,55 @@ static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out,
   }
 }
 #ifdef __NVCC__
+
+// __shfl_down has been deprecated as of CUDA 9.0.
+#if CUDA_VERSION < 9000
+template <typename T>
+__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) {
+  return __shfl_down(val, delta);
+}
+#define CREATE_SHFL_MASK(mask, predicate) mask = 0u;
+#else
+#define FULL_WARP_MASK 0xFFFFFFFF
+#define CREATE_SHFL_MASK(mask, predicate) \
+  mask = __ballot_sync(FULL_WARP_MASK, (predicate))
+#endif
+
+template <typename T>
+__device__ T reduceSum(T val, int tid, int len) {
+  // TODO(zcd): The warp size should be taken from the
+  // parameters of the GPU but not specified as 32 simply.
+  // To make the reduceSum more efficiently,
+  // I use Warp-Level Parallelism and assume the Warp size
+  // is 32 which may be different for different GPU,
+  // but most card's warp size is 32.
+  __shared__ T shm[32];
+  const int warpSize = 32;
+  unsigned mask = 0u;
+  CREATE_SHFL_MASK(mask, tid < len);
+
+  for (int offset = warpSize / 2; offset > 0; offset /= 2)
+    val += __shfl_down_sync(mask, val, offset);
+
+  if (tid < warpSize) shm[tid] = 0;
+
+  __syncthreads();
+
+  if (tid % warpSize == 0) {
+    shm[tid / warpSize] = val;
+  }
+
+  CREATE_SHFL_MASK(mask, tid < warpSize);
+
+  if (tid < warpSize) {
+    val = shm[tid];
+    for (int offset = warpSize / 2; offset > 0; offset /= 2)
+      val += __shfl_down_sync(mask, val, offset);
+  }
+
+  return val;
+}
+
 template <typename T, typename DX_OP, typename DY_OP>
 static __global__ void ElemwiseGradBroadcast1CUDAKernel(
     const T* x, const T* y, const T* out, const T* dout, int h, int w,
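The reduceSum moved in above is a two-level block reduction: each warp first reduces its 32 lanes with shuffle instructions, lane 0 of every warp parks its partial sum in shared memory, and the first warp then reduces those at-most-32 partials, leaving the block total in thread 0. CREATE_SHFL_MASK absorbs the CUDA 9.0 API change: on CUDA 9.0 or newer (CUDA_VERSION >= 9000) it builds a real __ballot_sync mask, while on older toolkits a fallback __shfl_down_sync wrapper simply ignores the mask. A hedged usage sketch follows; the kernel and buffer names are invented, and it assumes this header is in scope and blockDim.x <= 1024 (at most 32 warps):

// Hypothetical kernel: one block sums len floats into out[0] via the
// reduceSum defined above.
__global__ void BlockSumSketch(const float* in, float* out, int len) {
  int tid = threadIdx.x;
  float val = 0;
  for (int i = tid; i < len; i += blockDim.x) val += in[i];  // strided loads
  val = reduceSum(val, tid, blockDim.x);  // block-wide sum, valid in thread 0
  if (tid == 0) *out = val;
}

// Launch sketch (single block of 256 threads; d_in, d_out, stream assumed):
//   BlockSumSketch<<<1, 256, 0, stream>>>(d_in, d_out, len);
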
@@ -355,7 +405,7 @@ static __global__ void ElemwiseGradBroadcast1CUDAKernel(
 
   if (dy) {
     h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h;
-    val = platform::reduceSum(val, tid, h);
+    val = reduceSum(val, tid, h);
     if (threadIdx.x == 0) {
       dy[j] = val;
     }
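Clamping h to ELEMWISE_MAX_BLOCK_DIM (1024) matters because reduceSum only reduces within a single thread block. The launcher is outside this diff, but it presumably pairs the clamp with a matching block size, one block per column of dy, along these lines (stream and the surrounding variables are assumed; std::min is presumably why <algorithm> is now included at the top):

// Hypothetical launch-side counterpart of the clamp above.
int block_size = std::min(h, ELEMWISE_MAX_BLOCK_DIM);
int grid_size = w;  // one block per output column of dy
ElemwiseGradBroadcast1CUDAKernel<<<grid_size, block_size, 0, stream>>>(
    x, y, out, dout, h, w, dx_op, dy_op, dx, dy);
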
@@ -432,7 +482,7 @@ static __global__ void ElemwiseGradBroadcast2CUDAKernel(
   if (dy) {
     int h = pre * post;
     h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h;
-    val = platform::reduceSum(val, tid, h);
+    val = reduceSum(val, tid, h);
     if (threadIdx.x == 0) {
       dy[j] = val;
     }
@@ -472,11 +522,11 @@ void ElemwiseGradCompute(const framework::ExecutionContext& ctx,
   auto y_dim = y.dims();
 
   axis = (axis == -1 ? x_dim.size() - y_dim.size() : axis);
-  trim_trailing_singular_dims(y_dim);
+  trim_trailing_singular_dims(&y_dim);
   axis = (y_dim.size() == 0) ? x_dim.size() : axis;
 
   int pre, n, post;
-  get_mid_dims(x_dim, y_dim, axis, pre, n, post);
+  get_mid_dims(x_dim, y_dim, axis, &pre, &n, &post);
   if (post == 1) {
     int h = pre;
     int w = n;
@@ -514,7 +564,7 @@ void ElemwiseGradCompute(const framework::ExecutionContext& ctx,
       }
     }
   }
-};
+}
 
 template <typename DeviceContext, typename T, typename functor,
           typename broadcastfunctor, typename broadcast2functor>
@@ -543,11 +593,11 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx,
   }
 
   axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
-  trim_trailing_singular_dims(y_dims);
+  trim_trailing_singular_dims(&y_dims);
   axis = (y_dims.size() == 0) ? x_dims.size() : axis;
 
   int pre, n, post;
-  get_mid_dims(x_dims, y_dims, axis, pre, n, post);
+  get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
 
   if (post == 1) {
     broadcastfunctor f;
@@ -582,11 +632,11 @@ void ElementwiseComputeEx(const framework::ExecutionContext& ctx,
   axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
   PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
                  "Axis should be in range [0, x_dims)");
-  trim_trailing_singular_dims(y_dims);
+  trim_trailing_singular_dims(&y_dims);
   axis = (y_dims.size() == 0) ? x_dims.size() : axis;
 
   int pre, n, post;
-  get_mid_dims(x_dims, y_dims, axis, pre, n, post);
+  get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
   if (post == 1) {
     functor.RunRowWise(n, pre);
     return;
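Tying it together: after trim_trailing_singular_dims and the pointer-based get_mid_dims, the value of post selects the broadcast flavor. A hedged continuation of the earlier get_mid_dims_sketch, with made-up shapes (RunMidWise is the sibling of the RunRowWise call above):

int pre, n, post;

// y spans the trailing dims of x -> post == 1 -> functor.RunRowWise(n, pre):
// y tiles along the rows of a (pre x n) view of x.
get_mid_dims_sketch({2, 3, 4}, {3, 4}, /*axis=*/1, &pre, &n, &post);
// pre == 2, n == 12, post == 1

// y covers only a middle dim -> post != 1 -> functor.RunMidWise(n, pre, post):
// each y element covers a contiguous run of post elements of x.
get_mid_dims_sketch({2, 3, 4}, {3}, /*axis=*/1, &pre, &n, &post);
// pre == 2, n == 3, post == 4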

paddle/fluid/platform/cuda_helper.h

Lines changed: 0 additions & 48 deletions
@@ -62,53 +62,5 @@ CUDA_ATOMIC_WRAPPER(Add, double) {
 }
 #endif
 
-// __shfl_down has been deprecated as of CUDA 9.0.
-#if CUDA_VERSION < 9000
-template <typename T>
-__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) {
-  return __shfl_down(val, delta);
-}
-#define CREATE_SHFL_MASK(mask, predicate) mask = 0u;
-#else
-#define FULL_WARP_MASK 0xFFFFFFFF
-#define CREATE_SHFL_MASK(mask, predicate) \
-  mask = __ballot_sync(FULL_WARP_MASK, (predicate))
-#endif
-
-template <typename T>
-__device__ T reduceSum(T val, int tid, int len) {
-  // TODO(zcd): The warp size should be taken from the
-  // parameters of the GPU but not specified as 32 simply.
-  // To make the reduceSum more efficiently,
-  // I use Warp-Level Parallelism and assume the Warp size
-  // is 32 which may be different for different GPU,
-  // but most card's warp size is 32.
-  __shared__ T shm[32];
-  const int warpSize = 32;
-  unsigned mask = 0u;
-  CREATE_SHFL_MASK(mask, tid < len);
-
-  for (int offset = warpSize / 2; offset > 0; offset /= 2)
-    val += __shfl_down_sync(mask, val, offset);
-
-  if (tid < warpSize) shm[tid] = 0;
-
-  __syncthreads();
-
-  if (tid % warpSize == 0) {
-    shm[tid / warpSize] = val;
-  }
-
-  CREATE_SHFL_MASK(mask, tid < warpSize);
-
-  if (tid < warpSize) {
-    val = shm[tid];
-    for (int offset = warpSize / 2; offset > 0; offset /= 2)
-      val += __shfl_down_sync(mask, val, offset);
-  }
-
-  return val;
-}
-
 } // namespace platform
 } // namespace paddle
