Skip to content

Commit 60d6348

Browse files
committed
Revert develop
1 parent 86af6bd commit 60d6348

File tree

1 file changed

+30
-52
lines changed

1 file changed

+30
-52
lines changed

paddle/fluid/operators/math/pooling.cu

Lines changed: 30 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

15-
#include <algorithm>
16-
#include <vector>
1715
#include "paddle/fluid/operators/math/pooling.h"
1816
#include "paddle/fluid/platform/cuda_primitives.h"
1917

@@ -22,7 +20,7 @@ namespace operators {
2220
namespace math {
2321

2422
template <typename PoolProcess, typename T>
25-
__global__ void KernelPool2D(const int nthreads, const T* input_data, // NOLINT
23+
__global__ void KernelPool2D(const int nthreads, const T* input_data,
2624
const int channels, const int input_height,
2725
const int input_width, const int output_height,
2826
const int output_width, const int ksize_height,
@@ -60,8 +58,8 @@ __global__ void KernelPool2D(const int nthreads, const T* input_data, // NOLINT
6058

6159
template <typename PoolProcess, typename T>
6260
__global__ void KernelPool2DGrad(
63-
const int nthreads, const T* input_data, const T* output_data, // NOLINT
64-
const T* output_grad, const int channels, const int input_height, // NOLINT
61+
const int nthreads, const T* input_data, const T* output_data,
62+
const T* output_grad, const int channels, const int input_height,
6563
const int input_width, const int output_height, const int output_width,
6664
const int ksize_height, const int ksize_width, const int stride_height,
6765
const int stride_width, const int padding_height, const int padding_width,
@@ -108,8 +106,8 @@ __global__ void KernelPool2DGrad(
108106

109107
template <typename T>
110108
__global__ void KernelMaxPool2DGrad(
111-
const int nthreads, const T* input_data, const T* output_data, // NOLINT
112-
const T* output_grad, const int channels, const int input_height, // NOLINT
109+
const int nthreads, const T* input_data, const T* output_data,
110+
const T* output_grad, const int channels, const int input_height,
113111
const int input_width, const int output_height, const int output_width,
114112
const int ksize_height, const int ksize_width, const int stride_height,
115113
const int stride_width, const int padding_height, const int padding_width,
@@ -160,10 +158,8 @@ template <typename PoolProcess, typename T>
160158
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
161159
public:
162160
void operator()(const platform::CUDADeviceContext& context,
163-
const framework::Tensor& input,
164-
std::vector<int>& ksize, // NOLINT
165-
std::vector<int>& strides, // NOLINT
166-
std::vector<int>& paddings, // NOLINT
161+
const framework::Tensor& input, std::vector<int>& ksize,
162+
std::vector<int>& strides, std::vector<int>& paddings,
167163
PoolProcess pool_process, framework::Tensor* output) {
168164
const int batch_size = input.dims()[0];
169165
const int input_channels = input.dims()[1];
@@ -205,10 +201,8 @@ class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
205201
void operator()(const platform::CUDADeviceContext& context,
206202
const framework::Tensor& input,
207203
const framework::Tensor& output,
208-
const framework::Tensor& output_grad,
209-
std::vector<int>& ksize, // NOLINT
210-
std::vector<int>& strides, // NOLINT
211-
std::vector<int>& paddings, // NOLINT
204+
const framework::Tensor& output_grad, std::vector<int>& ksize,
205+
std::vector<int>& strides, std::vector<int>& paddings,
212206
PoolProcess pool_process, framework::Tensor* input_grad) {
213207
const int batch_size = input.dims()[0];
214208
const int input_channels = input.dims()[1];
@@ -252,10 +246,8 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
252246
void operator()(const platform::CUDADeviceContext& context,
253247
const framework::Tensor& input,
254248
const framework::Tensor& output,
255-
const framework::Tensor& output_grad,
256-
std::vector<int>& ksize, // NOLINT
257-
std::vector<int>& strides, // NOLINT
258-
std::vector<int>& paddings, // NOLINT
249+
const framework::Tensor& output_grad, std::vector<int>& ksize,
250+
std::vector<int>& strides, std::vector<int>& paddings,
259251
framework::Tensor* input_grad) {
260252
const int batch_size = input.dims()[0];
261253
const int input_channels = input.dims()[1];
@@ -314,7 +306,7 @@ template class Pool2dGradFunctor<platform::CUDADeviceContext,
314306
double>;
315307

316308
template <typename PoolProcess, typename T>
317-
__global__ void KernelPool3D(const int nthreads, const T* input_data, // NOLINT
309+
__global__ void KernelPool3D(const int nthreads, const T* input_data,
318310
const int channels, const int input_depth,
319311
const int input_height, const int input_width,
320312
const int output_depth, const int output_height,
@@ -360,8 +352,8 @@ __global__ void KernelPool3D(const int nthreads, const T* input_data, // NOLINT
360352

361353
template <typename PoolProcess, typename T>
362354
__global__ void KernelPool3DGrad(
363-
const int nthreads, const T* input_data, const T* output_data, // NOLINT
364-
const T* output_grad, const int channels, const int input_depth, // NOLINT
355+
const int nthreads, const T* input_data, const T* output_data,
356+
const T* output_grad, const int channels, const int input_depth,
365357
const int input_height, const int input_width, const int output_depth,
366358
const int output_height, const int output_width, const int ksize_depth,
367359
const int ksize_height, const int ksize_width, const int stride_depth,
@@ -424,8 +416,8 @@ __global__ void KernelPool3DGrad(
424416

425417
template <typename T>
426418
__global__ void KernelMaxPool3DGrad(
427-
const int nthreads, const T* input_data, const T* output_data, // NOLINT
428-
const T* output_grad, const int channels, const int input_depth, // NOLINT
419+
const int nthreads, const T* input_data, const T* output_data,
420+
const T* output_grad, const int channels, const int input_depth,
429421
const int input_height, const int input_width, const int output_depth,
430422
const int output_height, const int output_width, const int ksize_depth,
431423
const int ksize_height, const int ksize_width, const int stride_depth,
@@ -482,10 +474,8 @@ template <typename PoolProcess, class T>
482474
class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
483475
public:
484476
void operator()(const platform::CUDADeviceContext& context,
485-
const framework::Tensor& input,
486-
std::vector<int>& ksize, // NOLINT
487-
std::vector<int>& strides, // NOLINT
488-
std::vector<int>& paddings, // NOLINT
477+
const framework::Tensor& input, std::vector<int>& ksize,
478+
std::vector<int>& strides, std::vector<int>& paddings,
489479
PoolProcess pool_process, framework::Tensor* output) {
490480
const int batch_size = input.dims()[0];
491481
const int input_channels = input.dims()[1];
@@ -535,10 +525,8 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
535525
void operator()(const platform::CUDADeviceContext& context,
536526
const framework::Tensor& input,
537527
const framework::Tensor& output,
538-
const framework::Tensor& output_grad,
539-
std::vector<int>& ksize, // NOLINT
540-
std::vector<int>& strides, // NOLINT
541-
std::vector<int>& paddings, // NOLINT
528+
const framework::Tensor& output_grad, std::vector<int>& ksize,
529+
std::vector<int>& strides, std::vector<int>& paddings,
542530
PoolProcess pool_process, framework::Tensor* input_grad) {
543531
const int batch_size = input.dims()[0];
544532
const int input_channels = input.dims()[1];
@@ -590,10 +578,8 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
590578
void operator()(const platform::CUDADeviceContext& context,
591579
const framework::Tensor& input,
592580
const framework::Tensor& output,
593-
const framework::Tensor& output_grad,
594-
std::vector<int>& ksize, // NOLINT
595-
std::vector<int>& strides, // NOLINT
596-
std::vector<int>& paddings, // NOLINT
581+
const framework::Tensor& output_grad, std::vector<int>& ksize,
582+
std::vector<int>& strides, std::vector<int>& paddings,
597583
framework::Tensor* input_grad) {
598584
const int batch_size = input.dims()[0];
599585
const int input_channels = input.dims()[1];
@@ -750,10 +736,8 @@ template <typename T1, typename T2>
750736
class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
751737
public:
752738
void operator()(const platform::CUDADeviceContext& context,
753-
const framework::Tensor& input,
754-
std::vector<int>& ksize, // NOLINT
755-
std::vector<int>& strides, // NOLINT
756-
std::vector<int>& paddings, // NOLINT
739+
const framework::Tensor& input, std::vector<int>& ksize,
740+
std::vector<int>& strides, std::vector<int>& paddings,
757741
framework::Tensor* output, framework::Tensor* mask) {
758742
const int batch_size = input.dims()[0];
759743
const int input_channels = input.dims()[1];
@@ -795,10 +779,8 @@ class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
795779
public:
796780
void operator()(const platform::CUDADeviceContext& context,
797781
const framework::Tensor& output_grad,
798-
const framework::Tensor& mask,
799-
std::vector<int>& ksize, // NOLINT
800-
std::vector<int>& strides, // NOLINT
801-
std::vector<int>& paddings, // NOLINT
782+
const framework::Tensor& mask, std::vector<int>& ksize,
783+
std::vector<int>& strides, std::vector<int>& paddings,
802784
framework::Tensor* input_grad) {
803785
const int batch_size = input_grad->dims()[0];
804786
const int input_channels = input_grad->dims()[1];
@@ -955,10 +937,8 @@ template <typename T1, typename T2>
955937
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
956938
public:
957939
void operator()(const platform::CUDADeviceContext& context,
958-
const framework::Tensor& input,
959-
std::vector<int>& ksize, // NOLINT
960-
std::vector<int>& strides, // NOLINT
961-
std::vector<int>& paddings, // NOLINT
940+
const framework::Tensor& input, std::vector<int>& ksize,
941+
std::vector<int>& strides, std::vector<int>& paddings,
962942
framework::Tensor* output, framework::Tensor* mask) {
963943
const int batch_size = input.dims()[0];
964944
const int input_channels = input.dims()[1];
@@ -1007,10 +987,8 @@ class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
1007987
public:
1008988
void operator()(const platform::CUDADeviceContext& context,
1009989
const framework::Tensor& output_grad,
1010-
const framework::Tensor& mask,
1011-
std::vector<int>& ksize, // NOLINT
1012-
std::vector<int>& strides, // NOLINT
1013-
std::vector<int>& paddings, // NOLINT
990+
const framework::Tensor& mask, std::vector<int>& ksize,
991+
std::vector<int>& strides, std::vector<int>& paddings,
1014992
framework::Tensor* input_grad) {
1015993
const int batch_size = input_grad->dims()[0];
1016994
const int input_channels = input_grad->dims()[1];

0 commit comments

Comments (0)