Skip to content

Commit 3206094

Browse files
author
sweetsky0901
committed
format code
1 parent d2ee3c9 commit 3206094

File tree

5 files changed

+46
-45
lines changed

5 files changed

+46
-45
lines changed

paddle/operators/math/unpooling.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@ template <typename T>
2020
class Unpool2dMaxFunctor<platform::CPUPlace, T> {
2121
public:
2222
void operator()(const platform::DeviceContext& context,
23-
const framework::Tensor& input,
24-
const framework::Tensor& indices, framework::Tensor* output) {
23+
const framework::Tensor& input,
24+
const framework::Tensor& indices, framework::Tensor* output) {
2525
const int batch_size = input.dims()[0];
2626
const int input_height = input.dims()[2];
2727
const int input_width = input.dims()[3];

paddle/operators/math/unpooling.cu

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,12 @@ namespace operators {
2020
namespace math {
2121
template <typename T>
2222
__global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
23-
const int* indices_data,
24-
const int input_height, const int input_width,
25-
const int channels, T* output_data,
26-
const int output_height,
27-
const int output_width) {
23+
const int* indices_data,
24+
const int input_height,
25+
const int input_width,
26+
const int channels, T* output_data,
27+
const int output_height,
28+
const int output_width) {
2829
int in_n_stride = input_height * input_width * channels;
2930
int in_c_stride = input_height * input_width;
3031
int out_n_stride = output_height * output_width * channels;
@@ -42,12 +43,11 @@ __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
4243
}
4344
}
4445
template <typename T>
45-
__global__ void KernelUnpool2dMaxGrad(const int nthreads, const T* input_data,
46-
const int* indices_data,
47-
const int input_height, const int input_width,
48-
const int channels, const T* output_data,
49-
const T* output_grad, const int output_height,
50-
const int output_width, T* input_grad) {
46+
__global__ void KernelUnpool2dMaxGrad(
47+
const int nthreads, const T* input_data, const int* indices_data,
48+
const int input_height, const int input_width, const int channels,
49+
const T* output_data, const T* output_grad, const int output_height,
50+
const int output_width, T* input_grad) {
5151
int in_n_stride = input_height * input_width * channels;
5252
int in_c_stride = input_height * input_width;
5353
int out_n_stride = output_height * output_width * channels;
@@ -71,8 +71,8 @@ template <typename T>
7171
class Unpool2dMaxFunctor<platform::GPUPlace, T> {
7272
public:
7373
void operator()(const platform::DeviceContext& context,
74-
const framework::Tensor& input, const framework::Tensor& indices,
75-
framework::Tensor* output) {
74+
const framework::Tensor& input,
75+
const framework::Tensor& indices, framework::Tensor* output) {
7676
const int batch_size = input.dims()[0];
7777
const int input_height = input.dims()[2];
7878
const int input_width = input.dims()[3];
@@ -88,8 +88,8 @@ class Unpool2dMaxFunctor<platform::GPUPlace, T> {
8888
T><<<grid, threads, 0,
8989
reinterpret_cast<const platform::CUDADeviceContext&>(context)
9090
.stream()>>>(input.numel(), input_data, indices_data,
91-
input_height, input_width, output_channels,
92-
output_data, output_height, output_width);
91+
input_height, input_width, output_channels,
92+
output_data, output_height, output_width);
9393
}
9494
};
9595
/*
@@ -121,9 +121,9 @@ class Unpool2dMaxGradFunctor<platform::GPUPlace, T> {
121121
T><<<grid, threads, 0,
122122
reinterpret_cast<const platform::CUDADeviceContext&>(context)
123123
.stream()>>>(input.numel(), input_data, indices_data,
124-
input_height, input_width, output_channels, output_data,
125-
output_grad_data, output_height, output_width,
126-
input_grad_data);
124+
input_height, input_width, output_channels,
125+
output_data, output_grad_data, output_height,
126+
output_width, input_grad_data);
127127
}
128128
};
129129
template class Unpool2dMaxGradFunctor<platform::GPUPlace, float>;

paddle/operators/math/unpooling.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,7 @@ class Unpool2dMaxFunctor {
2323
public:
2424
void operator()(const platform::DeviceContext& context,
2525
const framework::Tensor& input,
26-
const framework::Tensor& indices,
27-
framework::Tensor* output);
26+
const framework::Tensor& indices, framework::Tensor* output);
2827
};
2928
template <typename Place, class T>
3029
class Unpool2dMaxGradFunctor {

paddle/operators/unpool_op.cc

Lines changed: 22 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -75,36 +75,38 @@ int OutputSize(int input_size, int ksize, int padding, int stride) {
7575
class UnpoolOp : public framework::OperatorWithKernel {
7676
protected:
7777
framework::OpKernelType GetKernelType(
78-
const framework::ExecutionContext& ctx) const override {
79-
return framework::OpKernelType(
80-
framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
78+
const framework::ExecutionContext& ctx) const override {
79+
return framework::OpKernelType(
80+
framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
8181
ctx.device_context());
82-
}
82+
}
8383

8484
public:
8585
using framework::OperatorWithKernel::OperatorWithKernel;
8686
void InferShape(framework::InferShapeContext* ctx) const override {
87-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of UnpoolOp"
88-
"should not be null.");
89-
PADDLE_ENFORCE(ctx->HasInput("Indices"), "Input(Indices) of UnpoolOp"
87+
PADDLE_ENFORCE(ctx->HasInput("X"),
88+
"Input(X) of UnpoolOp"
89+
"should not be null.");
90+
PADDLE_ENFORCE(ctx->HasInput("Indices"),
91+
"Input(Indices) of UnpoolOp"
9092
"should not be null.");
9193
PADDLE_ENFORCE(ctx->HasOutput("Out"),
9294
"Output(Out) of UnpoolOp should not be null.");
9395
auto in_x_dims = ctx->GetInputDim("X");
9496
auto in_y_dims = ctx->GetInputDim("Indices");
95-
std::string unpooling_type =
96-
ctx->Attrs().Get<std::string>("unpooling_type");
97+
std::string unpooling_type = ctx->Attrs()
98+
.Get<std::string>("unpooling_type");
9799
std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
98100
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
99101
std::vector<int> paddings =
100102
ctx->Attrs().Get<std::vector<int>>("paddings");
101103
PADDLE_ENFORCE(in_x_dims.size() == 4,
102-
"Unpooling intput must be of 4-dimensional.");
104+
"Unpooling intput must be of 4-dimensional.");
103105
PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims);
104106
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
105107
for (size_t i = 0; i < ksize.size(); ++i) {
106108
output_shape.push_back(
107-
OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
109+
OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
108110
}
109111
ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
110112
}
@@ -113,30 +115,30 @@ class UnpoolOp : public framework::OperatorWithKernel {
113115
class UnpoolOpGrad : public framework::OperatorWithKernel {
114116
protected:
115117
framework::OpKernelType GetKernelType(
116-
const framework::ExecutionContext& ctx) const override {
117-
return framework::OpKernelType(
118+
const framework::ExecutionContext& ctx) const override {
119+
return framework::OpKernelType(
118120
framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
119121
ctx.device_context());
120-
}
122+
}
121123

122124
public:
123125
using framework::OperatorWithKernel::OperatorWithKernel;
124126
void InferShape(framework::InferShapeContext* ctx) const override {
125127
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
126128
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
127-
"Input(X@GRAD) should not be null.");
129+
"Input(X@GRAD) should not be null.");
128130
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
129131
}
130132
};
131-
} // namespace operators
132-
} // namespace paddle
133+
} // namespace operators
134+
} // namespace paddle
133135

134136
namespace ops = paddle::operators;
135137
REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad,
136138
ops::UnpoolOpGrad);
137-
REGISTER_OP_CPU_KERNEL(
138-
unpool, ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
139-
ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
139+
REGISTER_OP_CPU_KERNEL(unpool,
140+
ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
141+
ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
140142
REGISTER_OP_CPU_KERNEL(
141143
unpool_grad, ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
142144
ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);

paddle/operators/unpool_op.cu.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@ limitations under the License. */
1515
#include "paddle/operators/unpool_op.h"
1616

1717
namespace ops = paddle::operators;
18-
REGISTER_OP_GPU_KERNEL(
19-
unpool, ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
20-
ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
18+
REGISTER_OP_GPU_KERNEL(unpool,
19+
ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
20+
ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
2121
REGISTER_OP_GPU_KERNEL(
2222
unpool_grad, ops::UnpoolGradKernel<paddle::platform::GPUPlace, float>,
2323
ops::UnpoolGradKernel<paddle::platform::GPUPlace, double>);

0 commit comments

Comments (0)