Commit 4ffb73f

sweetsky0901 committed: format ..
1 parent: 5b449b6

2 files changed: +15 -18 lines

paddle/operators/math/unpooling.cu  (12 additions, 13 deletions)
@@ -21,8 +21,7 @@ namespace math {
 template <typename T>
 __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
                                   const int* indices_data,
-                                  const int input_height,
-                                  const int input_width,
+                                  const int input_height, const int input_width,
                                   const int channels, T* output_data,
                                   const int output_height,
                                   const int output_width) {
@@ -71,8 +70,8 @@ template <typename T>
 class Unpool2dMaxFunctor<platform::GPUPlace, T> {
  public:
   void operator()(const platform::DeviceContext& context,
-      const framework::Tensor& input,
-      const framework::Tensor& indices, framework::Tensor* output) {
+                  const framework::Tensor& input,
+                  const framework::Tensor& indices, framework::Tensor* output) {
     const int batch_size = input.dims()[0];
     const int input_height = input.dims()[2];
     const int input_width = input.dims()[3];
@@ -86,10 +85,10 @@ class Unpool2dMaxFunctor<platform::GPUPlace, T> {
     int grid = (input.numel() + threads - 1) / threads;
     KernelUnpool2dMax<
         T><<<grid, threads, 0,
-            reinterpret_cast<const platform::CUDADeviceContext&>(context)
-                .stream()>>>(input.numel(), input_data, indices_data,
-                             input_height, input_width, output_channels,
-                             output_data, output_height, output_width);
+             reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                 .stream()>>>(input.numel(), input_data, indices_data,
+                              input_height, input_width, output_channels,
+                              output_data, output_height, output_width);
   }
 };
 /*
@@ -119,11 +118,11 @@ class Unpool2dMaxGradFunctor<platform::GPUPlace, T> {
     int grid = (input.numel() + threads - 1) / threads;
     KernelUnpool2dMaxGrad<
         T><<<grid, threads, 0,
-            reinterpret_cast<const platform::CUDADeviceContext&>(context)
-                .stream()>>>(input.numel(), input_data, indices_data,
-                             input_height, input_width, output_channels,
-                             output_data, output_grad_data, output_height,
-                             output_width, input_grad_data);
+             reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                 .stream()>>>(input.numel(), input_data, indices_data,
+                              input_height, input_width, output_channels,
+                              output_data, output_grad_data, output_height,
+                              output_width, input_grad_data);
   }
 };
 template class Unpool2dMaxGradFunctor<platform::GPUPlace, float>;
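For readers who want the launch pattern in isolation: the hunks above only re-indent calls of the form Kernel<<<grid, threads, 0, stream>>>(...). Below is a minimal, self-contained CUDA sketch of that pattern applied to a max-unpool-style scatter. It is not PaddlePaddle code; ScatterByIndex, the buffer names, the block size of 1024, and the toy shapes are all illustrative assumptions.

// Minimal sketch (not PaddlePaddle code): launch a grid-stride scatter kernel
// on an explicit CUDA stream, mirroring the <<<grid, threads, 0, stream>>>
// configuration seen in the diff above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void ScatterByIndex(const int n, const float* input,
                               const int* indices, float* output) {
  // Each input element is written to the output slot recorded in indices[i],
  // which is the core of max-unpooling's forward pass.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    output[indices[i]] = input[i];
  }
}

int main() {
  const int n = 4;                       // number of pooled elements
  const int out_n = 16;                  // size of the unpooled output
  float h_in[n] = {1.f, 2.f, 3.f, 4.f};
  int h_idx[n] = {0, 5, 10, 15};         // argmax positions recorded by pooling

  float *d_in, *d_out;
  int* d_idx;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_idx, n * sizeof(int));
  cudaMalloc(&d_out, out_n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_idx, h_idx, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, out_n * sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  const int threads = 1024;
  const int grid = (n + threads - 1) / threads;  // same grid math as the diff
  // Same launch shape as above: <<<grid, threads, shared_mem = 0, stream>>>.
  ScatterByIndex<<<grid, threads, 0, stream>>>(n, d_in, d_idx, d_out);
  cudaStreamSynchronize(stream);

  float h_out[out_n];
  cudaMemcpy(h_out, d_out, out_n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < out_n; ++i) printf("%g ", h_out[i]);
  printf("\n");

  cudaStreamDestroy(stream);
  cudaFree(d_in);
  cudaFree(d_idx);
  cudaFree(d_out);
  return 0;
}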

paddle/operators/unpool_op.cc  (3 additions, 5 deletions)
@@ -94,12 +94,11 @@ class UnpoolOp : public framework::OperatorWithKernel {
                    "Output(Out) of UnpoolOp should not be null.");
     auto in_x_dims = ctx->GetInputDim("X");
     auto in_y_dims = ctx->GetInputDim("Indices");
-    std::string unpooling_type = ctx->Attrs()
-                                     .Get<std::string>("unpooling_type");
+    std::string unpooling_type =
+        ctx->Attrs().Get<std::string>("unpooling_type");
     std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
     std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
-    std::vector<int> paddings =
-        ctx->Attrs().Get<std::vector<int>>("paddings");
+    std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
     PADDLE_ENFORCE(in_x_dims.size() == 4,
                    "Unpooling intput must be of 4-dimensional.");
     PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims);
@@ -142,4 +141,3 @@ REGISTER_OP_CPU_KERNEL(unpool,
 REGISTER_OP_CPU_KERNEL(
     unpool_grad, ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
     ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);
-
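The unpool_op.cc hunk only reformats how InferShape fetches its attributes; the spatial size it ultimately produces follows the usual max-unpool relation output = (input - 1) * stride - 2 * padding + ksize, the inverse of the pooling size formula. A small framework-free sketch of that arithmetic is given below, under the assumption that this is the relation the rest of the file uses; UnpoolOutputSize and the example shapes are illustrative, not Paddle's API.

// Standalone sketch of max-unpool shape inference for an NCHW tensor.
// Assumes the standard relation out = (in - 1) * stride - 2 * pad + ksize.
#include <cassert>
#include <cstdio>
#include <vector>

int UnpoolOutputSize(int input_size, int ksize, int padding, int stride) {
  return (input_size - 1) * stride - 2 * padding + ksize;
}

int main() {
  // {batch, channels, height, width}, matching the 4-D check in the hunk above.
  std::vector<int> in_x_dims = {1, 3, 4, 4};
  std::vector<int> ksize = {2, 2};
  std::vector<int> strides = {2, 2};
  std::vector<int> paddings = {0, 0};
  assert(in_x_dims.size() == 4);

  std::vector<int> out_dims = {in_x_dims[0], in_x_dims[1]};
  for (size_t i = 0; i < ksize.size(); ++i) {
    out_dims.push_back(
        UnpoolOutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
  }
  // Prints: out dims: 1 3 8 8
  printf("out dims: %d %d %d %d\n", out_dims[0], out_dims[1], out_dims[2],
         out_dims[3]);
  return 0;
}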
