Skip to content

Commit f67f0ca

Browse files
author
wangyang59
committed
finished testing cpu bilinear_interp_op
1 parent c7cd6d1 commit f67f0ca

File tree

3 files changed

+97
-8
lines changed

3 files changed

+97
-8
lines changed

paddle/fluid/operators/bilinear_interp_op.cc

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,13 @@ class BilinearInterpOp : public framework::OperatorWithKernel {
2727
PADDLE_ENFORCE(ctx->HasOutput("Out"),
2828
"Output(Out) of BilinearInterOp should not be null.");
2929

30-
auto dim_x = ctx->GetInputDim("Input"); // NCHW format
30+
auto dim_x = ctx->GetInputDim("X"); // NCHW format
3131
int out_h = ctx->Attrs().Get<int>("out_h");
3232
int out_w = ctx->Attrs().Get<int>("out_w");
3333
PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4");
3434

3535
std::vector<int64_t> dim_out({dim_x[0], dim_x[1], out_h, out_w});
36-
ctx->SetOutputDim("Output", framework::make_ddim(dim_out));
36+
ctx->SetOutputDim("Out", framework::make_ddim(dim_out));
3737
}
3838
};
3939

@@ -83,4 +83,5 @@ namespace ops = paddle::operators;
8383
REGISTER_OP(bilinear_interp, ops::BilinearInterpOp, ops::BilinearInterpOpMaker,
8484
bilinear_interp_grad, ops::BilinearInterpOpGrad);
8585
REGISTER_OP_CPU_KERNEL(bilinear_interp, ops::BilinearInterpKernel<float>);
86-
REGISTER_OP_CPU_KERNEL(bilinear_interp_grad, ops::BilinearInterpKernel<float>);
86+
REGISTER_OP_CPU_KERNEL(bilinear_interp_grad,
87+
ops::BilinearInterpGradKernel<float>);

paddle/fluid/operators/bilinear_interp_op.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ class BilinearInterpKernel : public framework::OpKernel<T> {
4646
T ratio_w = (out_w > 1) ? static_cast<T>(in_w - 1) / (out_w - 1) : 0.f;
4747

4848
if (in_h == out_h && in_w == out_w) {
49-
memcpy(output, input, product(input_t->dims()) * sizeof(T));
49+
memcpy(output, input, input_t->numel() * sizeof(T));
5050
} else {
5151
for (int k = 0; k < batch_size; ++k) { // loop for batches
5252
for (int i = 0; i < out_h; ++i) { // loop for images
@@ -123,10 +123,10 @@ class BilinearInterpGradKernel : public framework::OpKernel<T> {
123123
const T* out_pos = &d_output[k * out_chw + i * out_w + j];
124124

125125
for (int c = 0; c < channels; ++c) { // loop for channels
126-
in_pos[0] = h2lambda * w2lambda * out_pos[0];
127-
in_pos[wid] = h2lambda * w1lambda * out_pos[0];
128-
in_pos[hid * in_w] = h1lambda * w2lambda * out_pos[0];
129-
in_pos[hid * in_w + wid] = h1lambda * w1lambda * out_pos[0];
126+
in_pos[0] += h2lambda * w2lambda * out_pos[0];
127+
in_pos[wid] += h2lambda * w1lambda * out_pos[0];
128+
in_pos[hid * in_w] += h1lambda * w2lambda * out_pos[0];
129+
in_pos[hid * in_w + wid] += h1lambda * w1lambda * out_pos[0];
130130
in_pos += in_hw;
131131
out_pos += out_hw;
132132
}
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import unittest
16+
import numpy as np
17+
from op_test import OpTest
18+
19+
20+
def bilinear_interp_np(input, out_h, out_w):
    """NumPy reference for NCHW bilinear interpolation.

    Uses align-corners style scaling, matching the C++ kernel:
    ratio = (in - 1) / (out - 1) when out > 1, otherwise 0.

    Args:
        input: 4-D float array of shape (batch, channel, in_h, in_w).
        out_h: target output height.
        out_w: target output width.

    Returns:
        float32 array of shape (batch, channel, out_h, out_w).
    """
    batch_size, channel, in_h, in_w = input.shape

    ratio_h = (in_h - 1.0) / (out_h - 1.0) if out_h > 1 else 0.0
    ratio_w = (in_w - 1.0) / (out_w - 1.0) if out_w > 1 else 0.0

    out = np.zeros((batch_size, channel, out_h, out_w))
    for i in range(out_h):
        src_h = ratio_h * i
        h0 = int(src_h)
        # Offset to the next source row; 0 when already on the last row.
        h_step = 1 if h0 < in_h - 1 else 0
        h_frac = src_h - h0
        for j in range(out_w):
            src_w = ratio_w * j
            w0 = int(src_w)
            # Offset to the next source column; 0 on the last column.
            w_step = 1 if w0 < in_w - 1 else 0
            w_frac = src_w - w0

            # Interpolate horizontally on the two bracketing rows,
            # then vertically between those two results.
            top = (1.0 - w_frac) * input[:, :, h0, w0] + \
                w_frac * input[:, :, h0, w0 + w_step]
            bottom = (1.0 - w_frac) * input[:, :, h0 + h_step, w0] + \
                w_frac * input[:, :, h0 + h_step, w0 + w_step]
            out[:, :, i, j] = (1.0 - h_frac) * top + h_frac * bottom
    return out.astype("float32")
48+
49+
50+
class TestBilinearInterpOp(OpTest):
    """Checks the bilinear_interp op against the NumPy reference.

    Subclasses override init_test_case to vary the input shape and
    the requested output size.
    """

    def setUp(self):
        self.init_test_case()
        self.op_type = "bilinear_interp"

        x = np.random.random(self.input_shape).astype("float32")
        expected = bilinear_interp_np(x, self.out_h, self.out_w)

        self.inputs = {'X': x}
        self.attrs = {'out_h': self.out_h, 'out_w': self.out_w}
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # in_place=True: presumably also checks the gradient kernel when
        # input/output buffers alias — confirm against OpTest semantics.
        self.check_grad(['X'], 'Out', in_place=True)

    def init_test_case(self):
        # Default case: small 4-D NCHW input, downsample to 2x2.
        self.input_shape = [2, 3, 4, 4]
        self.out_h = 2
        self.out_w = 2
71+
72+
73+
class TestCase1(TestBilinearInterpOp):
    """Degenerate output: collapse a 7x8 image down to a single pixel."""

    def init_test_case(self):
        self.input_shape = [4, 1, 7, 8]
        self.out_h, self.out_w = 1, 1
78+
79+
80+
class TestCase2(TestBilinearInterpOp):
    """Upsampling: enlarge a 9x6 image to 12x12."""

    def init_test_case(self):
        self.input_shape = [3, 3, 9, 6]
        self.out_h, self.out_w = 12, 12
85+
86+
87+
if __name__ == "__main__":
    # Run all test cases in this module via the standard unittest runner.
    unittest.main()

0 commit comments

Comments
 (0)