Skip to content

Commit 312500d

Browse files
mozga-inteltensor-tang
authored and committed
Enable pool2d operator for a ngraph engine (#15395)
* Enable pool2d operator for a ngraph engine test=develop * Update test=develop
1 parent b4c24f3 commit 312500d

File tree

4 files changed

+228
-0
lines changed

4 files changed

+228
-0
lines changed

paddle/fluid/operators/ngraph/ngraph_bridge.cc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,8 @@ std::map<std::string,
3838
{"mean_grad", NG_OPS::BuildMeanGradNode},
3939
{"mul", NG_OPS::BuildMulNode},
4040
{"mul_grad", NG_OPS::BuildMulGradNode},
41+
{"pool2d", NG_OPS::BuildPool2dNode},
42+
{"pool2d_grad", NG_OPS::BuildPool2dGradNode},
4143
{"softmax", NG_OPS::BuildSoftmaxNode},
4244
{"softmax_grad", NG_OPS::BuildSoftmaxGradNode},
4345
{"scale", NG_OPS::BuildScaleNode},

paddle/fluid/operators/ngraph/ngraph_ops.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ limitations under the License. */
2626
#include "ops/fill_constant_op.h"
2727
#include "ops/mean_op.h"
2828
#include "ops/mul_op.h"
29+
#include "ops/pool2d_op.h"
2930
#include "ops/scale_op.h"
3031
#include "ops/softmax_op.h"
3132
#include "ops/top_k_op.h"
Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#pragma once
16+
17+
#include <string>
18+
#include <vector>
19+
20+
#include "ngraph/ngraph.hpp"
21+
#include "paddle/fluid/platform/ngraph_helper.h"
22+
23+
namespace paddle {
24+
namespace operators {
25+
namespace ngraphs {
26+
27+
void BuildPool2dNode(
28+
const std::shared_ptr<paddle::framework::OperatorBase>& op,
29+
std::shared_ptr<
30+
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
31+
ngb_node_map) {
32+
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
33+
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
34+
auto x_shape = x->get_shape();
35+
36+
std::string pooling_type = op_attrs.Get<std::string>("pooling_type");
37+
std::vector<int> ksize = op_attrs.Get<std::vector<int>>("ksize");
38+
std::vector<int> strides = op_attrs.Get<std::vector<int>>("strides");
39+
std::vector<int> paddings = op_attrs.Get<std::vector<int>>("paddings");
40+
41+
PADDLE_ENFORCE_EQ(x_shape.size() - 2, ksize.size(),
42+
"Handling 2d pooling only");
43+
44+
if (op_attrs.Get<bool>("global_pooling")) {
45+
for (size_t i = 0; i < ksize.size(); ++i) {
46+
paddings[i] = 0;
47+
ksize[i] = static_cast<int>(x_shape.at(i + 2));
48+
}
49+
}
50+
51+
ngraph::Shape ng_padding_below{static_cast<size_t>(paddings.at(0)),
52+
static_cast<size_t>(paddings.at(1))};
53+
ngraph::Shape ng_padding_above{static_cast<size_t>(paddings.at(0)),
54+
static_cast<size_t>(paddings.at(1))};
55+
ngraph::Shape ng_ksize_shape{static_cast<size_t>(ksize.at(0)),
56+
static_cast<size_t>(ksize.at(1))};
57+
ngraph::Strides ng_strides{static_cast<size_t>(strides.at(0)),
58+
static_cast<size_t>(strides.at(1))};
59+
60+
auto ComputeCeiledOutput = [](size_t in, size_t k, size_t p, size_t s) {
61+
return (in - k + 2 * p) / s + 1;
62+
};
63+
64+
if (op_attrs.Get<bool>("ceil_mode")) {
65+
auto dummy_out = paddle::platform::GetOutputNode(op, "Out", ngb_node_map);
66+
auto dummpy_shape = dummy_out->get_shape();
67+
for (size_t i = 0; i < ng_padding_above.size(); ++i) {
68+
auto desired_size = ComputeCeiledOutput(x_shape[i + 2], ksize[i],
69+
paddings[i], strides[i]);
70+
if (desired_size != dummpy_shape[i + 2]) {
71+
ng_padding_above[i] += strides[i];
72+
}
73+
}
74+
}
75+
76+
bool padding_exclusive = op_attrs.Get<bool>("exclusive");
77+
if (pooling_type == "max") {
78+
auto pool2d = std::make_shared<ngraph::op::MaxPool>(
79+
x, ng_ksize_shape, ng_strides, ng_padding_below, ng_padding_above);
80+
paddle::platform::SetOutputNode(op, "Out", pool2d, ngb_node_map);
81+
} else if (pooling_type == "avg") {
82+
std::shared_ptr<ngraph::Node> pool2d;
83+
if (op_attrs.Get<bool>("adaptive")) {
84+
auto ComputeAdaptive = [](size_t in, size_t k) {
85+
return std::floor(in / k);
86+
};
87+
ng_strides[0] = x_shape.size() == 4
88+
? ComputeAdaptive(x_shape[3], ksize[0])
89+
: ng_strides[0];
90+
ng_strides[1] = x_shape.size() == 4
91+
? ComputeAdaptive(x_shape[3], ksize[0])
92+
: ng_strides[1];
93+
pool2d =
94+
std::make_shared<ngraph::op::AvgPool>(x, ng_ksize_shape, ng_strides);
95+
} else {
96+
pool2d = std::make_shared<ngraph::op::AvgPool>(
97+
x, ng_ksize_shape, ng_strides, ng_padding_below, ng_padding_above,
98+
!padding_exclusive);
99+
}
100+
paddle::platform::SetOutputNode(op, "Out", pool2d, ngb_node_map);
101+
} else {
102+
PADDLE_THROW("Support max and avg pooling only");
103+
}
104+
}
105+
106+
void BuildPool2dGradNode(
107+
const std::shared_ptr<paddle::framework::OperatorBase>& op,
108+
std::shared_ptr<
109+
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
110+
ngb_node_map) {
111+
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
112+
auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map);
113+
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
114+
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
115+
auto x_shape = x->get_shape();
116+
117+
std::string pooling_type = op_attrs.Get<std::string>("pooling_type");
118+
std::vector<int> ksize = op_attrs.Get<std::vector<int>>("ksize");
119+
std::vector<int> strides = op_attrs.Get<std::vector<int>>("strides");
120+
std::vector<int> paddings = op_attrs.Get<std::vector<int>>("paddings");
121+
122+
PADDLE_ENFORCE_EQ(x_shape.size() - 2, ksize.size(),
123+
"Handling 2d pooling only");
124+
125+
if (op_attrs.Get<bool>("global_pooling")) {
126+
for (size_t i = 0; i < ksize.size(); ++i) {
127+
paddings[i] = 0;
128+
ksize[i] = static_cast<int>(x_shape.at(i + 2));
129+
}
130+
}
131+
132+
ngraph::Shape ng_padding_below{static_cast<size_t>(paddings.at(0)),
133+
static_cast<size_t>(paddings.at(1))};
134+
ngraph::Shape ng_padding_above{static_cast<size_t>(paddings.at(0)),
135+
static_cast<size_t>(paddings.at(1))};
136+
ngraph::Shape ng_ksize_shape{static_cast<size_t>(ksize.at(0)),
137+
static_cast<size_t>(ksize.at(1))};
138+
ngraph::Strides ng_strides{static_cast<size_t>(strides.at(0)),
139+
static_cast<size_t>(strides.at(1))};
140+
141+
bool padding_exclusive = op_attrs.Get<bool>("exclusive");
142+
if (pooling_type == "max") {
143+
auto pool2d_grad = std::make_shared<ngraph::op::MaxPoolBackprop>(
144+
x, dout, out, ng_ksize_shape, ng_strides, ng_padding_below,
145+
ng_padding_above);
146+
paddle::platform::SetOutputNode(op, "X@GRAD", pool2d_grad, ngb_node_map);
147+
} else if (pooling_type == "avg") {
148+
std::shared_ptr<ngraph::Node> pool2d_grad;
149+
if (op_attrs.Get<bool>("adaptive")) {
150+
auto ComputeAdaptive = [](size_t in, size_t k) {
151+
return std::floor(in / k);
152+
};
153+
ng_strides[0] = x_shape.size() == 4
154+
? ComputeAdaptive(x_shape[3], ksize[0])
155+
: ng_strides[0];
156+
ng_strides[1] = x_shape.size() == 4
157+
? ComputeAdaptive(x_shape[3], ksize[0])
158+
: ng_strides[1];
159+
pool2d_grad = std::make_shared<ngraph::op::AvgPoolBackprop>(
160+
x->get_shape(), dout, ng_ksize_shape, ng_strides, ng_padding_below,
161+
ng_padding_above, !padding_exclusive);
162+
} else {
163+
pool2d_grad = std::make_shared<ngraph::op::AvgPoolBackprop>(
164+
x->get_shape(), dout, ng_ksize_shape, ng_strides, ng_padding_below,
165+
ng_padding_above, !padding_exclusive);
166+
}
167+
paddle::platform::SetOutputNode(op, "X@GRAD", pool2d_grad, ngb_node_map);
168+
} else {
169+
PADDLE_THROW("Support max and avg pooling only");
170+
}
171+
}
172+
} // namespace ngraphs
173+
} // namespace operators
174+
} // namespace paddle
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from __future__ import print_function
16+
17+
from paddle.fluid.tests.unittests.test_pool2d_op import *
18+
19+
20+
# Each subclass below re-runs an existing pool2d unit-test case so that the
# test runner exercises it under the nGraph engine. The original overrides
# (init_test_case / init_pool_type) only called super() and changed nothing,
# so they were redundant: plain inheritance already invokes the base-class
# methods. The subclasses are kept because their presence is what makes
# unittest discover and execute each case in this module.


class TestNGRAPHPool2D_Op(TestPool2D_Op):
    """Base pool2d operator test, executed under the nGraph engine."""


class TestNGRAPHCase1(TestCase1):
    """pool2d TestCase1, executed under the nGraph engine."""


class TestNGRAPHCase2(TestCase2):
    """pool2d TestCase2, executed under the nGraph engine."""


class TestNGRAPHCase3(TestCase3):
    """pool2d TestCase3 (max pooling), executed under the nGraph engine."""


class TestNGRAPHCase4(TestCase4):
    """pool2d TestCase4 (max pooling), executed under the nGraph engine."""


class TestNGRAPHCase5(TestCase5):
    """pool2d TestCase5 (max pooling), executed under the nGraph engine."""


if __name__ == '__main__':
    unittest.main()

0 commit comments

Comments
 (0)