Skip to content

Commit 43a67a2

Browse files
mozga-intel authored and tensor-tang committed
Enable conv2d operator for a ngraph engine (#15269)
test=develop
1 parent a6a1a92 commit 43a67a2

File tree

4 files changed

+290
-0
lines changed

4 files changed

+290
-0
lines changed

paddle/fluid/operators/ngraph/ngraph_bridge.cc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@ std::map<std::string,
3131
std::shared_ptr<std::unordered_map<
3232
std::string, std::shared_ptr<ngraph::Node>>>)>>
3333
NgraphBridge::NG_NODE_MAP = {
34+
{"conv2d", NG_OPS::BuildConv2dNode},
35+
{"conv2d_grad", NG_OPS::BuildConv2dGradNode},
3436
{"elementwise_add", NG_OPS::BuildElementwiseAddNode},
3537
{"elementwise_add_grad", NG_OPS::BuildElementwiseAddGradNode},
3638
{"fill_constant", NG_OPS::BuildFillConstantNode},

paddle/fluid/operators/ngraph/ngraph_ops.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ limitations under the License. */
2222
#pragma once
2323

2424
#include "ops/binary_unnary_op.h"
25+
#include "ops/conv2d_op.h"
2526
#include "ops/elementwise_add_op.h"
2627
#include "ops/fill_constant_op.h"
2728
#include "ops/mean_op.h"
Lines changed: 235 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,235 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#pragma once
16+
17+
#include <string>
18+
#include <vector>
19+
#include "ngraph/ngraph.hpp"
20+
#include "paddle/fluid/platform/ngraph_helper.h"
21+
22+
namespace paddle {
23+
namespace operators {
24+
namespace ngraphs {
25+
26+
std::shared_ptr<ngraph::Node> GroupedConvolution(
27+
const std::shared_ptr<ngraph::Node>& data_batch,
28+
const std::shared_ptr<ngraph::Node>& filters, const ngraph::Strides strides,
29+
const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings,
30+
size_t groups) {
31+
auto& data_shape = data_batch->get_shape();
32+
auto& filter_shape = filters->get_shape();
33+
ngraph::NodeVector ng_slices;
34+
35+
for (size_t i = 0; i < groups; ++i) {
36+
size_t channel_step = filter_shape.at(1);
37+
const std::vector<size_t> lower_bound{0, i * channel_step, 0, 0};
38+
const std::vector<size_t> upper_bound{data_shape.at(0),
39+
(i + 1) * channel_step,
40+
data_shape.at(2), data_shape.at(3)};
41+
auto data_slice = std::make_shared<ngraph::op::Slice>(
42+
data_batch, lower_bound, upper_bound);
43+
44+
size_t filter_step = filter_shape.at(0) / groups;
45+
const std::vector<size_t> filter_lower_bound{i * filter_step, 0, 0, 0};
46+
const std::vector<size_t> filter_upper_bound{
47+
(i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2),
48+
filter_shape.at(3)};
49+
auto filter_slice = std::make_shared<ngraph::op::Slice>(
50+
filters, filter_lower_bound, filter_upper_bound);
51+
auto ng_conv = std::make_shared<ngraph::op::Convolution>(
52+
data_slice, filter_slice, strides, dilations, paddings, paddings);
53+
ng_slices.push_back(ng_conv);
54+
}
55+
56+
size_t concat_axis = 1;
57+
return std::make_shared<ngraph::op::Concat>(ng_slices, concat_axis);
58+
}
59+
60+
std::shared_ptr<ngraph::Node> GroupedGradConvolutionFilter(
61+
const std::shared_ptr<ngraph::Node>& data_batch,
62+
const std::shared_ptr<ngraph::Node>& filters,
63+
const std::shared_ptr<ngraph::Node>& doutput, const ngraph::Strides strides,
64+
const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings,
65+
size_t groups) {
66+
auto& data_shape = data_batch->get_shape();
67+
auto& filter_shape = filters->get_shape();
68+
auto& out_shape = doutput->get_shape();
69+
ngraph::NodeVector ng_slices;
70+
71+
for (size_t i = 0; i < groups; ++i) {
72+
size_t channel_step = filter_shape.at(1);
73+
const std::vector<size_t> lower_bound{0, i * channel_step, 0, 0};
74+
const std::vector<size_t> upper_bound{data_shape.at(0),
75+
(i + 1) * channel_step,
76+
data_shape.at(2), data_shape.at(3)};
77+
auto data_slice = std::make_shared<ngraph::op::Slice>(
78+
data_batch, lower_bound, upper_bound);
79+
80+
size_t filter_step = data_shape.at(0);
81+
82+
const std::vector<size_t> filter_lower_bound{i * filter_step, 0, 0, 0};
83+
const std::vector<size_t> filter_upper_bound{
84+
(i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2),
85+
filter_shape.at(3)};
86+
auto filter_slice = std::make_shared<ngraph::op::Slice>(
87+
filters, filter_lower_bound, filter_upper_bound);
88+
89+
const std::vector<size_t> olower_bound{0, i * filter_step, 0, 0};
90+
const std::vector<size_t> oupper_bound{out_shape.at(0),
91+
(i + 1) * filter_step,
92+
out_shape.at(2), out_shape.at(3)};
93+
auto out_slice = std::make_shared<ngraph::op::Slice>(doutput, olower_bound,
94+
oupper_bound);
95+
96+
auto ng_conv = std::make_shared<ngraph::op::ConvolutionBackpropFilters>(
97+
data_slice, filter_slice->get_shape(), out_slice, strides, dilations,
98+
paddings, paddings, ngraph::Strides{1, 1});
99+
100+
ng_slices.push_back(ng_conv);
101+
}
102+
103+
size_t concat_axis = 0;
104+
return std::make_shared<ngraph::op::Concat>(ng_slices, concat_axis);
105+
}
106+
107+
std::shared_ptr<ngraph::Node> GroupedGradConvolutionData(
108+
const std::shared_ptr<ngraph::Node>& data_batch,
109+
const std::shared_ptr<ngraph::Node>& filters,
110+
const std::shared_ptr<ngraph::Node>& doutput, const ngraph::Strides strides,
111+
const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings,
112+
size_t groups) {
113+
auto& data_shape = data_batch->get_shape();
114+
auto& filter_shape = filters->get_shape();
115+
auto& out_shape = doutput->get_shape();
116+
ngraph::NodeVector ng_slices;
117+
118+
for (size_t i = 0; i < groups; ++i) {
119+
size_t channel_step = filter_shape.at(1);
120+
const std::vector<size_t> lower_bound{0, i * channel_step, 0, 0};
121+
const std::vector<size_t> upper_bound{data_shape.at(0),
122+
(i + 1) * channel_step,
123+
data_shape.at(2), data_shape.at(3)};
124+
auto data_slice = std::make_shared<ngraph::op::Slice>(
125+
data_batch, lower_bound, upper_bound);
126+
127+
size_t filter_step = data_shape.at(0);
128+
129+
const std::vector<size_t> filter_lower_bound{i * filter_step, 0, 0, 0};
130+
const std::vector<size_t> filter_upper_bound{
131+
(i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2),
132+
filter_shape.at(3)};
133+
auto filter_slice = std::make_shared<ngraph::op::Slice>(
134+
filters, filter_lower_bound, filter_upper_bound);
135+
136+
const std::vector<size_t> olower_bound{0, i * filter_step, 0, 0};
137+
const std::vector<size_t> oupper_bound{out_shape.at(0),
138+
(i + 1) * filter_step,
139+
out_shape.at(2), out_shape.at(3)};
140+
auto out_slice = std::make_shared<ngraph::op::Slice>(doutput, olower_bound,
141+
oupper_bound);
142+
143+
auto ng_conv = std::make_shared<ngraph::op::ConvolutionBackpropData>(
144+
data_slice->get_shape(), filter_slice, out_slice, strides, dilations,
145+
paddings, paddings, ngraph::Strides{1, 1});
146+
ng_slices.push_back(ng_conv);
147+
}
148+
149+
size_t concat_axis = 1;
150+
return std::make_shared<ngraph::op::Concat>(ng_slices, concat_axis);
151+
}
152+
153+
void BuildConv2dNode(
154+
const std::shared_ptr<paddle::framework::OperatorBase>& op,
155+
std::shared_ptr<
156+
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
157+
ngb_node_map) {
158+
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
159+
auto filters = paddle::platform::GetInputNode(op, "Filter", ngb_node_map);
160+
auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map);
161+
162+
std::vector<int> strides = op_attrs.Get<std::vector<int>>("strides");
163+
std::vector<int> paddings = op_attrs.Get<std::vector<int>>("paddings");
164+
std::vector<int> dilations = op_attrs.Get<std::vector<int>>("dilations");
165+
166+
const ngraph::Strides ng_strides{static_cast<size_t>(strides.at(0)),
167+
static_cast<size_t>(strides.at(1))};
168+
const ngraph::Strides ng_dilations{static_cast<size_t>(dilations.at(0)),
169+
static_cast<size_t>(dilations.at(1))};
170+
const ngraph::CoordinateDiff ng_paddings{
171+
static_cast<std::ptrdiff_t>(paddings.at(0)),
172+
static_cast<std::ptrdiff_t>(paddings.at(1))};
173+
174+
int groups = static_cast<size_t>(op_attrs.Get<int>("groups"));
175+
PADDLE_ENFORCE_GE(groups, 1, "conv groups needs be no less than 1");
176+
177+
std::shared_ptr<ngraph::Node> result;
178+
if (groups == 1) {
179+
result = std::make_shared<ngraph::op::Convolution>(
180+
input, filters, ng_strides, ng_dilations, ng_paddings, ng_paddings);
181+
} else {
182+
result = GroupedConvolution(input, filters, ng_strides, ng_dilations,
183+
ng_paddings, groups);
184+
}
185+
paddle::platform::SetOutputNode(op, "Output", result, ngb_node_map);
186+
}
187+
188+
void BuildConv2dGradNode(
189+
const std::shared_ptr<paddle::framework::OperatorBase>& op,
190+
std::shared_ptr<
191+
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
192+
ngb_node_map) {
193+
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
194+
auto filter = paddle::platform::GetInputNode(op, "Filter", ngb_node_map);
195+
auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map);
196+
auto doutput =
197+
paddle::platform::GetInputNode(op, "Output@GRAD", ngb_node_map);
198+
199+
int groups = op_attrs.Get<int>("groups");
200+
std::vector<int> strides = op_attrs.Get<std::vector<int>>("strides");
201+
std::vector<int> paddings = op_attrs.Get<std::vector<int>>("paddings");
202+
std::vector<int> dilations = op_attrs.Get<std::vector<int>>("dilations");
203+
204+
const ngraph::Strides ng_strides{static_cast<size_t>(strides.at(0)),
205+
static_cast<size_t>(strides.at(1))};
206+
const ngraph::Strides ng_dilations{static_cast<size_t>(dilations.at(0)),
207+
static_cast<size_t>(dilations.at(1))};
208+
const ngraph::CoordinateDiff ng_paddings{
209+
static_cast<std::ptrdiff_t>(paddings.at(0)),
210+
static_cast<std::ptrdiff_t>(paddings.at(1))};
211+
212+
std::shared_ptr<ngraph::Node> dfilter;
213+
std::shared_ptr<ngraph::Node> dinput;
214+
if (groups == 1) {
215+
dfilter = std::make_shared<ngraph::op::ConvolutionBackpropFilters>(
216+
input, filter->get_shape(), doutput, ng_strides, ng_dilations,
217+
ng_paddings, ng_paddings, ngraph::Strides{1, 1});
218+
219+
dinput = std::make_shared<ngraph::op::ConvolutionBackpropData>(
220+
input->get_shape(), filter, doutput, ng_strides, ng_dilations,
221+
ng_paddings, ng_paddings, ngraph::Strides{1, 1});
222+
223+
} else {
224+
dfilter = GroupedGradConvolutionFilter(input, filter, doutput, ng_strides,
225+
ng_dilations, ng_paddings, groups);
226+
dinput = GroupedGradConvolutionData(input, filter, doutput, ng_strides,
227+
ng_dilations, ng_paddings, groups);
228+
}
229+
230+
paddle::platform::SetOutputNode(op, "Filter@GRAD", dfilter, ngb_node_map);
231+
paddle::platform::SetOutputNode(op, "Input@GRAD", dinput, ngb_node_map);
232+
}
233+
} // namespace ngraphs
234+
} // namespace operators
235+
} // namespace paddle
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from __future__ import print_function
16+
17+
import unittest
18+
from paddle.fluid.tests.unittests.test_conv2d_op import *
19+
20+
21+
class TestNGRAPH(TestConv2dOp):
22+
def init_kernel_type(self):
23+
super(TestNGRAPH, self).init_kernel_type()
24+
25+
26+
class TestNGRAPHWithPad(TestWithPad):
27+
def init_kernel_type(self):
28+
super(TestNGRAPHWithPad, self).init_kernel_type()
29+
30+
31+
class TestNGRAPHWithStride(TestWithStride):
32+
def init_kernel_type(self):
33+
super(TestNGRAPHWithStride, self).init_kernel_type()
34+
35+
36+
class TestNGRAPHWithGroup(TestWithGroup):
37+
def init_kernel_type(self):
38+
super(TestNGRAPHWithGroup, self).init_kernel_type()
39+
40+
41+
class TestNGRAPHWith1x1(TestWith1x1):
42+
def init_kernel_type(self):
43+
super(TestNGRAPHWith1x1, self).init_kernel_type()
44+
45+
46+
class TestNGRAPHWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
47+
def init_kernel_type(self):
48+
super(TestNGRAPHWithInput1x1Filter1x1, self).init_kernel_type()
49+
50+
51+
if __name__ == '__main__':
52+
unittest.main()

0 commit comments

Comments
 (0)