Skip to content

Commit 7da2f65

Browse files
authored
[onert/ruy] Move operation visitors to separate layer files (#16269)
This commit moves the Conv2D and FullyConnected visitor implementations from KernelGenerator.cc to their respective operation layer files. It also updates Validator.h to use visitor declarations instead of inline implementations.

ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
1 parent 6701cbc commit 7da2f65

File tree

4 files changed

+97
-79
lines changed

4 files changed

+97
-79
lines changed

runtime/onert/backend/ruy/KernelGenerator.cc

Lines changed: 1 addition & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,6 @@
1616

1717
#include "KernelGenerator.h"
1818

19-
#include "ops/Conv2DLayer.h"
20-
#include "ops/FullyConnectedLayer.h"
21-
2219
#include <backend/Backend.h>
2320
#include <backend/IConfig.h>
2421
#include <memory>
@@ -72,79 +69,6 @@ KernelGenerator::KernelGenerator(const ir::Graph &graph,
7269
// DO NOTHING
7370
}
7471

75-
void KernelGenerator::visit(const ir::operation::Conv2D &node)
76-
{
77-
using ir::operation::Conv2D;
78-
79-
const auto ofm_index{node.getOutputs().at(0)};
80-
const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
81-
const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
82-
const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
83-
84-
auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
85-
auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
86-
auto ker_tensor = _tensor_reg->getPortableTensor(ker_index);
87-
auto bias_tensor = _tensor_reg->getPortableTensor(bias_index);
88-
89-
const auto stride = node.param().stride;
90-
const auto activation = node.param().activation;
91-
const auto &param_padding = node.param().padding;
92-
const auto dilation = node.param().dilation;
93-
auto fn = std::make_unique<ops::ConvolutionLayer>();
94-
95-
if (_ctx.at(ifm_index).info().isDynamic() || _ctx.at(ker_index).info().isDynamic())
96-
{
97-
fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, param_padding.param.left,
98-
param_padding.param.right, param_padding.param.top, param_padding.param.bottom,
99-
stride.horizontal, stride.vertical, dilation.width_factor, dilation.height_factor,
100-
activation, ofm_tensor, _external_context);
101-
102-
_return_fn = std::move(fn);
103-
return;
104-
}
105-
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
106-
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
107-
// Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
108-
const auto &ker_shape = _ctx.at(ker_index).shape();
109-
const auto ker_height = ker_shape.dim(1);
110-
const auto ker_width = ker_shape.dim(2);
111-
112-
const auto padding =
113-
ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
114-
dilation.width_factor, dilation.height_factor);
115-
116-
fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, padding.left,
117-
padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical,
118-
dilation.width_factor, dilation.height_factor, activation, ofm_tensor,
119-
_external_context);
120-
121-
_return_fn = std::move(fn);
122-
}
123-
124-
void KernelGenerator::visit(const ir::operation::FullyConnected &node)
125-
{
126-
using ir::operation::FullyConnected;
127-
128-
const auto output_index{node.getOutputs().at(0)};
129-
const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)};
130-
const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
131-
const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
132-
const auto activation = node.param().activation;
133-
const auto weights_format = node.param().weights_format;
134-
if (weights_format != ir::FullyConnectedWeightsFormat::Default)
135-
throw std::runtime_error("Unsupported FullyConnected Weights Format");
136-
137-
auto output_tensor = _tensor_reg->getPortableTensor(output_index);
138-
auto input_tensor = _tensor_reg->getPortableTensor(input_index);
139-
auto weight_tensor = _tensor_reg->getPortableTensor(weight_index);
140-
auto bias_tensor = bias_index.undefined() ? nullptr : _tensor_reg->getPortableTensor(bias_index);
141-
142-
auto fn = std::make_unique<ops::FullyConnectedLayer>();
143-
144-
fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
145-
_external_context);
146-
147-
_return_fn = std::move(fn);
148-
}
72+
// Visitors for each operation is in ops/<InternalName>Layer.cc file
14973

15074
} // namespace onert::backend::ruy

runtime/onert/backend/ruy/Validator.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,7 @@ class Validator : public backend::ValidatorBase
3030
Validator(const ir::Graph &graph) : backend::ValidatorBase(graph) {}
3131

3232
private:
33-
#define OP(InternalName) \
34-
void visit(const ir::operation::InternalName &) override { _supported = true; }
33+
#define OP(InternalName) void visit(const ir::operation::InternalName &) override;
3534
#include "Operation.lst"
3635
#undef OP
3736
};

runtime/onert/backend/ruy/ops/Conv2DLayer.cc

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,68 @@
1616

1717
#include "Conv2DLayer.h"
1818

19+
#include "../KernelGenerator.h"
1920
#include "../Tensor.h"
21+
#include "../Validator.h"
22+
2023
#include "ir/Padding.h"
2124

25+
namespace onert::backend::ruy
26+
{
27+
28+
void Validator::visit(const ir::operation::Conv2D &) { _supported = true; }
29+
30+
void KernelGenerator::visit(const ir::operation::Conv2D &node)
31+
{
32+
using ir::operation::Conv2D;
33+
34+
const auto ofm_index{node.getOutputs().at(0)};
35+
const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
36+
const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
37+
const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
38+
39+
auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
40+
auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
41+
auto ker_tensor = _tensor_reg->getPortableTensor(ker_index);
42+
auto bias_tensor = _tensor_reg->getPortableTensor(bias_index);
43+
44+
const auto stride = node.param().stride;
45+
const auto activation = node.param().activation;
46+
const auto &param_padding = node.param().padding;
47+
const auto dilation = node.param().dilation;
48+
auto fn = std::make_unique<ops::ConvolutionLayer>();
49+
50+
if (_ctx.at(ifm_index).info().isDynamic() || _ctx.at(ker_index).info().isDynamic())
51+
{
52+
fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, param_padding.param.left,
53+
param_padding.param.right, param_padding.param.top, param_padding.param.bottom,
54+
stride.horizontal, stride.vertical, dilation.width_factor, dilation.height_factor,
55+
activation, ofm_tensor, _external_context);
56+
57+
_return_fn = std::move(fn);
58+
return;
59+
}
60+
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
61+
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
62+
// Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
63+
const auto &ker_shape = _ctx.at(ker_index).shape();
64+
const auto ker_height = ker_shape.dim(1);
65+
const auto ker_width = ker_shape.dim(2);
66+
67+
const auto padding =
68+
ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
69+
dilation.width_factor, dilation.height_factor);
70+
71+
fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, padding.left,
72+
padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical,
73+
dilation.width_factor, dilation.height_factor, activation, ofm_tensor,
74+
_external_context);
75+
76+
_return_fn = std::move(fn);
77+
}
78+
79+
} // namespace onert::backend::ruy
80+
2281
namespace onert::backend::ruy::ops
2382
{
2483
ConvolutionLayer::ConvolutionLayer()

runtime/onert/backend/ruy/ops/FullyConnectedLayer.cc

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,46 @@
1616

1717
#include "FullyConnectedLayer.h"
1818

19+
#include "../KernelGenerator.h"
1920
#include "../Tensor.h"
21+
#include "../Validator.h"
22+
2023
#include <ruy/operation/FullyConnected.h>
2124
#include <ruy/TensorUtils.h>
2225

26+
namespace onert::backend::ruy
27+
{
28+
29+
void Validator::visit(const ir::operation::FullyConnected &) { _supported = true; }
30+
31+
void KernelGenerator::visit(const ir::operation::FullyConnected &node)
32+
{
33+
using ir::operation::FullyConnected;
34+
35+
const auto output_index{node.getOutputs().at(0)};
36+
const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)};
37+
const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
38+
const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
39+
const auto activation = node.param().activation;
40+
const auto weights_format = node.param().weights_format;
41+
if (weights_format != ir::FullyConnectedWeightsFormat::Default)
42+
throw std::runtime_error("Unsupported FullyConnected Weights Format");
43+
44+
auto output_tensor = _tensor_reg->getPortableTensor(output_index);
45+
auto input_tensor = _tensor_reg->getPortableTensor(input_index);
46+
auto weight_tensor = _tensor_reg->getPortableTensor(weight_index);
47+
auto bias_tensor = bias_index.undefined() ? nullptr : _tensor_reg->getPortableTensor(bias_index);
48+
49+
auto fn = std::make_unique<ops::FullyConnectedLayer>();
50+
51+
fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
52+
_external_context);
53+
54+
_return_fn = std::move(fn);
55+
}
56+
57+
} // namespace onert::backend::ruy
58+
2359
namespace onert::backend::ruy::ops
2460
{
2561

0 commit comments

Comments (0)