Commit f0cf999

Reduce Operator Initial Code pushed

1 parent 2c97479 commit f0cf999

File tree

8 files changed: +380 / -1 lines changed

tmva/sofie/CMakeLists.txt

Lines changed: 2 additions & 1 deletion

@@ -33,6 +33,7 @@ ROOT_STANDARD_LIBRARY_PACKAGE(ROOTTMVASofie
   TMVA/ROperator_Concat.hxx
   TMVA/ROperator_Identity.hxx
   TMVA/ROperator_Softmax.hxx
+  TMVA/ROperator_Reduce.hxx
   TMVA/ROperator_Cast.hxx
   TMVA/SOFIE_common.hxx
   TMVA/SOFIEHelpers.hxx
@@ -51,4 +52,4 @@ set_target_properties(ROOTTMVASofie PROPERTIES
 # tests requires protobuf
 if (tmva-sofie)
   ROOT_ADD_TEST_SUBDIRECTORY(test)
-endif()
+endif()

tmva/sofie/inc/TMVA/OperatorList.hxx

Lines changed: 1 addition & 0 deletions

@@ -18,4 +18,5 @@
 #include "TMVA/ROperator_Identity.hxx"
 #include "TMVA/ROperator_Softmax.hxx"
 #include "TMVA/ROperator_Concat.hxx"
+#include "TMVA/ROperator_Reduce.hxx"
 #include "TMVA/ROperator_Cast.hxx"
tmva/sofie/inc/TMVA/ROperator_Reduce.hxx (new file)

Lines changed: 301 additions & 0 deletions
@@ -0,0 +1,301 @@
+
+// #ifndef TMVA_SOFIE_ROPERATOR_Reduce
+// #define TMVA_SOFIE_ROPERATOR_Reduce
+
+// #include "TMVA/SOFIE_common.hxx"
+// #include "TMVA/ROperator.hxx"
+// #include "TMVA/RModel.hxx"
+
+// #include <memory>
+// #include <sstream>
+// #include <algorithm>
+// #include <stdexcept>
+// #include <vector>
+// #include <cassert>
+
+// namespace TMVA{
+// namespace Experimental{
+// namespace SOFIE{
+
+// enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };
+
+// template <typename T, ReduceOpMode Op1>
+// struct ReduceOperatorTrait {
+//    const char *Name() { return ""; }
+// };
+// template <typename T>
+// struct ReduceOperatorTrait <T, ReduceMean> {
+//    static const char *Name() { return "ReduceMean"; }
+// };
+
+// template <typename T>
+// struct ReduceOperatorTrait <T, ReduceProd> {
+//    static const char *Name() { return "ReduceProd"; }
+// };
+
+// template <typename T>
+// struct ReduceOperatorTrait <T, ReduceSumsquare> {
+//    static const char *Name() { return "ReduceSumsquare"; }
+// };
+
+// template <typename T, ReduceOpMode Op>
+// class ROperator_Reduce final : public ROperator
+// {
+// private:
+//    /* Attributes */
+//    int fAxis = 1;
+//    ReduceOpMode fReduceMode;
+//    int fkeepdims = 1; // default value
+//    std::string fNX;
+//    std::string fNY;
+//    std::vector<size_t> fShapeX;
+//    std::vector<size_t> fShapeY;
+
+// public:
+
+//    ROperator_Reduce(){}
+//    ROperator_Reduce(int keepdims, int axis, std::string nameX, std::string nameY):
+//       fkeepdims(keepdims), fAxis(axis), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}
+
+//    // type of output given input
+//    std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
+//       return input;
+//    }
+
+//    // shape of output tensors given input tensors
+//    std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
+//       // assume now inputs have same shape (no broadcasting)
+//       auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return vector size 1 with first input
+//       return ret;
+//    }
+
+//    void Initialize(RModel& model){
+
+//       fUseSession = model.UseSession();
+
+//       if (model.CheckIfTensorAlreadyExist(fNX) == false){ // input must be a graph input, or already initialized intermediate tensor
+//          throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
+//       }
+//       fShapeX = model.GetTensorShape(fNX);
+//       // find shape of Y and add it in the list of intermediate tensors
+//       fShapeY = ShapeInference({fShapeX})[0];
+//       model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
+//    }
+
+//    std::string Generate(std::string OpName){
+//       OpName = "op_" + OpName;
+//       if (fShapeX.empty() || fShapeY.empty()) {
+//          throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
+//       }
+
+//       size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);
+
+//       auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
+//       auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
+
+//       size_t dim = fShapeY.size();
+//       std::vector<size_t> idx(dim);
+
+//       std::stringstream out;
+//       for (size_t i = 0; i < outputLength; i++) {
+
+//          if (dim == 2) {
+//             idx[0] = i / outputStrides[0];
+//             idx[1] = i % outputStrides[0];
+//          }
+//          if (dim == 3) {
+//             idx[0] = i / outputStrides[0];
+//             idx[1] = (i % outputStrides[0]) / outputStrides[1];
+//             idx[2] = (i % outputStrides[0]) % outputStrides[1];
+//          }
+//          if (dim == 4) {
+//             idx[0] = i / outputStrides[0];
+//             idx[1] = (i % outputStrides[0]) / outputStrides[1];
+//             idx[2] = ((i % outputStrides[0]) % outputStrides[1]) / outputStrides[2];
+//             idx[3] = ((i % outputStrides[0]) % outputStrides[1]) % outputStrides[2];
+//          }
+
+//          assert(idx[fAxis] == 0); // we can avoid computing this for the reduction axis which by definition is always zero
+
+//          out << SP << "float sum = 0;\n";
+//          // float sum = 0;
+//          for (size_t k = 0; k < fShapeX[fAxis]; k++) {
+//             idx[fAxis] = k;
+//             // compute input index j
+//             size_t j = 0;
+//             if (dim == 2) j = idx[0]*inputStrides[0] + idx[1];
+//             if (dim == 3) j = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + idx[2];
+//             if (dim == 4) j = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + idx[2]*inputStrides[2] + idx[3];
+
+//             out << SP << SP << "sum += tensor_" << fNX[j] << ";\n";
+//          }
+//          out << SP << "float average = sum/float(" << fShapeX[fAxis] << ")\n;";
+//          out << SP << "tensor_" << fNY[i] << " = average;\n";
+//       }
+//       return out.str();
+//    }
+
+// };
+
+// }//SOFIE
+// }//Experimental
+// }//TMVA
+
+
+// #endif //TMVA_SOFIE_ROPERATOR_Reduce
+
+#ifndef TMVA_SOFIE_ROPERATOR_Reduce
+#define TMVA_SOFIE_ROPERATOR_Reduce
+
+#include "TMVA/SOFIE_common.hxx"
+#include "TMVA/ROperator.hxx"
+#include "TMVA/RModel.hxx"
+
+#include <memory>
+#include <sstream>
+#include <algorithm>
+#include <stdexcept>
+#include <vector>
+#include <cassert>
+
+namespace TMVA{
+namespace Experimental{
+namespace SOFIE{
+
+enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };
+
+template <typename T, ReduceOpMode Op1>
+struct ReduceOperatorTrait {
+   static const char *Name() { return ""; }
+};
+template <typename T>
+struct ReduceOperatorTrait<T, ReduceMean> {
+   static const char *Name() { return "ReduceMean"; }
+};
+
+template <typename T>
+struct ReduceOperatorTrait<T, ReduceProd> {
+   static const char *Name() { return "ReduceProd"; }
+};
+
+template <typename T>
+struct ReduceOperatorTrait<T, ReduceSumsquare> {
+   static const char *Name() { return "ReduceSumsquare"; }
+};
+
+template <typename T, ReduceOpMode Op>
+class ROperator_Reduce final : public ROperator
+{
+private:
+   /* Attributes */
+   int fkeepdims = 1;  // default value
+   int fAttrAxes = 1;  // single reduction axis
+   std::string fNX;
+   std::string fNY;
+   std::vector<size_t> fShapeX;
+   std::vector<size_t> fShapeY;
+
+public:
+   ROperator_Reduce(){}
+   ROperator_Reduce(int keepdims, int attrAxes, std::string nameX, std::string nameY) :
+      fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}
+
+   // type of output given input
+   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
+      return input;
+   }
+
+   // shape of output tensors given input tensors: with keepdims = 1 the
+   // reduced axis is kept with size 1
+   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
+      auto ret = input;       // suggest copy to compiler
+      ret[0][fAttrAxes] = 1;  // the reduced axis collapses to size 1
+      return ret;
+   }
+
+   void Initialize(RModel& model){
+
+      fUseSession = model.UseSession();
+
+      if (model.CheckIfTensorAlreadyExist(fNX) == false){ // input must be a graph input, or already initialized intermediate tensor
+         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
+      }
+      fShapeX = model.GetTensorShape(fNX);
+      // find shape of Y and add it in the list of intermediate tensors
+      fShapeY = ShapeInference({fShapeX})[0];
+      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
+   }
+
+   std::string Generate(std::string OpName){
+      OpName = "op_" + OpName;
+      if (fShapeX.empty() || fShapeY.empty()) {
+         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
+      }
+
+      size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);
+
+      auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
+      auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
+
+      size_t dim = fShapeY.size();
+
+      // The strides are known at generation time, so the index arithmetic is
+      // emitted with literal values. A flat output index i is decomposed as
+      //    idx[d] = (i / outputStrides[d]) % outputShape[d]
+      // and the flat input index is recomposed as
+      //    l = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + ...
+      // The decomposition always gives idx[fAttrAxes] == 0, since the reduced
+      // axis has size 1 in the output shape (no need to divide by the last
+      // stride, which is 1 by definition).
+
+      std::stringstream out;
+      out << "\n//---- operator " << std::string(ReduceOperatorTrait<T,Op>::Name()) << " " << OpName << "\n";
+      out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
+      out << SP << SP << "size_t idx[" << dim << "];\n";
+      for (size_t d = 0; d < dim; d++)
+         out << SP << SP << "idx[" << d << "] = (i / " << outputStrides[d] << ") % " << fShapeY[d] << ";\n";
+      out << SP << SP << "float sum = 0;\n";
+      out << SP << SP << "for (size_t k = 0; k < " << fShapeX[fAttrAxes] << "; k++) {\n";
+      out << SP << SP << SP << "idx[" << fAttrAxes << "] = k;\n";
+      // compute the flat input index l from the multi-index and the input strides
+      out << SP << SP << SP << "size_t l = ";
+      for (size_t d = 0; d < dim; d++)
+         out << ((d == 0) ? "" : " + ") << "idx[" << d << "] * " << inputStrides[d];
+      out << ";\n";
+      out << SP << SP << SP << "sum += tensor_" << fNX << "[l];\n";
+      out << SP << SP << "}\n";
+      // NB: this initial version computes the mean for all reduce modes
+      out << SP << SP << "tensor_" << fNY << "[i] = sum / float(" << fShapeX[fAttrAxes] << ");\n";
+      out << SP << "}\n";
+      return out.str();
+   }
+
+};
+
+}//SOFIE
+}//Experimental
+}//TMVA
+
+#endif //TMVA_SOFIE_ROPERATOR_Reduce
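The comments in Generate describe the flat-index arithmetic that the operator emits: idx[d] = (i / outputStrides[d]) % outputShape[d] decomposes a flat output index into a multi-index, and l = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + ... recomposes the flat input index. Below is a minimal standalone sketch of the same arithmetic, not part of the commit (the local strides() helper is an illustrative stand-in for UTILITY::ComputeStrideFromShape), applied to the 2x3 test input from the test file, reduced over axis 1:

#include <cassert>
#include <cstdio>
#include <vector>

// Row-major strides: stride[d] = product of shape[d+1..n-1], stride[n-1] = 1.
static std::vector<size_t> strides(const std::vector<size_t> &shape) {
   std::vector<size_t> s(shape.size(), 1);
   for (int d = (int)shape.size() - 2; d >= 0; d--)
      s[d] = s[d + 1] * shape[d + 1];
   return s;
}

int main() {
   // ReduceMean over axis 1 of a 2x3 tensor, keepdims = 1.
   std::vector<size_t> shapeX = {2, 3};   // input shape
   std::vector<size_t> shapeY = {2, 1};   // output shape
   const int axis = 1;
   float tensorX[] = {5, 2, 3, 5, 5, 4};
   float tensorY[2];

   auto sX = strides(shapeX);
   auto sY = strides(shapeY);
   size_t dim = shapeY.size();
   size_t outputLength = shapeY[0] * shapeY[1];

   // Same loop structure as the code emitted by Generate().
   for (size_t i = 0; i < outputLength; i++) {
      std::vector<size_t> idx(dim);
      for (size_t d = 0; d < dim; d++)
         idx[d] = (i / sY[d]) % shapeY[d];   // decompose flat output index
      assert(idx[axis] == 0);                // reduced axis has size 1 in the output
      float sum = 0;
      for (size_t k = 0; k < shapeX[axis]; k++) {
         idx[axis] = k;
         size_t l = 0;
         for (size_t d = 0; d < dim; d++)
            l += idx[d] * sX[d];             // recompose flat input index
         sum += tensorX[l];
      }
      tensorY[i] = sum / float(shapeX[axis]);
   }
   printf("%f %f\n", tensorY[0], tensorY[1]); // 3.333333 4.666667
}

Compiled on its own this prints 3.333333 4.666667, the row means of the test input. The code emitted by Generate follows the same loop structure, but with the strides and shapes baked in as integer literals.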

tmva/sofie/test/TestCustomModelsFromONNX.cxx

Lines changed: 27 additions & 0 deletions
@@ -33,6 +33,9 @@
 #include "Cast_FromONNX.hxx"
 #include "input_models/references/Cast.ref.hxx"

+#include "ReduceMean_FromONNX.hxx"
+#include "input_models/references/ReduceMean.ref.hxx"
+
 #include "LinearWithLeakyRelu_FromONNX.hxx"
 #include "input_models/references/LinearWithLeakyRelu.ref.hxx"

@@ -725,6 +728,30 @@ TEST(ONNX, AvgPool){

 }

+TEST(ONNX, ReduceMean){
+   constexpr float TOLERANCE = DEFAULT_TOLERANCE;
+
+   // Preparing the standard input
+   std::vector<float> input({
+      5, 2, 3,
+      5, 5, 4
+   });
+
+   TMVA_SOFIE_ReduceMean::Session s("ReduceMean_FromONNX.dat");
+   std::vector<float> output = s.infer(input.data());
+   // Checking output size
+   EXPECT_EQ(output.size(), sizeof(ReduceMean_ExpectedOutput::output) / sizeof(float));
+
+   float *correct = ReduceMean_ExpectedOutput::output;
+
+   // Checking every output value, one by one
+   for (size_t i = 0; i < output.size(); ++i) {
+      EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
+   }
+}
+
 TEST(ONNX, RNNBatchwise)
 {
    constexpr float TOLERANCE = DEFAULT_TOLERANCE;
167 Bytes (binary file not shown)
tmva/sofie/test/input_models/references/ReduceMean.ref.hxx (new file)

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+namespace ReduceMean_ExpectedOutput{
+   float output[] = {
+      4   // mean of all six test inputs: (5 + 2 + 3 + 5 + 5 + 4) / 6 = 4
+   };
+} // namespace ReduceMean_ExpectedOutput

tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx

Lines changed: 4 additions & 0 deletions
@@ -47,6 +47,7 @@ std::unique_ptr<ROperator> make_ROperator_Identity(const onnx::NodeProto &nodepr
 std::unique_ptr<ROperator> make_ROperator_Softmax(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
 std::unique_ptr<ROperator> make_ROperator_Concat(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
 std::unique_ptr<ROperator> make_ROperator_Cast(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
+template <ReduceOpMode Op>
+std::unique_ptr<ROperator> make_ROperator_Reduce(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);

 using factoryMethodMap = std::unordered_map<std::string, std::unique_ptr<ROperator> (*)(const onnx::NodeProto&, const onnx::GraphProto&, std::unordered_map<std::string, ETensorType>&)>;
 const factoryMethodMap mapOptypeOperator = {
@@ -70,6 +71,9 @@ const factoryMethodMap mapOptypeOperator = {
 {"Mul", &make_ROperator_BasicBinary<Mul>},
 {"Div", &make_ROperator_BasicBinary<Div>},
 {"Neg", &make_ROperator_Neg},
+{"ReduceMean", &make_ROperator_Reduce<ReduceMean>},
+{"ReduceSumsquare", &make_ROperator_Reduce<ReduceSumsquare>},
+{"ReduceProd", &make_ROperator_Reduce<ReduceProd>},
 {"Reshape", &make_ROperator_Reshape},
 {"Flatten", &make_ROperator_Reshape},
 {"Slice", &make_ROperator_Slice},