
Commit f675d07

Reduce Operator axis attribute added
1 parent 9addffe commit f675d07

2 files changed: +203 -45 lines changed
Lines changed: 202 additions & 44 deletions
@@ -1,3 +1,147 @@
+// #ifndef TMVA_SOFIE_ROPERATOR_Reduce
+// #define TMVA_SOFIE_ROPERATOR_Reduce
+
+// #include "TMVA/SOFIE_common.hxx"
+// #include "TMVA/ROperator.hxx"
+// #include "TMVA/RModel.hxx"
+
+// #include <memory>
+// #include <sstream>
+// #include <algorithm>
+// #include <stdexcept>
+// #include <vector>
+// #include <cassert>
+
+// namespace TMVA{
+// namespace Experimental{
+// namespace SOFIE{
+
+// enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };
+
+// template <typename T, ReduceOpMode Op1>
+// struct ReduceOperatorTrait {
+//    const char *Name() { return ""; }
+// };
+// template <typename T>
+// struct ReduceOperatorTrait <T, ReduceMean> {
+//    static const char *Name() { return "ReduceMean"; }
+// };
+
+// template <typename T>
+// struct ReduceOperatorTrait <T, ReduceProd> {
+//    static const char *Name() { return "ReduceProd"; }
+// };
+
+// template <typename T>
+// struct ReduceOperatorTrait <T, ReduceSumsquare> {
+//    static const char *Name() { return "ReduceSumsquare"; }
+// };
+
+// template <typename T, ReduceOpMode Op>
+// class ROperator_Reduce final : public ROperator
+// {
+// private:
+//    /* Attributes*/
+//    int fAxis = 1;
+//    ReduceOpMode fReduceMode;
+//    int fkeepdims = 1; //default value
+//    std::string fNX;
+//    std::string fNY;
+//    std::vector<size_t> fShapeX;
+//    std::vector<size_t> fShapeY;
+
+// public:
+
+//    ROperator_Reduce(){}
+//    ROperator_Reduce(int keepdims,int axis,std::string nameX, std::string nameY):
+//       fkeepdims(keepdims), fAxis(axis), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}
+
+//    // type of output given input
+//    std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
+//       return input;
+//    }
+
+//    // shape of output tensors given input tensors
+//    std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
+//       // assume now inputs have same shape (no broadcasting)
+//       auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return vector size 1 with first input
+//       return ret;
+//    }
+//    void Initialize(RModel& model){
+
+//       fUseSession = model.UseSession();
+
+//       if (model.CheckIfTensorAlreadyExist(fNX) == false){ //input must be a graph input, or already initialized intermediate tensor
+//          throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
+//       }
+//       fShapeX = model.GetTensorShape(fNX);
+//       // find shape of Y and add it in the list of intermediate tensors
+//       fShapeY = ShapeInference({fShapeX})[0];
+//       model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
+//    }
+
+//    std::string Generate(std::string OpName){
+//       OpName = "op_" + OpName;
+//       if (fShapeX.empty() || fShapeY.empty()) {
+//          throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
+//       }
+
+//       size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);
+
+//       auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
+//       auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
+
+//       size_t dim = fShapeY.size();
+//       std::vector<size_t> idx(dim);
+
+//       std::stringstream out;
+//       for (size_t i = 0; i < outputLength; i++) {
+
+//          if (dim == 2) {
+//             idx[0] = i / outputStrides[0];
+//             idx[1] = i % outputStrides[0];
+//          }
+//          if (dim == 3) {
+//             idx[0] = i / outputStrides[0];
+//             idx[1] = (i % outputStrides[0]) / outputStrides[1];
+//             idx[2] = (i % outputStrides[0]) % outputStrides[1];
+//          }
+//          if (dim == 4) {
+//             idx[0] = i / outputStrides[0];
+//             idx[1] = (i % outputStrides[0]) / outputStrides[1];
+//             idx[2] = ((i % outputStrides[0]) % outputStrides[1]) / outputStrides[2];
+//             idx[3] = ((i % outputStrides[0]) % outputStrides[1]) % outputStrides[2];
+//          }
+
+//          assert(idx[fAxis] == 0); // we can avoid computing this for the reduction axis which by definition is always zero
+
+//          out << SP << "float sum = 0;\n";
+//          // float sum = 0;
+//          for (size_t k = 0; k < fShapeX[fAxis]; k++) {
+//             idx[fAxis] = k;
+//             // compute input index j
+//             size_t j = 0;
+//             if (dim == 2) j = idx[0]*inputStrides[0] + idx[1];
+//             if (dim == 3) j = idx[0]*inputStrides[0] + idx[1]* inputStrides[1] + idx[2];
+//             if (dim == 4) j = idx[0]*inputStrides[0] + idx[1]* inputStrides[1] + idx[2]*inputStrides[2] + idx[3];
+
+//             out << SP << SP << "sum += tensor_" << fNX[j] << ";\n";
+//          }
+//          out << SP << "float average = sum/float(" << fShapeX[fAxis] << ")\n;";
+//          out << SP << "tensor_" << fNY[i] << " = average;\n";
+//       }
+//       return out.str();
+//    }
+
+// };
+
+// }//SOFIE
+// }//Experimental
+// }//TMVA
+
+
+// #endif //TMVA_SOFIE_ROPERATOR_Reduce
+
 #ifndef TMVA_SOFIE_ROPERATOR_Reduce
 #define TMVA_SOFIE_ROPERATOR_Reduce
 
@@ -16,8 +160,6 @@ namespace TMVA{
 namespace Experimental{
 namespace SOFIE{
 
-enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };
-
 template <typename T, ReduceOpMode Op1>
 struct ReduceOperatorTrait {
    const char *Name() { return ""; }
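
For orientation: the trait's only job is to map the enum template parameter to a printable operator name, which Generate() below streams into the emitted "//---- operator ..." comment. A minimal illustration (hypothetical snippet, not part of the commit, assuming this header is included and the ReduceOpMode enum is visible):

   #include <cstdio>
   using namespace TMVA::Experimental::SOFIE;

   int main() {
      // Prints "ReduceMean": the trait resolves the operator name at compile time.
      std::printf("%s\n", ReduceOperatorTrait<float, ReduceMean>::Name());
      return 0;
   }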
@@ -42,19 +184,17 @@ class ROperator_Reduce final : public ROperator
 {
 private:
    /* Attributes*/
-   int fAxis = 1;
-   ReduceOpMode fReduceMode;
    int fkeepdims = 1; //default value
    std::string fNX;
    std::string fNY;
    std::vector<size_t> fShapeX;
    std::vector<size_t> fShapeY;
+   int fAttrAxes;
 
 public:
-
    ROperator_Reduce(){}
-   ROperator_Reduce(int keepdims,int axis,std::string nameX, std::string nameY):
-      fkeepdims(keepdims), fAxis(axis), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}
+   ROperator_Reduce(int keepdims,int attrAxes,std::string nameX, std::string nameY):
+      fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}
 
    // type of output given input
    std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
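
With the axis now carried as an attribute (fAttrAxes), construction mirrors what the parser below does with its std::unique_ptr<ROperator> op. A hypothetical usage sketch (the tensor names "X"/"Y" and the float/ReduceMean template arguments are illustrative, not from the commit):

   // Mean-reduce tensor "X" over axis 1 into "Y", keeping the reduced dimension.
   std::unique_ptr<ROperator> op(
      new ROperator_Reduce<float, ReduceMean>(/*keepdims=*/1, /*attrAxes=*/1, "X", "Y"));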
@@ -63,8 +203,13 @@ public:
 
    // shape of output tensors given input tensors
    std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
-      // assume now inputs have same shape (no broadcasting)
-      auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return vector size 1 with first input
+
+      // std::vector<std::vector<size_t>> ret;
+      // auto & input_shape = input[0];
+      // auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return vector size 1 with first input
+      // return ret;
+      auto ret = input; //suggest copy to compiler
+      ret[fAttrAxes] = 1;
       return ret;
    }
    void Initialize(RModel& model){
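
The intent of the rewritten ShapeInference: with keepdims == 1 the output shape equals the input shape with the reduced axis collapsed to extent 1. A standalone sketch of that rule (hypothetical helper, not part of the commit; note that ShapeInference above receives a vector of shapes, so the rule would apply to the inner vector, e.g. ret[0][fAttrAxes]):

   #include <cstddef>
   #include <vector>

   // Output shape of a keepdims reduction over `axis`,
   // e.g. {2, 3, 4} reduced over axis 1 -> {2, 1, 4}.
   std::vector<std::size_t> ReducedShape(std::vector<std::size_t> shape, std::size_t axis) {
      shape[axis] = 1; // reduced dimension is kept with extent 1
      return shape;
   }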
@@ -76,7 +221,7 @@ public:
       }
       fShapeX = model.GetTensorShape(fNX);
       // find shape of Y and add it in the list of intermediate tensors
-      fShapeY = ShapeInference({fShapeX})[0];
+      fShapeY = ShapeInference(fShapeX);
       model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
    }
 
@@ -90,46 +235,58 @@ public:
 
       auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
       auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
+
+      // write here according to size of shape
+      // in generation code can be done automatically
+      // i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ( (i % s0) % s1 ) / s2 and so on
+      // and we have for the inverse
+      // i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 ....
+
+      // don't need to divide by last stride s[n-1] since it is 1 by definition
+
+      std::stringstream out;
+      out << "\n//---- operator " << std::string(ReduceOperatorTrait<T,Op>::Name()) << " " << OpName << "\n";
+      out << SP << "size_t dim = " << fShapeY.size() << ";\n";
 
-      size_t dim = fShapeY.size();
-      std::vector<size_t> idx(dim);
+      out << SP << "std::vector<size_t> idx(dim);";
 
-      std::stringstream out;
-      for (size_t i = 0; i < outputLength; i++) {
 
-         if (dim == 2) {
-            idx[0] = i / outputStrides[0];
-            idx[1] = i % outputStrides[0];
-         }
-         if (dim == 3) {
-            idx[0] = i / outputStrides[0];
-            idx[1] = (i % outputStrides[0]) / outputStrides[1];
-            idx[2] = (i % outputStrides[0]) % outputStrides[1];
-         }
-         if (dim == 4) {
-            idx[0] = i / outputStrides[0];
-            idx[1] = (i % outputStrides[0]) / outputStrides[1];
-            idx[2] = ((i % outputStrides[0]) % outputStrides[1]) / outputStrides[2];
-            idx[3] = ((i % outputStrides[0]) % outputStrides[1]) % outputStrides[2];
-         }
+      out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
+
+      // write here according to size of shape
+      // in generation code can be done automatically
+      // i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ( (i % s0) % s1 ) / s2 and so on
+      // and we have for the inverse
+      // i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 ....
 
-         assert(idx[fAxis] == 0); // we can avoid computing this for the reduction axis which by definition is always zero
+      // don't need to divide by last stride s[n-1] since it is 1 by definition
+
+      out << SP << SP << "idx[j] = i;\n";
+      out << SP << SP << "size_t k = 0;\n";
+      out << SP << SP << SP << "for(k=0; k < dim-1; k++){\n";
+      out << SP << SP << SP << "idx[k] = idx[k] %" << outputStrides << "[k];\n";
+      out << SP << SP << SP << "};\n";
+      out << SP << SP << "idx[j] = idx[j] /" << outputStrides << "[k];\n";
 
+
+      out << SP << "assert(idx[" << fAttrAxes << "] == 0);\n"; // we can avoid computing this for the reduction axis which by definition is always zero
+
       out << SP << "float sum = 0;\n";
-      // float sum = 0;
-      for (size_t k = 0; k < fShapeX[fAxis]; k++) {
-         idx[fAxis] = k;
-         // compute input index j
-         size_t j = 0;
-         if (dim == 2) j = idx[0]*inputStrides[0] + idx[1];
-         if (dim == 3) j = idx[0]*inputStrides[0] + idx[1]* inputStrides[1] + idx[2];
-         if (dim == 4) j = idx[0]*inputStrides[0] + idx[1]* inputStrides[1] + idx[2]*inputStrides[2] + idx[3];
-
-         out << SP << SP << "sum += tensor_" << fNX[j] << ";\n";
-      }
-      out << SP << "float average = sum/float(" << fShapeX[fAxis] << ")\n;";
-      out << SP << "tensor_" << fNY[i] << " = average;\n";
-      }
+      out << SP << SP << " for (size_t k = 0; k < inputShape[" << fAttrAxes << "]; k++) { \n";
+      out << SP << SP << " idx[" << fAttrAxes << "] = k;\n";
+      // compute input index j
+      out << SP << SP << "size_t l = 0;\n";
+      out << SP << SP << "size_t m = 0;\n";
+      out << SP << SP << SP << "for(m=0; m < dim-1; m++){\n";
+      out << SP << SP << SP << "l += idx[m] *" << inputStrides << "[m];\n";
+      out << SP << SP << SP << "};\n";
+      out << SP << SP << "l += idx[m];\n";
+      out << SP << SP << "sum += tensor_" << fNX << "[l];\n";
+      out << SP << SP << "};\n";
+      out << SP << SP << "float average = sum/float(inputShape[" << fAttrAxes << "]);\n";
+      out << SP << SP << "tensor_" << fNY << "[i] = average;\n";
+      out << SP << "};\n";
+      out << SP << "}\n";
       return out.str();
    }
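
The comment block above describes the flat-index to multi-index conversion via row-major strides, which the generated code now performs inline instead of through the old hard-coded dim == 2/3/4 cases. In plain C++ the two directions look like this (hypothetical helpers for illustration, assuming strides as produced by ComputeStrideFromShape, with the last stride equal to 1):

   #include <cstddef>
   #include <vector>

   // i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ((i % s0) % s1) / s2 ; ...
   std::vector<std::size_t> UnravelIndex(std::size_t i, const std::vector<std::size_t> &strides) {
      std::vector<std::size_t> idx(strides.size());
      for (std::size_t k = 0; k < strides.size(); k++) {
         idx[k] = i / strides[k]; // component along dimension k
         i      = i % strides[k]; // remainder carried to the next dimension
      }
      return idx;
   }

   // Inverse: i = i0*s0 + i1*s1 + i2*s2 + ... (no division needed,
   // and the last stride is 1 by definition).
   std::size_t RavelIndex(const std::vector<std::size_t> &idx, const std::vector<std::size_t> &strides) {
      std::size_t i = 0;
      for (std::size_t k = 0; k < strides.size(); k++)
         i += idx[k] * strides[k];
      return i;
   }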

@@ -141,3 +298,4 @@ public:
 
 
 #endif //TMVA_SOFIE_ROPERATOR_Reduce
+
tmva/sofie_parsers/src/RModelParser_ONNX.cxx

Lines changed: 1 addition & 1 deletion
@@ -99,7 +99,7 @@ auto input_name = nodeproto.input(0);
 
 std::unique_ptr<ROperator> op;
 int attr_keepdims = 1;
-int attr_axis = 1;
+int attr_axis = 1;
 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
    std::string attribute_name = nodeproto.attribute(i).name();
    if (attribute_name == "keepdims")
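
The attribute loop is truncated in this view; a plausible continuation reads the integer payloads off the protobuf message (hypothetical sketch, assuming the standard ONNX AttributeProto accessors i() and ints()):

   // Hypothetical continuation of the loop above:
   if (attribute_name == "keepdims")
      attr_keepdims = nodeproto.attribute(i).i();  // scalar int attribute
   else if (attribute_name == "axes")
      attr_axis = nodeproto.attribute(i).ints(0);  // first entry of the axes list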
