
Commit ac7392a

Authored and committed by lmoneta and Neel-Shah-29
Fix generated code for Reduce operator
Also fix the namespace of the reference outputs in the Reduce tests
1 parent 53a9a4c commit ac7392a

File tree

3 files changed: +81 -52 lines changed


tmva/sofie/inc/TMVA/ROperator_Reduce.hxx

Lines changed: 79 additions & 50 deletions
@@ -30,7 +30,7 @@ private:
    std::string fNY;
    std::vector<size_t> fShapeX;
    std::vector<size_t> fShapeY;
-
+
 
 public:
 
@@ -41,9 +41,11 @@ public:
          return "Invalid";
    }
 
-   ROperator_Reduce(){}
+   ROperator_Reduce(){}
    ROperator_Reduce(int keepdims,int attrAxes,std::string nameX, std::string nameY):
-      fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}
+      fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {
+         fReduceOpMode = Op;
+      }
 
    // type of output given input
    std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
@@ -80,19 +82,19 @@
 
       auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
       auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
-
+
       // write here according to size of shape
      // in generation code can be done automatically
      // i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ( (i % s0) % s1 ) / s2 and so on
      // and we have for the inverse
      // i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 ....
 
      // don't need to divide by last stride s[n-1] since it is 1 by definition
-
+
      std::stringstream out;
      out << "\n//---- operator " << Name() << " " << OpName << "\n";
      out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
-
+
      // write here according to size of shape
      // in generation code can be done automatically
      // i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ( (i % s0) % s1 ) / s2 and so on
@@ -102,61 +104,88 @@
      // don't need to divide by last stride s[n-1] since it is 1 by definition
 
      size_t dim = fShapeX.size(); // this is the input dimension (e.g. 2, 3 or 4 or more)
-     out << SP << "std::vector<size_t> outputStrides = {" ;
-     for (size_t k = 0; k < dim; k++) {
-        out << outputStrides[k] ;
-        if (k < dim-1)
-           out << " ,";
-        else
-           out << " };\n";
-     }
-
-     for (size_t k = 0; k < dim; k++) {
-        size_t j;
-        out << SP << "size_t idx_" << k <<" = i;\n";
-        for(j = 0; j < k; j++ )
-           out << SP << "idx_" << k << " = idx_" << k <<" % outputStrides[" << j << "];\n" ;
-
-        out << SP << "idx_" << k << " = idx_" << k << "/ outputStrides[" << j << "];\n";
+     // out << SP << "std::vector<size_t> outputStrides = {" ;
+     // for (size_t k = 0; k < dim; k++) {
+     //    out << outputStrides[k] ;
+     //    if (k < dim-1)
+     //       out << " ,";
+     //    else
+     //       out << " };\n";
+     // }
+
+     // here we find output indices
+     out << SP << SP << "size_t idx_0 = i / " << outputStrides[0] << ";\n" ;
+     out << SP << SP << "size_t itmp = i;\n";
+     for (size_t k = 1; k < dim; k++) {
+        out << SP << SP << "itmp = itmp % " << outputStrides[k-1] << ";\n" ;
+        if (k < dim-1)
+           out << SP << SP << "size_t idx_" << k << " = itmp / " << outputStrides[k] << ";\n" ;
+        else
+           // to avoid division by 1 which is outputStrides[dim-1]
+           out << SP << SP << "size_t idx_" << k << " = itmp;\n";
     }
 
+
+     // for (size_t k = 0; k < dim; k++) {
+     //    size_t j;
+     //    out << SP << "size_t idx_" << k <<" = i;\n";
+     //    for(j = 0; j < k; j++ )
+     //       out << SP << "idx_" << k << " = idx_" << k <<" % outputStrides[" << j << "];\n" ;
+
+     //    out << SP << "idx_" << k << " = idx_" << k << "/ outputStrides[" << j << "];\n";
+     // }
+
     // out << SP << "assert(idx[" << fAttrAxes << "] == 0);\n"; // we can avoid computing this for the reduction axis which by definition is always zero
 
-     out << SP << "float sum = 0;\n";
-     out << SP << " for (size_t k = 0; k < " << fShapeX[fAttrAxes] <<"; k++) { \n";
-     out << SP << SP << " idx_" << fAttrAxes << " = k;\n";
-     // compute input index j
-     out << SP << "std::vector<size_t> inputStrides = {" ;
-     for (size_t k = 0; k < dim; k++) {
-        out << inputStrides[k] ;
-        if (k < dim-1)
-           out << " ,";
-        else
-           out << " };\n";
+     // compute reduction
+
+     out << SP << SP << "float sum = 0;\n";
+     out << SP << SP << "for (size_t k = 0; k < " << fShapeX[fAttrAxes] <<"; k++) { \n";
+     out << SP << SP << SP << "idx_" << fAttrAxes << " = k;\n";
+     // compute input index j
+     out << SP << SP << SP << "size_t l = ";
+     for(int n = dim-1; n >= 0; n-- ) {
+        if (n == dim-1)
+           out << "idx_" << n;
+        else
+           out << " + " << "idx_" << n << " * " << inputStrides[n];
     }
-     out << SP << SP << "size_t l = 0;\n";
-
-     size_t n ;
-     for(n = 0; n < dim-1; n++ )
-        out << SP << "l += idx_" << n << " * inputStrides[" << n << "];\n";
-
-     out << SP << "l += idx_" << n << ";\n";
-
+     out << ";\n";
+
+
+     // out << SP << "std::vector<size_t> inputStrides = {" ;
+     // for (size_t k = 0; k < dim; k++) {
+     //    out << inputStrides[k] ;
+     //    if (k < dim-1)
+     //       out << " ,";
+     //    else
+     //       out << " };\n";
+     // }
+     // out << SP << SP << "size_t l = 0;\n";
+
+     // size_t n ;
+     // for(n = 0; n < dim-1; n++ )
+     //    out << SP << "l += idx_" << n << " * inputStrides[" << n << "];\n";
+
+     // out << SP << "l += idx_" << n << ";\n";
+
     if(fReduceOpMode == ReduceMean){
-        out << SP << SP << "sum += tensor_" << fNX << "[l];\n";
-        out << SP << SP << "};\n";
+        out << SP << SP << SP << "sum += tensor_" << fNX << "[l];\n";
+        out << SP << SP << "}\n";
+        out << SP << SP << "float reduceResult = sum/static_cast<float>(" << fShapeX[fAttrAxes] << ");\n";
     }
     else if(fReduceOpMode == ReduceSumsquare){
-        out << SP << SP << "sum += tensor_" << fNX << "[l] * tensor_" << fNX << "[l];\n";
-        out << SP << SP << "};\n";
+        out << SP << SP << SP << "sum += tensor_" << fNX << "[l] * tensor_" << fNX << "[l];\n";
+        out << SP << SP << "}\n";
+        out << SP << SP << "float reduceResult = sum;\n";
     }
     else if(fReduceOpMode == ReduceProd){
-        out << SP << SP << "sum *= tensor_" << fNX << "[l];\n";
-        out << SP << SP << "};\n";
+        out << SP << SP << SP << "sum *= tensor_" << fNX << "[l];\n";
+        out << SP << SP << "}\n";
+        out << SP << SP << "float reduceResult = sum;\n";
     }
-     out << SP << SP << "float average = sum/(float)" << fShapeX[fAttrAxes] << ";\n";
-     out << SP << SP << "tensor_" << fNY << "[i] = average;\n";
-     out << SP << "};\n";
+
+     out << SP << SP << "tensor_" << fNY << "[i] = reduceResult;\n";
     out << SP << "}\n";
     return out.str();
  }
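
For illustration, here is a minimal sketch of roughly what the fixed Generate() emits for a hypothetical ReduceMean over axis 1 of a float input of shape {2, 3, 4} with keepdims = 1, so that fShapeY = {2, 1, 4}, outputStrides = {4, 4, 1} and inputStrides = {12, 4, 1}. The function and tensor names are placeholders, not taken from the commit.

#include <cstddef>

// Reduce a {2,3,4} input along axis 1 (length 3) into a {2,1,4} output.
void ReduceMean_axis1_sketch(const float* tensor_input, float* tensor_output) {
   for (size_t i = 0; i < 8; i++) {
      // decompose the flat output index i into (idx_0, idx_1, idx_2)
      size_t idx_0 = i / 4;
      size_t itmp = i;
      itmp = itmp % 4;
      size_t idx_1 = itmp / 4;
      itmp = itmp % 4;
      size_t idx_2 = itmp;
      // accumulate along the reduction axis, overwriting idx_1 with k
      float sum = 0;
      for (size_t k = 0; k < 3; k++) {
         idx_1 = k;
         size_t l = idx_2 + idx_1 * 4 + idx_0 * 12;   // flat input index
         sum += tensor_input[l];
      }
      float reduceResult = sum / static_cast<float>(3);
      tensor_output[i] = reduceResult;
   }
}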

tmva/sofie/test/input_models/references/ReduceMean.ref.hxx

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-namespace Reduce_mean_ExpectedOutput{
+namespace ReduceMean_ExpectedOutput{
    float output[] = {
       5.0, 3.5, 3.5
    };

tmva/sofie/test/input_models/references/ReduceProd.ref.hxx

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-namespace Reduce_mean_ExpectedOutput{
+namespace ReduceProd_ExpectedOutput{
    float output[] = {
       25.0, 10.0, 12.0
    };
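
For orientation, both reference outputs are consistent with reducing a 2x3 input along axis 0. The actual test input is not shown in this commit; the snippet below checks one hypothetical input that reproduces both ReduceMean.ref.hxx and ReduceProd.ref.hxx.

#include <cstdio>

int main() {
   // assumed 2x3 input: column-wise mean -> {5.0, 3.5, 3.5}, product -> {25.0, 10.0, 12.0}
   const float x[2][3] = {{5.f, 2.f, 3.f},
                          {5.f, 5.f, 4.f}};
   for (int j = 0; j < 3; j++) {
      float sum = 0.f, prod = 1.f;
      for (int i = 0; i < 2; i++) {
         sum += x[i][j];
         prod *= x[i][j];
      }
      std::printf("col %d: mean = %.1f, prod = %.1f\n", j, sum / 2.f, prod);
   }
   return 0;
}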
