#ifndef TMVA_SOFIE_ROPERATOR_Reduce
#define TMVA_SOFIE_ROPERATOR_Reduce

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <memory>
#include <sstream>
#include <algorithm>
#include <stdexcept>
#include <vector>
#include <cassert>

namespace TMVA {
namespace Experimental {
namespace SOFIE {

enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };

template <typename T, ReduceOpMode Op1>
struct ReduceOperatorTrait {
   static const char *Name() { return ""; }
};

template <typename T>
struct ReduceOperatorTrait<T, ReduceMean> {
   static const char *Name() { return "ReduceMean"; }
};

template <typename T>
struct ReduceOperatorTrait<T, ReduceProd> {
   static const char *Name() { return "ReduceProd"; }
};

template <typename T>
struct ReduceOperatorTrait<T, ReduceSumsquare> {
   static const char *Name() { return "ReduceSumsquare"; }
};
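
// The trait only supplies the operator name used to label the generated code;
// e.g. ReduceOperatorTrait<float, ReduceMean>::Name() returns "ReduceMean".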

template <typename T, ReduceOpMode Op>
class ROperator_Reduce final : public ROperator
{
private:
   /* Attributes */
   int fkeepdims = 1;  // default value
   int fAttrAxes = 1;  // axis along which the reduction is performed
   std::string fNX;
   std::string fNY;
   std::vector<size_t> fShapeX;
   std::vector<size_t> fShapeY;

public:
   ROperator_Reduce(){}
   ROperator_Reduce(int keepdims, int attrAxes, std::string nameX, std::string nameY):
      fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}

   // type of output given input
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
      return input;
   }

   // shape of output tensors given input tensors
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
      auto ret = input; // suggest copy to compiler
      // the reduced axis collapses to length one (keepdims == 1 is assumed here)
      ret[0][fAttrAxes] = 1;
      return ret;
   }
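   // Example of the shape inference above: an input of shape (2, 3, 4)
   // reduced along fAttrAxes = 1 yields an output of shape (2, 1, 4).
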
   void Initialize(RModel& model){

      fUseSession = model.UseSession();

      if (model.CheckIfTensorAlreadyExist(fNX) == false){ // input must be a graph input, or already initialized intermediate tensor
         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
      }
      fShapeX = model.GetTensorShape(fNX);
      // find shape of Y and add it in the list of intermediate tensors
      fShapeY = ShapeInference({fShapeX})[0];
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
   }

   std::string Generate(std::string OpName){
      OpName = "op_" + OpName;
      if (fShapeX.empty() || fShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
      }

      size_t dim = fShapeX.size();
      size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);
      size_t axisLength = fShapeX[fAttrAxes];

      auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
      auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);

      // The generated code decomposes the flat output index i into a
      // multi-index idx using the output strides:
      //    i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ((i % s0) % s1) / s2 ; and so on
      // and recomposes the flat input index from idx with the input strides:
      //    i = i0 * s0 + i1 * s1 + i2 * s2 + ...
      // The last stride s[dim-1] is 1 by definition, so the last division can be skipped.
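      // Worked example (illustrative): an input of shape (2, 3, 4) has strides
      // (12, 4, 1). The flat index i = 17 decomposes as
      //    i0 = 17 / 12 = 1, i1 = (17 % 12) / 4 = 1, i2 = (17 % 12) % 4 = 1,
      // and recomposes as 1*12 + 1*4 + 1*1 = 17.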

      std::stringstream out;
      out << "\n" << SP << "//----  operator " << ReduceOperatorTrait<T, Op>::Name() << "  " << OpName << "\n";
      out << SP << "{\n"; // scope the stride and index variables to this operator

      // emit the strides as constants in the generated code
      out << SP << SP << "const size_t inputStrides[" << dim << "] = {";
      for (size_t k = 0; k < dim; k++)
         out << inputStrides[k] << (k < dim - 1 ? ", " : "};\n");
      out << SP << SP << "const size_t outputStrides[" << dim << "] = {";
      for (size_t k = 0; k < dim; k++)
         out << outputStrides[k] << (k < dim - 1 ? ", " : "};\n");
      out << SP << SP << "size_t idx[" << dim << "];\n";

      out << SP << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
      // decompose the output index; idx[fAttrAxes] is zero by construction,
      // since the reduced axis has length one in the output shape
      out << SP << SP << SP << "size_t itmp = i;\n";
      out << SP << SP << SP << "for (size_t k = 0; k < " << dim - 1 << "; k++) {\n";
      out << SP << SP << SP << SP << "idx[k] = itmp / outputStrides[k];\n";
      out << SP << SP << SP << SP << "itmp = itmp % outputStrides[k];\n";
      out << SP << SP << SP << "}\n";
      out << SP << SP << SP << "idx[" << dim - 1 << "] = itmp; // last stride is 1\n";

      // initialize the accumulator according to the reduction mode
      if (Op == ReduceProd)
         out << SP << SP << SP << "float reduced = 1;\n";
      else
         out << SP << SP << SP << "float reduced = 0;\n";

      out << SP << SP << SP << "for (size_t k = 0; k < " << axisLength << "; k++) {\n";
      out << SP << SP << SP << SP << "idx[" << fAttrAxes << "] = k;\n";
      // compute the flat input index l from idx and the input strides
      out << SP << SP << SP << SP << "size_t l = 0;\n";
      out << SP << SP << SP << SP << "for (size_t m = 0; m < " << dim << "; m++) l += idx[m] * inputStrides[m];\n";
      if (Op == ReduceProd)
         out << SP << SP << SP << SP << "reduced *= tensor_" << fNX << "[l];\n";
      else if (Op == ReduceSumsquare)
         out << SP << SP << SP << SP << "reduced += tensor_" << fNX << "[l] * tensor_" << fNX << "[l];\n";
      else // ReduceMean: accumulate the sum and divide by the axis length below
         out << SP << SP << SP << SP << "reduced += tensor_" << fNX << "[l];\n";
      out << SP << SP << SP << "}\n";
      if (Op == ReduceMean)
         out << SP << SP << SP << "reduced /= float(" << axisLength << ");\n";
      out << SP << SP << SP << "tensor_" << fNY << "[i] = reduced;\n";
      out << SP << SP << "}\n";
      out << SP << "}\n";
      return out.str();
   }

};
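
// A minimal usage sketch (illustrative only; the surrounding parser code and
// tensor names are assumptions, not part of this header):
//
//    ROperator_Reduce<float, ReduceMean> op(/*keepdims=*/1, /*attrAxes=*/1, "X", "Y");
//    op.Initialize(model);                  // registers tensor_Y with the reduced shape
//    std::string code = op.Generate("0");   // emits the inference snippet for op_0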

}//SOFIE
}//Experimental
}//TMVA

#endif //TMVA_SOFIE_ROPERATOR_Reduce