#ifndef TMVA_SOFIE_ROPERATOR_Reduce
#define TMVA_SOFIE_ROPERATOR_Reduce

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <memory>
#include <sstream>
#include <algorithm>
#include <stdexcept>
#include <vector>
#include <cassert>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };

template <typename T, ReduceOpMode Op1>
struct ReduceOperatorTrait {
   static const char *Name() { return ""; }
};

template <typename T>
struct ReduceOperatorTrait <T, ReduceMean> {
   static const char *Name() { return "ReduceMean"; }
};

template <typename T>
struct ReduceOperatorTrait <T, ReduceProd> {
   static const char *Name() { return "ReduceProd"; }
};

template <typename T>
struct ReduceOperatorTrait <T, ReduceSumsquare> {
   static const char *Name() { return "ReduceSumsquare"; }
};

template <typename T, ReduceOpMode Op>
class ROperator_Reduce final : public ROperator
{
private:
   /* Attributes */
   int fAxis = 1;
   ReduceOpMode fReduceMode = Op;
   int fkeepdims = 1; // default value
   std::string fNX;
   std::string fNY;
   std::vector<size_t> fShapeX;
   std::vector<size_t> fShapeY;

public:

   ROperator_Reduce(){}
   ROperator_Reduce(int keepdims, int axis, std::string nameX, std::string nameY):
      fAxis(axis), fkeepdims(keepdims), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}

   // type of output given input
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
      return input;
   }

   // shape of output tensor given input tensor: the reduced axis is kept with size 1
   // (only the default keepdims = 1 is supported by the generated code)
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
      auto ret = input; // copy the input shape
      ret[0][fAxis] = 1; // the reduction collapses the axis to a single element
      return ret;
   }
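   // Example: for an input of shape {2, 3, 4} and fAxis = 1,
   // ShapeInference({{2, 3, 4}}) returns {{2, 1, 4}}.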
   void Initialize(RModel& model){

      fUseSession = model.UseSession();

      if (model.CheckIfTensorAlreadyExist(fNX) == false){ // input must be a graph input, or an already initialized intermediate tensor
         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
      }
      fShapeX = model.GetTensorShape(fNX);
      if (fAxis < 0 || static_cast<size_t>(fAxis) >= fShapeX.size())
         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " has an invalid reduction axis");
      // find shape of Y and add it in the list of intermediate tensors
      fShapeY = ShapeInference({fShapeX})[0];
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
   }

   std::string Generate(std::string OpName){
      OpName = "op_" + OpName;
      if (fShapeX.empty() || fShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
      }

      size_t outputLength = ConvertShapeToLength(fShapeY);

      auto inputStrides = UTILITY::ComputeStrideFromShape(fShapeX);
      auto outputStrides = UTILITY::ComputeStrideFromShape(fShapeY);

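      // Each flat output index i is unflattened below into multi-dimensional
      // indices idx using the output strides (row-major layout). For example,
      // with fShapeY = {2, 1, 4} the strides are {4, 4, 1}, and i = 5 decomposes
      // to idx = {5/4, (5%4)/4, (5%4)%4} = {1, 0, 1}.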
      size_t dim = fShapeY.size();
      if (dim < 2 || dim > 4)
         throw std::runtime_error("TMVA SOFIE Reduce Op supports only input tensors of rank 2, 3 or 4");
      std::vector<size_t> idx(dim);

      std::stringstream out;
      for (size_t i = 0; i < outputLength; i++) {

         if (dim == 2) {
            idx[0] = i / outputStrides[0];
            idx[1] = i % outputStrides[0];
         }
         if (dim == 3) {
            idx[0] = i / outputStrides[0];
            idx[1] = (i % outputStrides[0]) / outputStrides[1];
            idx[2] = (i % outputStrides[0]) % outputStrides[1];
         }
         if (dim == 4) {
            idx[0] = i / outputStrides[0];
            idx[1] = (i % outputStrides[0]) / outputStrides[1];
            idx[2] = ((i % outputStrides[0]) % outputStrides[1]) / outputStrides[2];
            idx[3] = ((i % outputStrides[0]) % outputStrides[1]) % outputStrides[2];
         }

         assert(idx[fAxis] == 0); // the index along the reduction axis is by definition always zero

         // open a scope so the accumulator can be re-declared for each output element
         out << SP << "{\n";
         out << SP << SP << "float sum = " << (Op == ReduceProd ? "1" : "0") << ";\n";
         for (size_t k = 0; k < fShapeX[fAxis]; k++) {
            idx[fAxis] = k;
            // compute the flat input index j from the multi-dimensional indices
            size_t j = 0;
            if (dim == 2) j = idx[0]*inputStrides[0] + idx[1];
            if (dim == 3) j = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + idx[2];
            if (dim == 4) j = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + idx[2]*inputStrides[2] + idx[3];

            if (Op == ReduceProd)
               out << SP << SP << "sum *= tensor_" << fNX << "[" << j << "];\n";
            else if (Op == ReduceSumsquare)
               out << SP << SP << "sum += tensor_" << fNX << "[" << j << "] * tensor_" << fNX << "[" << j << "];\n";
            else // ReduceMean: accumulate the plain sum and divide at the end
               out << SP << SP << "sum += tensor_" << fNX << "[" << j << "];\n";
         }
         if (Op == ReduceMean)
            out << SP << SP << "sum /= float(" << fShapeX[fAxis] << ");\n";
         out << SP << SP << "tensor_" << fNY << "[" << i << "] = sum;\n";
         out << SP << "}\n";
      }
      return out.str();
   }
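   // Illustration only: for fShapeX = {2, 3, 4}, fAxis = 1, Op = ReduceMean and
   // tensors named "X" and "Y" (hypothetical names), the first of the eight
   // generated blocks reads
   //    {
   //       float sum = 0;
   //       sum += tensor_X[0];
   //       sum += tensor_X[4];
   //       sum += tensor_X[8];
   //       sum /= float(3);
   //       tensor_Y[0] = sum;
   //    }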

};

}//SOFIE
}//Experimental
}//TMVA
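
// Minimal usage sketch (illustrative only), assuming an RModel `model` whose
// graph already declares a float input tensor "X" of shape {2, 3, 4}; the
// tensor names and shape here are hypothetical:
//
//    using namespace TMVA::Experimental::SOFIE;
//    auto op = std::make_unique<ROperator_Reduce<float, ReduceMean>>(
//       /*keepdims=*/1, /*axis=*/1, /*nameX=*/"X", /*nameY=*/"Y");
//    op->Initialize(model);                // registers "Y" with shape {2, 1, 4}
//    std::string code = op->Generate("0"); // returns the generated reduction code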

#endif //TMVA_SOFIE_ROPERATOR_Reduce