diff --git a/tmva/sofie/CMakeLists.txt b/tmva/sofie/CMakeLists.txt
index 175918c75aec9..e8bc8a0d9d8e9 100644
--- a/tmva/sofie/CMakeLists.txt
+++ b/tmva/sofie/CMakeLists.txt
@@ -33,6 +33,7 @@ ROOT_STANDARD_LIBRARY_PACKAGE(ROOTTMVASofie
   TMVA/ROperator_Concat.hxx
   TMVA/ROperator_Identity.hxx
   TMVA/ROperator_Softmax.hxx
+  TMVA/ROperator_Reduce.hxx
   TMVA/ROperator_Cast.hxx
   TMVA/SOFIE_common.hxx
   TMVA/SOFIEHelpers.hxx
@@ -51,4 +52,4 @@ set_target_properties(ROOTTMVASofie PROPERTIES
 # tests requires protobuf
 if (tmva-sofie)
   ROOT_ADD_TEST_SUBDIRECTORY(test)
-endif()
+endif()
\ No newline at end of file
diff --git a/tmva/sofie/inc/TMVA/OperatorList.hxx b/tmva/sofie/inc/TMVA/OperatorList.hxx
index 43d64b1d9b9a4..0002fb1ffddf5 100644
--- a/tmva/sofie/inc/TMVA/OperatorList.hxx
+++ b/tmva/sofie/inc/TMVA/OperatorList.hxx
@@ -18,4 +18,5 @@
 #include "TMVA/ROperator_Identity.hxx"
 #include "TMVA/ROperator_Softmax.hxx"
 #include "TMVA/ROperator_Concat.hxx"
+#include "TMVA/ROperator_Reduce.hxx"
 #include "TMVA/ROperator_Cast.hxx"
diff --git a/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx b/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx
new file mode 100644
index 0000000000000..0681573d22876
--- /dev/null
+++ b/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx
@@ -0,0 +1,160 @@
+#ifndef TMVA_SOFIE_ROPERATOR_Reduce
+#define TMVA_SOFIE_ROPERATOR_Reduce
+
+#include "TMVA/SOFIE_common.hxx"
+#include "TMVA/ROperator.hxx"
+#include "TMVA/RModel.hxx"
+
+#include <memory>
+#include <sstream>
+#include <algorithm>
+#include <stdexcept>
+#include <vector>
+#include <cassert>
+
+namespace TMVA{
+namespace Experimental{
+namespace SOFIE{
+
+enum EReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd, InvalidReduceOp };
+
+template <typename T, EReduceOpMode Op>
+class ROperator_Reduce final : public ROperator
+{
+private:
+   /* Attributes*/
+   int fkeepdims = 1; //default value
+   int fAttrAxes;
+   EReduceOpMode fReduceOpMode;
+   std::string fNX;
+   std::string fNY;
+   std::vector<size_t> fShapeX;
+   std::vector<size_t> fShapeY;
+
+
+public:
+
+   std::string Name() {
+      if (fReduceOpMode == ReduceMean) return "ReduceMean";
+      else if (fReduceOpMode == ReduceSumsquare ) return "ReduceSumsquare";
+      else if (fReduceOpMode == ReduceProd ) return "ReduceProd";
+      return "Invalid";
+   }
+
+   ROperator_Reduce(){}
+   ROperator_Reduce(int keepdims,int attrAxes,std::string nameX, std::string nameY):
+   fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {
+      fReduceOpMode = Op;
+   }
+
+   // type of output given input
+   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
+      return input;
+   }
+
+   // shape of output tensors given input tensors
+   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
+      auto ret = input; //suggest copy to compiler
+      ret[0][fAttrAxes] = 1;
+      return ret;
+   }
+   void Initialize(RModel& model){
+
+      fUseSession = model.UseSession();
+
+      if (model.CheckIfTensorAlreadyExist(fNX) == false){ //input must be a graph input, or already initialized intermediate tensor
+         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
+      }
+      fShapeX = model.GetTensorShape(fNX);
+      // find shape of Y and add it in the list of intermediate tensors
+      fShapeY = ShapeInference({fShapeX})[0];
+      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
+
+   }
+
+   std::string Generate(std::string OpName){
+      OpName = "op_" + OpName;
+      if (fShapeX.empty() || fShapeY.empty()) {
+         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
+      }
+
+      size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);
+
+      auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
+      auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
+
+      // write here according to size of shape
+      // in generation code can be done automatically
+      // i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ( (i % s0) % s1 ) / s2 and so on
+      // and we have for the inverse
+      // i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 ....
+
+      // don't need to divide by last stride s[n-1] since it is 1 by definition
+
+      std::stringstream out;
+      out << "\n//---- operator " << Name() << " " << OpName << "\n";
+      out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
+
+      size_t dim = fShapeX.size(); // this is the input dimension (e.g. 2, 3 or 4 or more)
+
+      // here we find output indices
+      out << SP << SP << "size_t idx_0 = i / " << outputStrides[0] << ";\n" ;
+      out << SP << SP << "size_t itmp = i;\n";
+      for (size_t k = 1; k < dim; k++) {
+         out << SP << SP << "itmp = itmp % " << outputStrides[k-1] << ";\n" ;
+         if (k < dim-1)
+            out << SP << SP << "size_t idx_" << k << " = itmp / " << outputStrides[k] << ";\n" ;
+         else
+            // to avoid division by 1 which is outputStrides[dim-1]
+            out << SP << SP << "size_t idx_" << k << " = itmp;\n";
+      }
+
+      // compute reduction
+
+      if(fReduceOpMode == ReduceProd)
+         out << SP << SP << "float sum = 1;\n";
+      else
+         out << SP << SP << "float sum = 0;\n";
+
+      out << SP << SP << "for (size_t k = 0; k < " << fShapeX[fAttrAxes] <<"; k++) { \n";
+      out << SP << SP << SP << "idx_" << fAttrAxes << " = k;\n";
+      // compute input index j
+      out << SP << SP << SP << "size_t l = ";
+      for(int n = dim-1; n >=0; n--) {
+         if (n == int(dim-1))
+            out << "idx_" << n;
+         else
+            out << " + " << "idx_" << n << " * " << inputStrides[n];
+      }
+      out << ";\n";
+
+      if(fReduceOpMode == ReduceMean){
+         out << SP << SP << SP << "sum += tensor_" << fNX << "[l];\n";
+         out << SP << SP << "}\n";
+         out << SP << SP << "float reduceResult = sum/static_cast<float>(" << fShapeX[fAttrAxes] << ");\n";
+      }
+      else if(fReduceOpMode == ReduceSumsquare){
+         out << SP << SP << SP << "sum += tensor_" << fNX << "[l] * tensor_" << fNX << "[l];\n";
+         out << SP << SP << "}\n";
+         out << SP << SP << "float reduceResult = sum;\n";
+      }
+      else if(fReduceOpMode == ReduceProd){
+         out << SP << SP << SP << "sum *= tensor_" << fNX << "[l];\n";
+         out << SP << SP << "}\n";
+         out << SP << SP << "float reduceResult = sum;\n";
+      }
+
+      out << SP << SP << "tensor_" << fNY << "[i] = reduceResult;\n";
+      out << SP << "}\n";
+      return out.str();
+   }
+
+};
+
+}//SOFIE
+}//Experimental
+}//TMVA
+
+
+#endif //TMVA_SOFIE_ROPERATOR_Reduce
+
diff --git a/tmva/sofie/test/TestCustomModelsFromONNX.cxx b/tmva/sofie/test/TestCustomModelsFromONNX.cxx
index 34c3adc2306d6..cd8f414059001 100644
--- a/tmva/sofie/test/TestCustomModelsFromONNX.cxx
+++ b/tmva/sofie/test/TestCustomModelsFromONNX.cxx
@@ -33,6 +33,12 @@
 #include "Cast_FromONNX.hxx"
 #include "input_models/references/Cast.ref.hxx"
 
+#include "ReduceMean_FromONNX.hxx"
+#include "input_models/references/ReduceMean.ref.hxx"
+
+#include "ReduceProd_FromONNX.hxx"
+#include "input_models/references/ReduceProd.ref.hxx"
+
 #include "LinearWithLeakyRelu_FromONNX.hxx"
 #include "input_models/references/LinearWithLeakyRelu.ref.hxx"
 
@@ -779,6 +785,52 @@ TEST(ONNX, Pow_broadcast){
       EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
    }
 
+}
+
+ TEST(ONNX, ReduceMean){
+   constexpr float TOLERANCE = DEFAULT_TOLERANCE;
+
+   // Preparing the standard input
+   std::vector<float> input({
+      5, 2, 3,
+      5, 5, 4
+   });
+
+   TMVA_SOFIE_ReduceMean::Session s("ReduceMean_FromONNX.dat");
+   std::vector<float> output = s.infer(input.data());
+   // Checking output size
+   EXPECT_EQ(output.size(), sizeof(ReduceMean_ExpectedOutput::output) / sizeof(float));
+
+   float *correct = ReduceMean_ExpectedOutput::output;
+
+   // Checking every output value, one by one
+   for (size_t i = 0; i < output.size(); ++i) {
+      EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
+   }
+
+}
+
+ TEST(ONNX, ReduceProd){
+   constexpr float TOLERANCE = DEFAULT_TOLERANCE;
+
+   // Preparing the standard input
+   std::vector<float> input({
+      5, 2, 3,
+      5, 5, 4
+   });
+
+   TMVA_SOFIE_ReduceProd::Session s("ReduceProd_FromONNX.dat");
+   std::vector<float> output = s.infer(input.data());
+   // Checking output size
+   EXPECT_EQ(output.size(), sizeof(ReduceProd_ExpectedOutput::output) / sizeof(float));
+
+   float *correct = ReduceProd_ExpectedOutput::output;
+
+   // Checking every output value, one by one
+   for (size_t i = 0; i < output.size(); ++i) {
+      EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
+   }
 }
 
 TEST(ONNX, RNNBatchwise)
diff --git a/tmva/sofie/test/input_models/ReduceMean.onnx b/tmva/sofie/test/input_models/ReduceMean.onnx
new file mode 100644
index 0000000000000..48b62512bfc58
Binary files /dev/null and b/tmva/sofie/test/input_models/ReduceMean.onnx differ
diff --git a/tmva/sofie/test/input_models/ReduceProd.onnx b/tmva/sofie/test/input_models/ReduceProd.onnx
new file mode 100644
index 0000000000000..2c14e43638450
Binary files /dev/null and b/tmva/sofie/test/input_models/ReduceProd.onnx differ
diff --git a/tmva/sofie/test/input_models/references/ReduceMean.ref.hxx b/tmva/sofie/test/input_models/references/ReduceMean.ref.hxx
new file mode 100644
index 0000000000000..053a34eebf527
--- /dev/null
+++ b/tmva/sofie/test/input_models/references/ReduceMean.ref.hxx
@@ -0,0 +1,5 @@
+namespace ReduceMean_ExpectedOutput{
+   float output[] = {
+      5.0, 3.5, 3.5
+   };
+} // namespace ReduceMean_ExpectedOutput
\ No newline at end of file
diff --git a/tmva/sofie/test/input_models/references/ReduceProd.ref.hxx b/tmva/sofie/test/input_models/references/ReduceProd.ref.hxx
new file mode 100644
index 0000000000000..af259b4c69d06
--- /dev/null
+++ b/tmva/sofie/test/input_models/references/ReduceProd.ref.hxx
@@ -0,0 +1,5 @@
+namespace ReduceProd_ExpectedOutput{
+   float output[] = {
+      25.0, 10.0, 12.0
+   };
+} // namespace ReduceProd_ExpectedOutput
\ No newline at end of file
diff --git a/tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx b/tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx
index dc95ad413c948..ba82bf1c952a8 100644
--- a/tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx
+++ b/tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx
@@ -47,6 +47,8 @@ std::unique_ptr<ROperator> make_ROperator_Identity(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
 std::unique_ptr<ROperator> make_ROperator_Softmax(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
 std::unique_ptr<ROperator> make_ROperator_Concat(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
 std::unique_ptr<ROperator> make_ROperator_Cast(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
+template <EReduceOpMode Op1>
+std::unique_ptr<ROperator> make_ROperator_Reduce(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
 
 using factoryMethodMap = std::unordered_map<std::string, std::unique_ptr<ROperator> (*)(const onnx::NodeProto&, const onnx::GraphProto&, std::unordered_map<std::string, ETensorType>&)>;
 const factoryMethodMap mapOptypeOperator = {
@@ -71,6 +73,9 @@ const factoryMethodMap mapOptypeOperator = {
    {"Div", &make_ROperator_BasicBinary<float, Div>},
    {"Pow", &make_ROperator_BasicBinary<float, Pow>},
    {"Neg", &make_ROperator_Neg},
+   {"ReduceMean", &make_ROperator_Reduce<ReduceMean>},
+   {"ReduceSumsquare", &make_ROperator_Reduce<ReduceSumsquare>},
+   {"ReduceProd", &make_ROperator_Reduce<ReduceProd>},
    {"Reshape", &make_ROperator_Reshape},
    {"Flatten", &make_ROperator_Reshape},
    {"Slice", &make_ROperator_Slice},
diff --git a/tmva/sofie_parsers/src/RModelParser_ONNX.cxx b/tmva/sofie_parsers/src/RModelParser_ONNX.cxx
index ac32bcbbb7de7..5eb4858df50d5 100644
--- a/tmva/sofie_parsers/src/RModelParser_ONNX.cxx
+++ b/tmva/sofie_parsers/src/RModelParser_ONNX.cxx
@@ -116,6 +116,57 @@ std::unique_ptr<ROperator> make_ROperator_Neg(const onnx::NodeProto& nodeproto,
    return op;
 }
 
+template <EReduceOpMode Op1>
+std::unique_ptr<ROperator> make_ROperator_Reduce(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){
+
+   ETensorType input_type;
+
+   EReduceOpMode op_mode = InvalidReduceOp;
+
+   if (nodeproto.op_type() == "ReduceMean")
+      op_mode = ReduceMean;
+   else if (nodeproto.op_type() == "ReduceSumsquare")
+      op_mode = ReduceSumsquare;
+   else if (nodeproto.op_type() == "ReduceProd")
+      op_mode = ReduceProd;
+
+   assert(op_mode != InvalidReduceOp);
+
+   auto input_name = nodeproto.input(0);
+   auto it = tensor_type.find(input_name);
+   if (it != tensor_type.end()){
+      input_type = it->second;
+   }else{
+      throw std::runtime_error("TMVA::SOFIE ONNX Parser Reduce op has input tensor" + input_name + " but its type is not yet registered");
+   }
+
+   std::unique_ptr<ROperator> op;
+   int attr_keepdims = 1;
+   int attr_axis = 1;
+   for (int i = 0; i < nodeproto.attribute_size(); i++) {
+      std::string attribute_name = nodeproto.attribute(i).name();
+      if (attribute_name == "keepdims")
+         attr_keepdims = nodeproto.attribute(i).i();
+      if(attribute_name == "axes")
+         attr_axis = nodeproto.attribute(i).ints(0);
+   }
+   switch(input_type){
+   case ETensorType::FLOAT:
+      op.reset(new ROperator_Reduce<float, Op1>(attr_keepdims,attr_axis,nodeproto.input(0), nodeproto.output(0)));
+      break;
+   default:
+      throw std::runtime_error("TMVA::SOFIE - Unsupported - Reduce Operator does not yet support input type " + std::to_string(static_cast<int>(input_type)));
+   }
+
+   ETensorType output_type = (op->TypeInference({input_type}))[0];
+   auto it2 = tensor_type.find(nodeproto.output(0));
+   if (it2 == tensor_type.end()){
+      tensor_type[nodeproto.output(0)] = output_type;
+   }
+
+   return op;
+}
+
 std::unique_ptr<ROperator> make_ROperator_Transpose(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){
 
    ETensorType input_type;