3 changes: 2 additions & 1 deletion tmva/sofie/CMakeLists.txt
@@ -33,6 +33,7 @@ ROOT_STANDARD_LIBRARY_PACKAGE(ROOTTMVASofie
TMVA/ROperator_Concat.hxx
TMVA/ROperator_Identity.hxx
TMVA/ROperator_Softmax.hxx
TMVA/ROperator_Reduce.hxx
TMVA/ROperator_Cast.hxx
TMVA/SOFIE_common.hxx
TMVA/SOFIEHelpers.hxx
@@ -51,4 +52,4 @@ set_target_properties(ROOTTMVASofie PROPERTIES
# tests requires protobuf
if (tmva-sofie)
ROOT_ADD_TEST_SUBDIRECTORY(test)
endif()
endif()
1 change: 1 addition & 0 deletions tmva/sofie/inc/TMVA/OperatorList.hxx
@@ -18,4 +18,5 @@
#include "TMVA/ROperator_Identity.hxx"
#include "TMVA/ROperator_Softmax.hxx"
#include "TMVA/ROperator_Concat.hxx"
#include "TMVA/ROperator_Reduce.hxx"
#include "TMVA/ROperator_Cast.hxx"
160 changes: 160 additions & 0 deletions tmva/sofie/inc/TMVA/ROperator_Reduce.hxx
@@ -0,0 +1,160 @@
#ifndef TMVA_SOFIE_ROPERATOR_Reduce
#define TMVA_SOFIE_ROPERATOR_Reduce

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <memory>
#include <sstream>
#include <algorithm>
#include <stdexcept>
#include <vector>
#include <cassert>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

enum EReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd, InvalidReduceOp };

template <typename T, EReduceOpMode Op>
class ROperator_Reduce final : public ROperator
{
private:
   /* Attributes */
   int fkeepdims = 1; // default value
   int fAttrAxes;
   EReduceOpMode fReduceOpMode;
   std::string fNX;
   std::string fNY;
   std::vector<size_t> fShapeX;
   std::vector<size_t> fShapeY;

public:

   std::string Name() {
      if (fReduceOpMode == ReduceMean) return "ReduceMean";
      else if (fReduceOpMode == ReduceSumsquare) return "ReduceSumsquare";
      else if (fReduceOpMode == ReduceProd) return "ReduceProd";
      return "Invalid";
   }

   ROperator_Reduce() {}
   ROperator_Reduce(int keepdims, int attrAxes, std::string nameX, std::string nameY) :
      fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {
      fReduceOpMode = Op;
   }

   // type of the output given the input type
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) {
      return input;
   }

   // shape of the output tensors given the input tensors
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) {
      auto ret = input;   // take a copy of the input shape
      ret[0][fAttrAxes] = 1;
      return ret;
   }
   void Initialize(RModel& model) {

      fUseSession = model.UseSession();

      if (model.CheckIfTensorAlreadyExist(fNX) == false) { // input must be a graph input, or already initialized intermediate tensor
         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
      }
      fShapeX = model.GetTensorShape(fNX);
      // find the shape of Y and add it to the list of intermediate tensors
      fShapeY = ShapeInference({fShapeX})[0];
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
   }

   std::string Generate(std::string OpName) {
      OpName = "op_" + OpName;
      if (fShapeX.empty() || fShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
      }

      size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);

      auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
      auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);

      // The generated code recovers the multi-dimensional indices from the flat index i
      // using the output strides s0, s1, ... :
      //    i0 = i / s0 ; i1 = (i % s0) / s1 ; i2 = ((i % s0) % s1) / s2 ; and so on,
      // and the inverse (flat index from the multi-indices) is
      //    i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 + ...
      // There is no need to divide by the last stride s[n-1] since it is 1 by definition.

      std::stringstream out;
      out << "\n//---- operator " << Name() << " " << OpName << "\n";
      out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";

      size_t dim = fShapeX.size(); // input dimension (e.g. 2, 3, 4 or more)

      // here we find the output indices
      out << SP << SP << "size_t idx_0 = i / " << outputStrides[0] << ";\n";
      out << SP << SP << "size_t itmp = i;\n";
      for (size_t k = 1; k < dim; k++) {
         out << SP << SP << "itmp = itmp % " << outputStrides[k-1] << ";\n";
         if (k < dim-1)
            out << SP << SP << "size_t idx_" << k << " = itmp / " << outputStrides[k] << ";\n";
         else
            // avoid a division by outputStrides[dim-1], which is 1
            out << SP << SP << "size_t idx_" << k << " = itmp;\n";
      }

      // compute the reduction

      if (fReduceOpMode == ReduceProd)
         out << SP << SP << "float sum = 1;\n";
      else
         out << SP << SP << "float sum = 0;\n";

      out << SP << SP << "for (size_t k = 0; k < " << fShapeX[fAttrAxes] << "; k++) {\n";
      out << SP << SP << SP << "idx_" << fAttrAxes << " = k;\n";
      // compute the input index l
      out << SP << SP << SP << "size_t l = ";
      for (int n = dim-1; n >= 0; n--) {
         if (n == int(dim-1))
            out << "idx_" << n;
         else
            out << " + " << "idx_" << n << " * " << inputStrides[n];
      }
      out << ";\n";

      if (fReduceOpMode == ReduceMean) {
         out << SP << SP << SP << "sum += tensor_" << fNX << "[l];\n";
         out << SP << SP << "}\n";
         out << SP << SP << "float reduceResult = sum/static_cast<float>(" << fShapeX[fAttrAxes] << ");\n";
      }
      else if (fReduceOpMode == ReduceSumsquare) {
         out << SP << SP << SP << "sum += tensor_" << fNX << "[l] * tensor_" << fNX << "[l];\n";
         out << SP << SP << "}\n";
         out << SP << SP << "float reduceResult = sum;\n";
      }
      else if (fReduceOpMode == ReduceProd) {
         out << SP << SP << SP << "sum *= tensor_" << fNX << "[l];\n";
         out << SP << SP << "}\n";
         out << SP << SP << "float reduceResult = sum;\n";
      }

      out << SP << SP << "tensor_" << fNY << "[i] = reduceResult;\n";
      out << SP << "}\n";
      return out.str();
   }

};

}//SOFIE
}//Experimental
}//TMVA


#endif //TMVA_SOFIE_ROPERATOR_Reduce
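Not part of the PR: a minimal standalone sketch of the stride decomposition described in the comment inside Generate() above, specialised to a 2x3 input reduced over axis 0 with keepdims = 1. The shapes, array names and the main() harness are illustrative assumptions; the operator emits an equivalent loop with the strides filled in from fShapeX/fShapeY at generation time.

#include <cstddef>
#include <iostream>

int main() {
   // Illustrative only: input shape (2,3), reduce over axis 0, keepdims = 1 -> output shape (1,3).
   const size_t inShape[2]   = {2, 3};
   const size_t inStride[2]  = {3, 1};  // row-major strides of the input
   const size_t outStride[2] = {3, 1};  // strides of the (1,3) output
   const float x[6] = {5, 2, 3, 5, 5, 4};
   float y[3];

   const size_t axis = 0;
   for (size_t i = 0; i < 3; i++) {                   // 3 = output length
      // decompose the flat output index i into multi-indices (idx_0, idx_1)
      size_t idx[2];
      idx[0] = i / outStride[0];
      idx[1] = i % outStride[0];                      // last stride is 1, so no division is needed
      float sum = 0;
      for (size_t k = 0; k < inShape[axis]; k++) {    // loop over the reduced axis
         idx[axis] = k;
         size_t l = idx[0] * inStride[0] + idx[1];    // flat input index from the multi-indices
         sum += x[l];
      }
      y[i] = sum / static_cast<float>(inShape[axis]); // ReduceMean
   }
   for (float v : y) std::cout << v << " ";           // prints: 5 3.5 3.5
   std::cout << "\n";
   return 0;
}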

52 changes: 52 additions & 0 deletions tmva/sofie/test/TestCustomModelsFromONNX.cxx
@@ -33,6 +33,12 @@
#include "Cast_FromONNX.hxx"
#include "input_models/references/Cast.ref.hxx"

#include "ReduceMean_FromONNX.hxx"
#include "input_models/references/ReduceMean.ref.hxx"

#include "ReduceProd_FromONNX.hxx"
#include "input_models/references/ReduceProd.ref.hxx"

#include "LinearWithLeakyRelu_FromONNX.hxx"
#include "input_models/references/LinearWithLeakyRelu.ref.hxx"

@@ -779,6 +785,52 @@ TEST(ONNX, Pow_broadcast){
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
}

}

TEST(ONNX, ReduceMean){
   constexpr float TOLERANCE = DEFAULT_TOLERANCE;

   // Preparing the standard input
   std::vector<float> input({
      5, 2, 3,
      5, 5, 4
   });

   TMVA_SOFIE_ReduceMean::Session s("ReduceMean_FromONNX.dat");
   std::vector<float> output = s.infer(input.data());
   // Checking output size
   EXPECT_EQ(output.size(), sizeof(ReduceMean_ExpectedOutput::output) / sizeof(float));

   float *correct = ReduceMean_ExpectedOutput::output;

   // Checking every output value, one by one
   for (size_t i = 0; i < output.size(); ++i) {
      EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
   }
}

TEST(ONNX, ReduceProd){
   constexpr float TOLERANCE = DEFAULT_TOLERANCE;

   // Preparing the standard input
   std::vector<float> input({
      5, 2, 3,
      5, 5, 4
   });

   TMVA_SOFIE_ReduceProd::Session s("ReduceProd_FromONNX.dat");
   std::vector<float> output = s.infer(input.data());
   // Checking output size
   EXPECT_EQ(output.size(), sizeof(ReduceProd_ExpectedOutput::output) / sizeof(float));

   float *correct = ReduceProd_ExpectedOutput::output;

   // Checking every output value, one by one
   for (size_t i = 0; i < output.size(); ++i) {
      EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
   }
}

TEST(ONNX, RNNBatchwise)
Binary file added tmva/sofie/test/input_models/ReduceMean.onnx
Binary file not shown.
Binary file added tmva/sofie/test/input_models/ReduceProd.onnx
Binary file not shown.
5 changes: 5 additions & 0 deletions tmva/sofie/test/input_models/references/ReduceMean.ref.hxx
@@ -0,0 +1,5 @@
namespace ReduceMean_ExpectedOutput{
   float output[] = {
      5.0, 3.5, 3.5
   };
} // namespace ReduceMean_ExpectedOutput
5 changes: 5 additions & 0 deletions tmva/sofie/test/input_models/references/ReduceProd.ref.hxx
@@ -0,0 +1,5 @@
namespace ReduceProd_ExpectedOutput{
   float output[] = {
      25.0, 10.0, 12.0
   };
} // namespace ReduceProd_ExpectedOutput
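The reference values are consistent with interpreting the 2x3 test input as row-major and reducing it over axis 0; this is an assumption, since the actual shapes live in the binary ONNX models. A quick standalone check, not part of the test suite:

#include <cassert>

int main() {
   // Assumed layout: row-major 2x3 input {5, 2, 3, 5, 5, 4}, reduced over axis 0.
   float in[2][3] = {{5, 2, 3}, {5, 5, 4}};
   float mean[3], prod[3];
   for (int j = 0; j < 3; ++j) {
      mean[j] = (in[0][j] + in[1][j]) / 2.0f;  // column means
      prod[j] = in[0][j] * in[1][j];           // column products
   }
   // Matches ReduceMean.ref.hxx {5.0, 3.5, 3.5} and ReduceProd.ref.hxx {25.0, 10.0, 12.0}.
   assert(mean[0] == 5.0f  && mean[1] == 3.5f  && mean[2] == 3.5f);
   assert(prod[0] == 25.0f && prod[1] == 10.0f && prod[2] == 12.0f);
   return 0;
}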
5 changes: 5 additions & 0 deletions tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx
@@ -47,6 +47,8 @@ std::unique_ptr<ROperator> make_ROperator_Identity(const onnx::NodeProto &nodepr
std::unique_ptr<ROperator> make_ROperator_Softmax(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
std::unique_ptr<ROperator> make_ROperator_Concat(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
std::unique_ptr<ROperator> make_ROperator_Cast(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
template <EReduceOpMode Op1>
std::unique_ptr<ROperator> make_ROperator_Reduce(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);

using factoryMethodMap = std::unordered_map<std::string, std::unique_ptr<ROperator> (*)(const onnx::NodeProto&, const onnx::GraphProto&, std::unordered_map<std::string, ETensorType>&)>;
const factoryMethodMap mapOptypeOperator = {
@@ -71,6 +73,9 @@ const factoryMethodMap mapOptypeOperator = {
   {"Div", &make_ROperator_BasicBinary<Div>},
   {"Pow", &make_ROperator_BasicBinary<Pow>},
   {"Neg", &make_ROperator_Neg},
   {"ReduceMean", &make_ROperator_Reduce<ReduceMean>},
   {"ReduceSumsquare", &make_ROperator_Reduce<ReduceSumsquare>},
   {"ReduceProd", &make_ROperator_Reduce<ReduceProd>},
   {"Reshape", &make_ROperator_Reshape},
   {"Flatten", &make_ROperator_Reshape},
   {"Slice", &make_ROperator_Slice},
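For context, a rough sketch of how a node can be dispatched through mapOptypeOperator once the new entries are registered. This is simplified and illustrative only (the function name DispatchNode is hypothetical); the parser's actual lookup code may differ.

// Simplified, illustrative dispatch (not the PR's code): look up the node's op_type
// in the factory map extended above and call the registered factory function.
std::unique_ptr<ROperator> DispatchNode(const onnx::NodeProto &nodeproto,
                                        const onnx::GraphProto &graphproto,
                                        std::unordered_map<std::string, ETensorType> &tensor_type)
{
   auto it = mapOptypeOperator.find(nodeproto.op_type());   // e.g. "ReduceMean"
   if (it == mapOptypeOperator.end())
      throw std::runtime_error("TMVA::SOFIE - operator " + nodeproto.op_type() + " is not supported");
   // For the new entries the stored function pointer is one of the explicit
   // instantiations make_ROperator_Reduce<ReduceMean/ReduceSumsquare/ReduceProd>.
   return it->second(nodeproto, graphproto, tensor_type);
}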
51 changes: 51 additions & 0 deletions tmva/sofie_parsers/src/RModelParser_ONNX.cxx
@@ -116,6 +116,57 @@ std::unique_ptr<ROperator> make_ROperator_Neg(const onnx::NodeProto& nodeproto,
return op;
}

template <EReduceOpMode Op1>
std::unique_ptr<ROperator> make_ROperator_Reduce(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){

   ETensorType input_type;

   EReduceOpMode op_mode = InvalidReduceOp;

   if (nodeproto.op_type() == "ReduceMean")
      op_mode = ReduceMean;
   else if (nodeproto.op_type() == "ReduceSumsquare")
      op_mode = ReduceSumsquare;
   else if (nodeproto.op_type() == "ReduceProd")
      op_mode = ReduceProd;

   assert(op_mode != InvalidReduceOp);

   auto input_name = nodeproto.input(0);
   auto it = tensor_type.find(input_name);
   if (it != tensor_type.end()) {
      input_type = it->second;
   } else {
      throw std::runtime_error("TMVA::SOFIE ONNX Parser Reduce op has input tensor " + input_name + " but its type is not yet registered");
   }

   std::unique_ptr<ROperator> op;
   int attr_keepdims = 1;
   int attr_axis = 1;
   for (int i = 0; i < nodeproto.attribute_size(); i++) {
      std::string attribute_name = nodeproto.attribute(i).name();
      if (attribute_name == "keepdims")
         attr_keepdims = nodeproto.attribute(i).i();
      if (attribute_name == "axis")
         attr_axis = nodeproto.attribute(i).i();
   }
   switch (input_type) {
   case ETensorType::FLOAT:
      op.reset(new ROperator_Reduce<float, Op1>(attr_keepdims, attr_axis, nodeproto.input(0), nodeproto.output(0)));
      break;
   default:
      throw std::runtime_error("TMVA::SOFIE - Unsupported - Reduce Operator does not yet support input type " + std::to_string(static_cast<int>(input_type)));
   }

   ETensorType output_type = (op->TypeInference({input_type}))[0];
   auto it2 = tensor_type.find(nodeproto.output(0));
   if (it2 == tensor_type.end()) {
      tensor_type[nodeproto.output(0)] = output_type;
   }

   return op;
}

std::unique_ptr<ROperator> make_ROperator_Transpose(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){

ETensorType input_type;