Commit aeaf2f4

Reduce Operator Initial Code pushed
1 parent f1014bf commit aeaf2f4

6 files changed: 221 additions, 0 deletions
Lines changed: 143 additions & 0 deletions
@@ -0,0 +1,143 @@
#ifndef TMVA_SOFIE_ROPERATOR_Reduce
#define TMVA_SOFIE_ROPERATOR_Reduce

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <memory>
#include <sstream>
#include <algorithm>
#include <stdexcept>
#include <vector>
#include <cassert>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

enum ReduceOpMode { ReduceMean, ReduceSumsquare, ReduceProd };

template <typename T, ReduceOpMode Op1>
struct ReduceOperatorTrait {
   static const char *Name() { return ""; }
};

template <typename T>
struct ReduceOperatorTrait <T, ReduceMean> {
   static const char *Name() { return "ReduceMean"; }
};

template <typename T>
struct ReduceOperatorTrait <T, ReduceProd> {
   static const char *Name() { return "ReduceProd"; }
};

template <typename T>
struct ReduceOperatorTrait <T, ReduceSumsquare> {
   static const char *Name() { return "ReduceSumsquare"; }
};

template <typename T, ReduceOpMode Op>
class ROperator_Reduce final : public ROperator
{
private:
   /* Attributes */
   int fAxis = 1;
   ReduceOpMode fReduceMode;
   int fkeepdims = 1; // default value
   std::string fNX;
   std::string fNY;
   std::vector<size_t> fShapeX;
   std::vector<size_t> fShapeY;

public:

   ROperator_Reduce(){}
   ROperator_Reduce(int keepdims, int axis, std::string nameX, std::string nameY):
      fAxis(axis), fkeepdims(keepdims), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {}

   // type of output given input
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
      return input;
   }

   // shape of output tensors given input tensors
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
      // provisional: return the input shape unchanged (the reduced axis is not yet removed or set to 1)
      auto ret = std::vector<std::vector<size_t>>(1, input[0]); // vector of size 1 holding the first input shape
      return ret;
   }
   void Initialize(RModel& model){

      fUseSession = model.UseSession();

      if (model.CheckIfTensorAlreadyExist(fNX) == false){ // input must be a graph input, or an already initialized intermediate tensor
         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
      }
      fShapeX = model.GetTensorShape(fNX);
      // find the shape of Y and add it to the list of intermediate tensors
      fShapeY = ShapeInference({fShapeX})[0];
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
   }

   std::string Generate(std::string OpName){
      OpName = "op_" + OpName;
      if (fShapeX.empty() || fShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
      }

      size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);

      auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
      auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);

      size_t dim = fShapeY.size();
      std::vector<size_t> idx(dim);

      std::stringstream out;
      for (size_t i = 0; i < outputLength; i++) {

         // recover the multi-dimensional output index from the flat index i
         if (dim == 2) {
            idx[0] = i / outputStrides[0];
            idx[1] = i % outputStrides[0];
         }
         if (dim == 3) {
            idx[0] = i / outputStrides[0];
            idx[1] = (i % outputStrides[0]) / outputStrides[1];
            idx[2] = (i % outputStrides[0]) % outputStrides[1];
         }
         if (dim == 4) {
            idx[0] = i / outputStrides[0];
            idx[1] = (i % outputStrides[0]) / outputStrides[1];
            idx[2] = ((i % outputStrides[0]) % outputStrides[1]) / outputStrides[2];
            idx[3] = ((i % outputStrides[0]) % outputStrides[1]) % outputStrides[2];
         }

         assert(idx[fAxis] == 0); // along the reduction axis the output index is by definition always zero

         // NB: only the ReduceMean reduction is generated at this stage, independently of Op
         // open a scope in the generated code so that "sum" can be re-declared for every output element
         out << SP << "{\n";
         out << SP << SP << "float sum = 0;\n";
         for (size_t k = 0; k < fShapeX[fAxis]; k++) {
            idx[fAxis] = k;
            // compute the flat input index j from the multi-dimensional index
            size_t j = 0;
            if (dim == 2) j = idx[0]*inputStrides[0] + idx[1];
            if (dim == 3) j = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + idx[2];
            if (dim == 4) j = idx[0]*inputStrides[0] + idx[1]*inputStrides[1] + idx[2]*inputStrides[2] + idx[3];

            out << SP << SP << "sum += tensor_" << fNX << "[" << j << "];\n";
         }
         out << SP << SP << "float average = sum/float(" << fShapeX[fAxis] << ");\n";
         out << SP << SP << "tensor_" << fNY << "[" << i << "] = average;\n";
         out << SP << "}\n";
      }
      return out.str();
   }

};

}//SOFIE
}//Experimental
}//TMVA


#endif //TMVA_SOFIE_ROPERATOR_Reduce
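
The unrolled dim == 2/3/4 branches in Generate() all perform the same stride arithmetic: the flat output index i is decomposed into a multi-dimensional index using the output strides, and that index is mapped back to a flat input offset using the input strides. Below is a minimal standalone sketch of the same computation for arbitrary rank; it is not part of this commit, UnravelIndex and RavelIndex are illustrative names rather than SOFIE functions, and row-major strides with a trailing stride of 1 are assumed (as the unrolled branches imply).

#include <cstddef>
#include <vector>

// Decompose a flat offset i into a multi-dimensional index, given row-major strides
// (strides[d] = product of the dimensions after d, last stride = 1).
std::vector<std::size_t> UnravelIndex(std::size_t i, const std::vector<std::size_t> &strides) {
   std::vector<std::size_t> idx(strides.size());
   for (std::size_t d = 0; d < strides.size(); d++) {
      idx[d] = i / strides[d];  // component along dimension d
      i = i % strides[d];       // remainder for the remaining dimensions
   }
   return idx;
}

// Map a multi-dimensional index back to a flat offset using the (input) strides.
std::size_t RavelIndex(const std::vector<std::size_t> &idx, const std::vector<std::size_t> &strides) {
   std::size_t j = 0;
   for (std::size_t d = 0; d < idx.size(); d++)
      j += idx[d] * strides[d];
   return j;
}

A rank-generic helper along these lines would also lift the current restriction to tensors of rank 2, 3, or 4.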

tmva/sofie/test/TestCustomModelsFromONNX.cxx

Lines changed: 30 additions & 0 deletions
@@ -27,6 +27,12 @@
 #include "Div_FromONNX.hxx"
 #include "input_models/references/Div.ref.hxx"
 
+#include "ReduceMean_FromONNX.hxx"
+#include "input_models/references/ReduceMean.ref.hxx"
+
 #include "LinearWithLeakyRelu_FromONNX.hxx"
 #include "input_models/references/LinearWithLeakyRelu.ref.hxx"
 
@@ -643,6 +649,30 @@ TEST(ONNX, AvgPool){
 
 }
 
+TEST(ONNX, ReduceMean){
+   constexpr float TOLERANCE = DEFAULT_TOLERANCE;
+
+   // Preparing the standard input
+   std::vector<float> input({
+      5, 2, 3,
+      5, 5, 4
+   });
+
+   TMVA_SOFIE_ReduceMean::Session s("ReduceMean_FromONNX.dat");
+   std::vector<float> output = s.infer(input.data());
+   // Checking output size
+   EXPECT_EQ(output.size(), sizeof(ReduceMean_ExpectedOutput::output) / sizeof(float));
+
+   float *correct = ReduceMean_ExpectedOutput::output;
+
+   // Checking every output value, one by one
+   for (size_t i = 0; i < output.size(); ++i) {
+      EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
+   }
+}
+
 TEST(ONNX, RNNBatchwise)
 {
    constexpr float TOLERANCE = DEFAULT_TOLERANCE;
167 Bytes
Binary file not shown.
Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
namespace ReduceMean_ExpectedOutput{
   float output[] = {
      4
   };
} // namespace ReduceMean_ExpectedOutput
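
For reference, the single expected value can be checked by hand against the 2x3 input used in the ReduceMean test in TestCustomModelsFromONNX.cxx: reducing over all six elements gives (5 + 2 + 3 + 5 + 5 + 4) / 6 = 24 / 6 = 4.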

tmva/sofie_parsers/inc/TMVA/RModelParser_ONNX.hxx

Lines changed: 3 additions & 0 deletions
@@ -65,6 +65,9 @@ const factoryMethodMap mapOptypeOperator = {
    {"Sub", &make_ROperator_BasicBinary<Sub>},
    {"Mul", &make_ROperator_BasicBinary<Mul>},
    {"Div", &make_ROperator_BasicBinary<Div>},
+   {"ReduceMean", &make_ROperator_Reduce<ReduceMean>},
+   {"ReduceSumsquare", &make_ROperator_Reduce<ReduceSumsquare>},
+   {"ReduceProd", &make_ROperator_Reduce<ReduceProd>},
    {"Reshape", &make_ROperator_Reshape},
    {"Flatten", &make_ROperator_Reshape},
    {"Slice", &make_ROperator_Slice},

tmva/sofie_parsers/src/RModelParser_ONNX.cxx

Lines changed: 40 additions & 0 deletions
@@ -84,6 +84,46 @@ std::unique_ptr<ROperator> make_ROperator_BasicBinary(const onnx::NodeProto& nod
    return op;
 }
 
+template<ReduceOpMode Op1>
+std::unique_ptr<ROperator> make_ROperator_Reduce(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){
+
+   ETensorType input_type;
+
+   auto input_name = nodeproto.input(0);
+   auto it = tensor_type.find(input_name);
+   if (it != tensor_type.end()){
+      input_type = it->second;
+   } else {
+      throw std::runtime_error("TMVA::SOFIE ONNX Parser Reduce op has input tensor " + input_name + " but its type is not yet registered");
+   }
+
+   std::unique_ptr<ROperator> op;
+   int attr_keepdims = 1;
+   int attr_axis = 1;
+   for (int i = 0; i < nodeproto.attribute_size(); i++) {
+      std::string attribute_name = nodeproto.attribute(i).name();
+      if (attribute_name == "keepdims")
+         attr_keepdims = nodeproto.attribute(i).i();
+      if (attribute_name == "axis")
+         attr_axis = nodeproto.attribute(i).i();
+   }
+   switch(input_type){
+   case ETensorType::FLOAT:
+      op.reset(new ROperator_Reduce<float, Op1>(attr_keepdims, attr_axis, nodeproto.input(0), nodeproto.output(0)));
+      break;
+   default:
+      throw std::runtime_error("TMVA::SOFIE - Unsupported - Reduce Operator does not yet support input type " + std::to_string(static_cast<int>(input_type)));
+   }
+
+   ETensorType output_type = (op->TypeInference({input_type}))[0];
+   auto it2 = tensor_type.find(nodeproto.output(0));
+   if (it2 == tensor_type.end()){
+      tensor_type[nodeproto.output(0)] = output_type;
+   }
+
+   return op;
+}
+
 std::unique_ptr<ROperator> make_ROperator_Transpose(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){
 
    ETensorType input_type;
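
For context (not part of this commit), the ReduceMean_FromONNX.hxx header and the ReduceMean_FromONNX.dat weight file consumed by the test are typically produced by driving this parser over the ONNX model. A minimal sketch, assuming the usual RModelParser_ONNX::Parse / RModel::Generate / RModel::OutputGenerated workflow and an illustrative model file name:

#include "TMVA/RModel.hxx"
#include "TMVA/RModelParser_ONNX.hxx"

int main() {
   using namespace TMVA::Experimental::SOFIE;

   RModelParser_ONNX parser;
   // Parse the ONNX graph into an RModel; "ReduceMean.onnx" is an illustrative file name.
   RModel model = parser.Parse("ReduceMean.onnx");
   // Generate the inference code and write out the header (and the .dat weight file).
   model.Generate();
   model.OutputGenerated("ReduceMean_FromONNX.hxx");
   return 0;
}

The generated header can then be included and its Session class used exactly as in the gtest above.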
