Skip to content

Commit fd69457

Browse files
committed
[tmva][sofie] Fixes for dynamic tensors
Add RModel::AddShapeParameters
1 parent f88344e commit fd69457

13 files changed

+111
-64
lines changed

tmva/sofie/inc/TMVA/RModel.hxx

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,8 @@ public:
121121
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape);
122122
// Add an intermediate dynamic tensor
123123
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape);
124-
124+
// Add a shape parameter
125+
void AddShapeParam(const std::string & name, size_t def_value = 0);
125126
void AddInputTensorName(std::string name);
126127
void AddOutputTensorNameList(std::vector<std::string> output_tensor_names);
127128
void

tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -98,22 +98,31 @@ public:
9898
if (!model.CheckIfTensorAlreadyExist(fNB)) {
9999
throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNB + "is not found in model");
100100
}
101-
if (model.IsDynamicTensor(fNA))
101+
int dynamicInputs = 0;
102+
if (model.IsDynamicTensor(fNA)) {
102103
fDimShapeA = model.GetDynamicTensorShape(fNA);
103-
else {
104+
dynamicInputs |= 1;
105+
} else {
104106
fShapeA = model.GetTensorShape(fNA);
105107
fDimShapeA = ConvertShapeToDim(fShapeA);
106108
}
107-
if (model.IsDynamicTensor(fNB))
109+
if (model.IsDynamicTensor(fNB)) {
110+
dynamicInputs |= 2;
108111
fDimShapeB = model.GetDynamicTensorShape(fNB);
109-
else {
112+
} else {
110113
fShapeB = model.GetTensorShape(fNB);
111114
fDimShapeB = ConvertShapeToDim(fShapeB);
112115
}
116+
std::cout << BinaryOperatorTrait<T, Op>::Name() << " ";
117+
if (dynamicInputs & 1)
118+
std::cout << fNA << " is dynamic " << ConvertShapeToString(fDimShapeA) << " ";
119+
if (dynamicInputs & 2)
120+
std::cout << fNB << " is dynamic " << ConvertShapeToString(fDimShapeB) << " ";
121+
std::cout << std::endl;
113122
// check if need to broadcast at initialization time if shapes are known and different
114123
// (we could broadcast the tensor tensor to maximum values of dynamic shapes - to be done)
115124
// case of known shapes
116-
if (!fShapeA.empty() && !fShapeB.empty()) {
125+
if (dynamicInputs == 0) {
117126
auto ret = UTILITY::MultidirectionalBroadcastShape(fShapeA, fShapeB);
118127
fBroadcastFlag = ret.first;
119128
fShapeY = ret.second;
@@ -126,7 +135,7 @@ public:
126135
// Broadcast A to Y
127136
if (broadcastA) {
128137
fNBroadcastedA = "Broadcasted" + fNA + "to" + fNY;
129-
if (model.IsInitializedTensor(fNA)) {
138+
if (model.IsConstantTensor(fNA)) {
130139
auto data = model.GetInitializedTensorData(fNA);
131140
std::shared_ptr<void> broadcastedData(
132141
UTILITY::UnidirectionalBroadcast<T>(static_cast<T *>(data.get()), fShapeA, fShapeY),
@@ -142,7 +151,7 @@ public:
142151
// Broadcast B to Y
143152
if (broadcastB) {
144153
fNBroadcastedB = "Broadcasted" + fNB + "to" + fNY;
145-
if (model.IsInitializedTensor(fNB)) {
154+
if (model.IsConstantTensor(fNB)) {
146155
auto data = model.GetInitializedTensorData(fNB);
147156
if (model.Verbose())
148157
std::cout << "data B " << ConvertShapeToString(fShapeB) << " : "
@@ -168,7 +177,7 @@ public:
168177
fShapeY = fShapeA;
169178
}
170179
// check case of constant output (if all inputs are defined)
171-
if (model.IsInitializedTensor(fNA) && model.IsInitializedTensor(fNB)) {
180+
if (model.IsConstantTensor(fNA) && model.IsConstantTensor(fNB)) {
172181
const std::string &nameA = fNBroadcastedA.empty() ? fNA : fNBroadcastedA;
173182
const std::string &nameB = fNBroadcastedB.empty() ? fNB : fNBroadcastedB;
174183
auto dataA = static_cast<T *>(model.GetInitializedTensorData(nameA).get());

tmva/sofie/inc/TMVA/ROperator_Comparision.hxx

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -180,15 +180,16 @@ public:
180180
fIsModelOutput = true;
181181
}
182182

183-
std::string Generate(std::string OpName) override {
183+
std::string Generate(std::string opName) override {
184184
if (fIsOutputConstant) return "";
185-
OpName = "op_" + OpName;
185+
opName = "op_" + opName;
186186

187187
if (fShapeY.empty()) {
188188
throw std::runtime_error("TMVA SOFIE Comparision Op called to Generate without being initialized first");
189189
}
190190
std::stringstream out;
191-
out << SP << "\n//------ " << ComparisionTrait<T,Op>::Name() << "\n";
191+
out << SP << "\n//------ " << ComparisionTrait<T,Op>::Name() << " " << opName
192+
<< " --> " << ConvertShapeToString(fShapeY) << "\n";
192193
size_t length = ConvertShapeToLength(fShapeY);
193194
// Broadcast A if it's uninitialized
194195
if (!fNBroadcastedX1.empty()) {

tmva/sofie/inc/TMVA/ROperator_Concat.hxx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -229,14 +229,14 @@
229229
}
230230
}
231231

232-
std::string Generate(std::string OpName) override {
232+
std::string Generate(std::string opName) override {
233233
if (fIsOutputConstant) return "";
234-
OpName = "op_"+OpName;
234+
opName = "op_" + opName;
235235
if(fOutputShape.empty()){
236236
throw std::runtime_error("TMVA SOFIE Concat called to Generate without being initialized first");
237237
}
238238
std::stringstream out;
239-
out<<"\n//--------- Concat\n";
239+
out<<"\n//--------- Concat " << opName << " --> " << ConvertShapeToString(fOutputShape) << "\n";
240240
// special case when memory is contiguous
241241
bool hasShapeOnes = true;
242242
for(int i = 0; i<fAxis; ++i){

tmva/sofie/inc/TMVA/ROperator_Constant.hxx

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ public:
6060
}
6161
// get output shape from input values:
6262
// can work only if input is a constant or initialized tensor (or dynamic one)
63-
if (model.IsInitializedTensor(fNX) || model.IsConstantTensor(fNX)) {
63+
if (model.IsConstantTensor(fNX)) {
6464
fIsOutputConstant = true;
6565
auto dptr = model.GetInitializedTensorData(fNX);
6666
auto input_tensor = static_cast<int64_t *>(dptr.get());
@@ -126,16 +126,18 @@ public:
126126

127127
std::string Generate(std::string opName) override {
128128
// no code to generate here. Tensor are defined in Session constructor
129+
std::stringstream out;
129130
if (fIsOutputConstant) {
130131
if (fNX.empty())
131-
return "// ---- Constant (no-op) \n";
132+
out << "// ---- Constant (no-op) " << opName << " --> " << ConvertShapeToString(fDimOutputShape) << "\n";
132133
else
133-
return "// ---- ConstantOfShape (no-op) \n";
134+
out << "// ---- ConstantOfShape (no-op) " << opName << " --> " << ConvertShapeToString(fDimOutputShape) << "\n";
135+
return out.str();
134136
}
135137
// Only ConstantOfShape might require generation code
136138
// generate constant tensor according to input
137-
std::stringstream out;
138-
out << "\n//--------- ConstantOfShape " << opName << "\n";
139+
140+
out << "\n//--------- ConstantOfShape " << opName << " --> " << ConvertShapeToString(fDimOutputShape) << "\n";
139141
// set shape values
140142
for (size_t i = 0; i < fDimOutputShape.size(); i++) {
141143
out << SP << "size_t " << fDimOutputShape[i].param << " = " << "tensor_" << fNX << "[" << i << "];\n";

tmva/sofie/inc/TMVA/ROperator_Expand.hxx

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -64,8 +64,10 @@ public:
6464
// assume shape of input shape is known (size is 1)
6565
auto shapeOfInputShape = model.GetTensorShape(fNShape);
6666
fShapeDim.resize(shapeOfInputShape[0]);
67-
for (size_t i = 0; i < fShapeDim.size(); i++)
67+
for (size_t i = 0; i < fShapeDim.size(); i++) {
6868
fShapeDim[i] = Dim{std::string("v_") + fNShape + "_" + std::to_string(i)};
69+
model.AddShapeParam(fShapeDim[i].param);
70+
}
6971
}
7072
// Y is the common shape of fShapeX and shape
7173
std::cout << "expand - input tensor " << ConvertShapeToString(fShapeX) << std::endl;
@@ -131,14 +133,14 @@ public:
131133
return out.str();
132134
}
133135

134-
std::string Generate(std::string OpName) override {
136+
std::string Generate(std::string opName) override {
135137
if (fIsOutputConstant) return "";
136-
OpName = "op_" + OpName;
138+
opName = "op_" + opName;
137139
if (fShapeY.empty()) {
138140
throw std::runtime_error("TMVA SOFIE Expand Op called to Generate without being initialized first");
139141
}
140142
std::stringstream out;
141-
out << SP << "\n//------ Expand Op" << "\n";
143+
out << SP << "\n//------ Expand " << opName << " --> " << ConvertShapeToString(fShapeY) << "\n";
142144
// need to declare shape parameters for non initialized shapes
143145
if (!fInitializedShape) {
144146
for (size_t i = 0; i < fShapeDim.size(); i++) {

tmva/sofie/inc/TMVA/ROperator_Gather.hxx

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -128,14 +128,14 @@ public:
128128
}
129129
}
130130

131-
std::string Generate(std::string OpName) override {
131+
std::string Generate(std::string opName) override {
132132
if (fIsOutputConstant) {
133133
// no code to generate here for constant output. Tensor output is defined in Session constructor
134134
return "//---------------------------------------\n";
135135
}
136-
OpName = "op_" + OpName;
136+
opName = "op_" + opName;
137137
std::stringstream out;
138-
out << "//--------- Gather operator \n";
138+
out << "//--------- Gather " << opName << " --> " << ConvertShapeToString(fShapeY) << "\n";
139139
// The shape of the output is q + r - 1
140140
size_t r = fShapeX.size();
141141
// Indices of shape q
@@ -157,19 +157,17 @@ public:
157157
out << SP << "}\n";
158158
}
159159

160-
161160
// Fill the output Y[j_0, j_1, ..., j_{axis - 1}, i_0, i_1, ..., i_{q - 1}, j_{axis + 1}, ..., j_{r - 1}]
162161
// [0 ... axis) [axis ... axis + q) [axis + q ... q + r - 1)
163162
// iterate in [0 ... axis) [0 ... q) [axis ... r - 1)
164163
// for j_0, j_1, ..., j_{axis-1}
164+
165165
for (size_t j = 0; j < size_t(fAttrAxis); j++) {
166166
std::string index = "j_" + std::to_string(j);
167167
for (size_t k = 0; k <= j; k++) out << SP;
168168
out << "for (size_t " << index << " = 0; " << index << " < " << fShapeY[j] << "; " << index << "++) {\n";
169169
}
170170
// for i_0, i_1, ..., i_{q - 1}
171-
if (q == 0 && fAttrAxis == 0)
172-
out << SP << "{\n"; // add a scope for local variables in case is not in a for loop
173171
for (size_t i = 0; i < q; i++) {
174172
std::string index = "i_" + std::to_string(i);
175173
for (size_t k = 0; k <= i + fAttrAxis; k++) out << SP;
@@ -182,6 +180,10 @@ public:
182180
out << "for (size_t " << index << " = 0; " << index << " < " << fShapeY[q + j] << "; " << index << "++) {\n";
183181
}
184182

183+
// add a scope for local variables in case the above loops are not generated
184+
if (fAttrAxis == 0 && q == 0 && r <= 1)
185+
out << SP << "{ // scalar case \n";
186+
185187
// output index
186188
for (size_t k = 0; k < q + r; k++) out << SP;
187189
out << "size_t y_index = ";
@@ -200,6 +202,9 @@ public:
200202
out << "j_" << q+j;
201203
if (stridesY[q+j].dim != 1) out << " * " << stridesY[q+j];
202204
}
205+
// empty case
206+
if (fAttrAxis == 0 && q == 0 && r <= 1)
207+
out << "0";
203208
out << ";\n";
204209

205210
// input Indices
@@ -210,7 +215,11 @@ public:
210215
out << "i_" << i;
211216
if (stridesIndices[i].dim != 1) out << " * " << stridesIndices[i];
212217
}
218+
// empty case
219+
if (q == 0)
220+
out << "0";
213221
out << ";\n";
222+
214223
// K
215224
for (size_t k = 0; k < q + r; k++) out << SP;
216225
out << "size_t k = static_cast<size_t>(" << "tensor_" << fNIndices << "[i_index]" << ");\n";
@@ -239,6 +248,9 @@ public:
239248
for (size_t k = 0; k <j; k++) out << SP;
240249
out << "}\n";
241250
}
251+
// close empty scope if it was opened
252+
if (q == 0 && fAttrAxis == 0 && r <= 1)
253+
out << SP << "} // close Gather scope for scalar case \n";
242254

243255

244256
return out.str();

tmva/sofie/inc/TMVA/ROperator_Reduce.hxx

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,7 @@ public:
168168
reducedLength = "reducedLength_" + opName;
169169
out << SP << "size_t " << reducedLength << " = " << inputLength << " / " << outputLength << ";\n";
170170
} else {
171+
std::cout << "input " << inputLength << " output " << outputLength << " !" << std::endl;
171172
int rLength = std::stoi(inputLength) / std::stoi(outputLength);
172173
reducedLength = std::to_string(rLength);
173174
}

tmva/sofie/inc/TMVA/ROperator_Reshape.hxx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -264,7 +264,7 @@ public:
264264
else if (fOpMode == Unsqueeze)
265265
opType = "Unsquueze";
266266

267-
out << SP << "///--------" << opType << " operator " << opName << "\n" << std::endl;
267+
out << SP << "///--------" << opType << " operator " << opName << " --> " << ConvertShapeToString(fShapeOutput) << "\n";
268268

269269
// in case of dynamic output shape we need to set the shape value from input shape tensor
270270
// and take case of the zero values

tmva/sofie/inc/TMVA/ROperator_Slice.hxx

Lines changed: 23 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -171,8 +171,7 @@ public:
171171
fStart[fAxes[i]] = Dim{size_t(istart)};
172172
fEnd[fAxes[i]] = Dim{size_t(iend)};
173173
fSteps[fAxes[i]] = Dim{size_t(istep)};
174-
}
175-
else {
174+
} else {
176175
std::cout << i << " Param dim for " << fAxes[i] << " " << fShapeInput[fAxes[i]] << std::endl;
177176
// we need to correct at run time
178177
if (!itensors[0].empty()) {
@@ -196,22 +195,20 @@ public:
196195
if (!itensors[3].empty()) {
197196
fSteps[fAxes[i]] = Dim{size_t(itensors[3][i])};
198197
}
199-
// case of intermediate tensors for start/end/steps
200-
if (!fStartDims.empty()) {
201-
fStartDims[i] = Dim{std::string("start_") + fNOutput + "_" + std::to_string(i)};
202-
fStart[fAxes[i]] = fStartDims[i];
203-
}
204-
if (!fEndDims.empty()) {
205-
fEndDims[i] = Dim{std::string("end_") + fNOutput + "_" + std::to_string(i)};
206-
fEnd[fAxes[i]] = fEndDims[i];
207-
}
208-
if (!fStepDims.empty()) {
209-
fStepDims[i] = Dim{std::string("step_") + fNOutput + "_" + std::to_string(i)};
210-
fSteps[fAxes[i]] = fStepDims[i];
211-
}
212198
}
213-
214-
199+
// case of intermediate tensors for start/end/steps
200+
if (!fStartDims.empty()) {
201+
fStartDims[i] = Dim{std::string("start_") + fNOutput + "_" + std::to_string(i)};
202+
fStart[fAxes[i]] = fStartDims[i];
203+
}
204+
if (!fEndDims.empty()) {
205+
fEndDims[i] = Dim{std::string("end_") + fNOutput + "_" + std::to_string(i)};
206+
fEnd[fAxes[i]] = fEndDims[i];
207+
}
208+
if (!fStepDims.empty()) {
209+
fStepDims[i] = Dim{std::string("step_") + fNOutput + "_" + std::to_string(i)};
210+
fSteps[fAxes[i]] = fStepDims[i];
211+
}
215212

216213
}
217214
std::cout << "found output shape " << std::endl;
@@ -232,6 +229,15 @@ public:
232229
s += ")/" + fSteps[i].GetVal() + ")";
233230
}
234231
fShapeOutput[i] = Dim{s,size_t(-1)};
232+
// add also the shape parameters to RModel to declare them when
233+
// allocating output tensor
234+
if (fEnd[i].isParam && fEnd[i].dim != size_t(-1))
235+
model.AddShapeParam(fEnd[i].param,fEnd[i].dim );
236+
if (fStart[i].isParam && fStart[i].dim != size_t(-1))
237+
model.AddShapeParam(fStart[i].param,fStart[i].dim );
238+
if (fSteps[i].isParam && fSteps[i].dim != size_t(-1))
239+
model.AddShapeParam(fSteps[i].param,fSteps[i].dim );
240+
235241
}
236242
}
237243
// case input is a constant tensor and of int64 type

0 commit comments

Comments
 (0)