Commit 495a50c

[tmva][sofie] Fix some bugs in handling dynamic shapes

Fix some bugs in dealing with dynamic shapes: parentheses must be correctly added to the generated expressions. Also fix a bug to avoid broadcasting constant input tensors twice in the binary operator, and remove the commented-out code as suggested by Sanjiban's review.
1 parent 979189c commit 495a50c

4 files changed: +14 −22 lines

tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx

Lines changed: 5 additions & 2 deletions
@@ -141,6 +141,7 @@ public:
          // Update the data and the shape of A
          model.AddConstantTensor(fNBroadcastedA, model.GetTensorType(fNA), fShapeY, broadcastedData);
          fShapeA = fShapeY;
+         fDimShapeA = ConvertShapeToDim(fShapeA);
       } else {
          // Add an intermediate tensor for broadcasting A
          model.AddIntermediateTensor(fNBroadcastedA, model.GetTensorType(fNA), fShapeY);
@@ -166,6 +167,7 @@ public:
             << std::endl;
          model.AddConstantTensor(fNBroadcastedB, model.GetTensorType(fNB), fShapeY, broadcastedData);
          fShapeB = fShapeY;
+         fDimShapeB = ConvertShapeToDim(fShapeB);
       } else {
          // Add an intermediate tensor for broadcasting B
          model.AddIntermediateTensor(fNBroadcastedB, model.GetTensorType(fNB), fShapeY);
@@ -314,14 +316,15 @@ public:
       }
       // Broadcast A if it's uninitialized
       // use broadcasting function where we pass an already allocated tensor to minimize memory allocations
-      if (!fNBroadcastedA.empty()) {
+      // if A is a constant tensor it is already broadcast in Initialize(), in which case shapeA == shapeY
+      if (!fNBroadcastedA.empty() && (fDimShapeA != fDimShapeY)) {
          out << SP << SP << "// Broadcasting uninitialized tensor " << fNA << "\n";
          out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << typeName << ">(tensor_" << fNA << ", "
              << ConvertDimShapeToString(fDimShapeA) << ", " << ConvertDimShapeToString(fDimShapeY)
              << ", fTensor_" << fNBroadcastedA << ");\n";
       }
       // Broadcast B if it's uninitialized
-      if (!fNBroadcastedB.empty()) {
+      if (!fNBroadcastedB.empty() && (fDimShapeB != fDimShapeY)) {
          out << SP << SP << "// Broadcasting uninitialized tensor " << fNB << "\n";
          out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << typeName << ">(tensor_" << fNB << ", "
              << ConvertDimShapeToString(fDimShapeB) << ", " << ConvertDimShapeToString(fDimShapeY)
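
The key fix here is the added shape comparison: a constant input is broadcast once in Initialize(), which now also updates its Dim shape to the output shape, so Generate() no longer emits a second, redundant broadcast. A minimal sketch of the fixed guard, with a simplified stand-in for SOFIE's Dim class (the struct and helper below are illustrative, not the actual SOFIE API):

   #include <string>
   #include <vector>

   // Simplified stand-in for SOFIE's Dim: a dimension that is either a
   // known integer or a named runtime parameter.
   struct Dim {
      std::string param;  // non-empty when the dimension is symbolic
      size_t value = 0;
      bool operator==(const Dim &o) const { return param == o.param && value == o.value; }
      bool operator!=(const Dim &o) const { return !(*this == o); }
   };

   // Sketch of the fixed condition: emit a runtime broadcast only when a
   // broadcast tensor was requested AND the shapes still differ; constant
   // inputs already broadcast in Initialize() satisfy shapeA == shapeY
   // and are skipped.
   bool needsRuntimeBroadcast(const std::string &broadcastName,
                              const std::vector<Dim> &shapeA,
                              const std::vector<Dim> &shapeY)
   {
      return !broadcastName.empty() && shapeA != shapeY;
   }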

tmva/sofie/inc/TMVA/ROperator_Concat.hxx

Lines changed: 4 additions & 16 deletions
@@ -150,25 +150,13 @@
            }

         }
+        // add parentheses in case the dimension is an expression
+        if (concat_dim.isParam && concat_dim.dim == static_cast<size_t>(-1))
+           concat_dim = Dim{ std::string("(") + concat_dim.GetVal() + std::string(")"), concat_dim.dim };
      }

      // output shape for concatenated axis
      ret[fAxis] = Dim{concat_dim};
-     // //ret[0] = inputs[0];
-     // // check if concat_dim is an integer
-     // // case like "2+n" can be converted to an integer so need to check the length
-     // size_t pos = 0;
-     // try {
-     //    i_concat_dim = std::stoi(concat_dim, &pos);
-     //    if (pos == concat_dim.length())
-     //       ret[fAxis] = Dim{i_concat_dim}; // dimension is integer
-     //    else {
-     //       // check if a composite expression
-     //       ret[fAxis] = Dim{concat_dim};
-     //    }
-     // catch (std::invalid_argument const& ex) {
-
-     // }

   }
   // case of stacking (not supported yet)
@@ -241,7 +229,7 @@
         for (size_t i = 0; i < inputData.size(); i++)
            inputData[i] = Dim{ static_cast<size_t>(intData[i])};
      }
-     std::cout << "concatanating input data " << inputLength << " " << inputData[0] << std::endl;
+     std::cout << "concatenating input data " << inputLength << " " << inputData[0] << std::endl;
      std::copy(inputData.begin(), inputData.end(), outputData.begin() + offset );
      offset += inputLength;
   }
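
The new parentheses matter because a parametric concat dimension such as "n+2" is later spliced into larger arithmetic expressions (for example tensor lengths), where operator precedence would otherwise change the value. A small illustration, using a hypothetical lengthExpr helper in place of the real code generation:

   #include <iostream>
   #include <string>

   // Hypothetical helper (not the SOFIE API): builds a length expression
   // by multiplying two dimension strings, as the generated code does.
   std::string lengthExpr(const std::string &d0, const std::string &d1) {
      return d0 + " * " + d1;
   }

   int main() {
      std::string concat_dim = "n+2";  // parametric dimension built by Concat
      std::cout << lengthExpr("4", concat_dim) << "\n";             // 4 * n+2   -> parsed as (4*n)+2
      std::cout << lengthExpr("4", "(" + concat_dim + ")") << "\n"; // 4 * (n+2) -> intended length
   }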

tmva/sofie/inc/TMVA/ROperator_Reshape.hxx

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ public:
            output_shape[i] = Dim{std::string("(") + ConvertDimShapeToLength(reduced_input) + ")", static_cast<size_t>(-1)};
         }
         if (!canSimplify)
-           output_shape[i] = Dim{std::string("(") + input_length + " / (" + tmp_length + ")", static_cast<size_t>(-1)};
+           output_shape[i] = Dim{std::string("(") + input_length + " / (" + tmp_length + "))", static_cast<size_t>(-1)};
      }

      break; // cannot have more than -1
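
This is a one-character fix: the generated length expression opened two parentheses but closed only one, producing unbalanced (and thus uncompilable) generated code. A minimal reproduction of the string building, with made-up dimension strings:

   #include <cassert>
   #include <string>

   int main() {
      // Illustrative values; the real strings come from the model's shapes.
      std::string input_length = "n*6", tmp_length = "2*3";
      std::string before = std::string("(") + input_length + " / (" + tmp_length + ")";   // "(n*6 / (2*3)"  -> unbalanced
      std::string after  = std::string("(") + input_length + " / (" + tmp_length + "))";  // "(n*6 / (2*3))" -> balanced
      assert(before == "(n*6 / (2*3)");
      assert(after  == "(n*6 / (2*3))");
   }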

tmva/sofie/inc/TMVA/ROperator_Transpose.hxx

Lines changed: 4 additions & 3 deletions
@@ -147,7 +147,8 @@ public:
      //......
      // and we have j_k = i_fAttrPerm[k]
      // since we are using consecutive writes we should find the inverse of fAttrPerm
-     out << SP << "///------- Transpose operator\n" << std::endl;
+     out << SP << "///------- Transpose operator " << OpName << ConvertDimShapeToString(fShapeData)
+         << " --> " << ConvertDimShapeToString(fShapeOutput) << std::endl;
      out << SP << "for (size_t id = 0; id < " << length << " ; id++){\n";
      out << SP << SP << "tensor_" << fNOutput << "[id] = tensor_" << fNData << "[ ";
      // compute output j indices
@@ -156,9 +157,9 @@ public:
         if (k == 0)
            i_out[k] = "id";
         else
-           i_out[k] = "(id % " + outStrides[k-1].GetVal() + ")";
+           i_out[k] = "(id % (" + outStrides[k-1].GetVal() + "))";
         if (k < dim-1)
-           i_out[k] += " / " + outStrides[k].GetVal();
+           i_out[k] += " / (" + outStrides[k].GetVal() + ")";
      }
      // use now them for input tensors
      // need to invert the fAttrPerm[k]
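
The extra parentheses around the strides guard against the same precedence pitfall: a stride may itself be a symbolic product such as "n*4", and in C++ the operators % and * share the same precedence and associate left-to-right. A small check of the arithmetic involved:

   #include <iostream>

   int main() {
      // Without parentheses, "id % n*4" parses as "(id % n) * 4".
      int id = 10, n = 3;
      std::cout << (id % n*4) << "\n";    // (10 % 3) * 4 = 4   -> wrong index
      std::cout << (id % (n*4)) << "\n";  // 10 % 12      = 10  -> intended index
   }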
