@@ -135,15 +135,14 @@ class TransposeConvStridedConverter
     int64_t inputChannels = weightTy.getDimSize(3);
 
     // Pad the weight so that it is modulo of the striding.
-    llvm::SmallVector<int32_t, 8> weightPadding = {0, 0, 0, 0, 0, 0, 0, 0};
+    llvm::SmallVector<int64_t, 8> weightPadding = {0, 0, 0, 0, 0, 0, 0, 0};
     weightPadding[3] =
         (weightHeight % stride[0]) ? (stride[0] - weightHeight % stride[0]) : 0;
     weightPadding[5] =
-        (weightWidth % stride[1]) ? (stride[1] - weightWidth % stride[1]) : 0;
-    DenseElementsAttr weightPaddingAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({8}, rewriter.getI32Type()), weightPadding);
-    Value weightPaddingVal = CreateOpAndInferShape<tosa::ConstOp>(
-        rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);
+        weightWidth % stride[1] ? stride[1] - weightWidth % stride[1] : 0;
+
+    Value weightPaddingVal =
+        getTosaConstShape(rewriter, op->getLoc(), weightPadding);
 
     if (op.getQuantizationInfo().has_value()) {
       auto quantInfo = op.getQuantizationInfo().value();
@@ -197,17 +196,14 @@ class TransposeConvStridedConverter
         /* axis = */ rewriter.getI32IntegerAttr(2));
 
     // We need to pad the input far enough that we can pull all values.
-    llvm::SmallVector<int32_t, 8> inputPadding = {0, 0, 0, 0, 0, 0, 0, 0};
+    llvm::SmallVector<int64_t, 8> inputPadding = {0, 0, 0, 0, 0, 0, 0, 0};
     inputPadding[2] += restridedWeightTy.getDimSize(1) - 1;
     inputPadding[3] += restridedWeightTy.getDimSize(1) - 1;
     inputPadding[4] += restridedWeightTy.getDimSize(2) - 1;
     inputPadding[5] += restridedWeightTy.getDimSize(2) - 1;
 
-    DenseElementsAttr inputPaddingAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({8}, rewriter.getI32Type()), inputPadding);
-
-    Value inputPaddingVal = CreateOpAndInferShape<tosa::ConstOp>(
-        rewriter, loc, inputPaddingAttr.getType(), inputPaddingAttr);
+    Value inputPaddingVal =
+        getTosaConstShape(rewriter, op->getLoc(), inputPadding);
 
     if (op.getQuantizationInfo().has_value()) {
       auto quantInfo = op.getQuantizationInfo().value();
@@ -310,17 +306,14 @@ class TransposeConvStridedConverter
         rewriter.getDenseI64ArrayAttr(sliceSize))
         .getResult();
 
-    llvm::SmallVector<int32_t, 8> resultPadding = {0, 0, 0, 0, 0, 0, 0, 0};
+    llvm::SmallVector<int64_t, 8> resultPadding = {0, 0, 0, 0, 0, 0, 0, 0};
     resultPadding[2] = resultPadTop;
     resultPadding[3] = resultTy.getDimSize(1) - resultPadTop - sliceSize[1];
     resultPadding[4] = resultPadLeft;
     resultPadding[5] = resultTy.getDimSize(2) - resultPadLeft - sliceSize[2];
 
-    DenseElementsAttr resultPaddingAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({8}, rewriter.getI32Type()), resultPadding);
-
-    Value resultPaddingVal = CreateOpAndInferShape<tosa::ConstOp>(
-        rewriter, loc, resultPaddingAttr.getType(), resultPaddingAttr);
+    Value resultPaddingVal =
+        getTosaConstShape(rewriter, op->getLoc(), resultPadding);
 
     Value resultPad = CreateOpAndInferShape<tosa::PadOp>(
         rewriter, loc, UnrankedTensorType::get(resultETy), slice,
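
For context, each hunk replaces an i32 DenseElementsAttr padding constant (materialized through CreateOpAndInferShape<tosa::ConstOp>) with a call to getTosaConstShape, which carries the padding as int64 values in a TOSA shape constant. Below is a minimal sketch of what such a helper could look like, assuming the dialect exposes tosa::ConstShapeOp and a ranked shape type; the "Sketch" name and the exact attribute/type builders (getIndexTensorAttr, tosa::shapeType) are illustrative assumptions, not the upstream implementation.

// Hypothetical sketch only: build a TOSA shape constant from int64 values.
// The real getTosaConstShape lives in the TOSA conversion utilities; the
// type/attribute builders used here are assumptions based on the call sites
// in this diff.
static Value getTosaConstShapeSketch(PatternRewriter &rewriter, Location loc,
                                     llvm::ArrayRef<int64_t> values) {
  // Shape operands carry index-typed elements rather than an i32 tensor.
  DenseIntElementsAttr attr = rewriter.getIndexTensorAttr(values);
  auto type = tosa::shapeType::get(rewriter.getContext(), values.size());
  return rewriter.create<tosa::ConstShapeOp>(loc, type, attr).getResult();
}

Switching from an i32 tensor constant to a shape constant also explains the SmallVector element-type change from int32_t to int64_t in each hunk: the padding amounts now feed a shape-typed operand directly instead of being re-encoded as an i32 attribute.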