diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
index dceac03375606..f2328003e49c5 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -197,14 +197,6 @@ def Tosa_PadOpQuantInfoBuilder : OpBuilder<
                             input, paddings);
   }]>;
 
-def Tosa_ExplicitValuePadOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "Value":$paddings,
-       "Value":$pad_value),
-  [{
-    buildExplicitValuePadOpWithQuantInfo($_builder, $_state, outputType,
-                                         input, paddings, pad_value);
-  }]>;
-
 // Wrapper over base I32EnumAttr to set common fields.
 class Tosa_I32Enum<string name, string description, list<I32EnumAttrCase> cases>
     : I32EnumAttr<name, description, cases> {
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
index 344a54f0bb1c9..6fa4aedc1f0b0 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
@@ -168,6 +168,10 @@ namespace tosa {
 std::optional<Value> createZeroPointTensor(OpBuilder &builder, Location loc,
                                            Type srcElemType, int64_t zp = 0);
 
+// Create a pad-const tensor with value `val` of the required data type.
+Value createPadConstTensor(OpBuilder &builder, Location loc, Value src,
+                           int32_t val = 0);
+
 } // namespace tosa
 } // namespace mlir
 
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index 85bd3fb1bb1cc..61e030c90af09 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1891,8 +1890,7 @@ def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad"> {
   let arguments = (ins
     Tosa_RankedTensor:$input1,
     Tosa_Shape:$padding,
-    Optional<Tosa_ScalarTensor>:$pad_const,
-    OptionalAttr<I32Attr>:$input_zp
+    Tosa_ScalarTensor:$pad_const
   );
 
   let results = (outs
@@ -1904,10 +1903,8 @@
     Extension<[Tosa_EXT_FP8E4M3, Tosa_EXT_FP8E5M2, Tosa_EXT_BF16]>,
   ];
 
-  let builders = [Tosa_PadOpQuantInfoBuilder,
-                  Tosa_ExplicitValuePadOpQuantInfoBuilder];
+  let builders = [Tosa_PadOpQuantInfoBuilder];
 
-  let hasCanonicalizer = 1;
   let hasFolder = 1;
   let hasVerifier = 1;
 }
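Note: the op-definition change above makes `pad_const` a required `Tosa_ScalarTensor` operand and drops both the `input_zp` attribute and the explicit-value builder. As a minimal before/after sketch of what this means for IR (types borrowed from the `pad_float` test further down; the value names are illustrative):

```mlir
// Old form: pad_const was optional; a zero (or the input_zp) was implied.
%1 = tosa.pad %arg0, %padding : (tensor<1x2xf32>, !tosa.shape<4>) -> tensor<4x9xf32>

// New form: the pad value is always an explicit one-element tensor operand.
%pad_const = "tosa.const"() {value = dense<0.0> : tensor<1xf32>} : () -> tensor<1xf32>
%2 = tosa.pad %arg0, %padding, %pad_const : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<4x9xf32>
```

This keeps the pad value visible in the IR itself rather than implied by builder or canonicalization logic.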
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 7f029d56e2582..1473212dcfa10 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -36,10 +36,10 @@ TensorType inferReshapeInputType(TypedValue<TensorType> input,
     return input.getType();
 
   // The input type must be cast into a tensor with the same rank and all static
-  // dimensions set to 1. This prevents the generation of a tensor.collapse_shape
-  // op that converts a dynamically shaped tensor into a 0D tensor. While such
-  // construct is not incorrect on its own, bufferization cannot properly handle
-  // it at the moment, so we avoid it.
+  // dimensions set to 1. This prevents the generation of a
+  // tensor.collapse_shape op that converts a dynamically shaped tensor into a
+  // 0D tensor. While such construct is not incorrect on its own, bufferization
+  // cannot properly handle it at the moment, so we avoid it.
   SmallVector<int64_t> shape(input.getType().getRank(), 1);
   return input.getType().clone(shape);
 }
@@ -58,29 +58,31 @@ TensorType inferReshapeExpandedType(TensorType inputType,
   int64_t totalSize = inputIsStatic ? inputType.getNumElements() : -1;
 
   // Compute result shape
-  auto resultShape = llvm::map_to_vector(newShape, [&](int64_t size) -> int64_t {
-    // If this is not a placeholder, do not change it
-    if (size >= 0)
-      return size;
-
-    // If we do not know the total size of the tensor, keep this dimension
-    // dynamic in the result shape.
-    if (!inputIsStatic)
-      return ShapedType::kDynamic;
-
-    // Calculate the product of all elements in 'newShape' except for the -1
-    // placeholder, which we discard by negating the result.
-    int64_t totalSizeNoPlaceholder = -std::accumulate(
-        newShape.begin(), newShape.end(), 1, std::multiplies<int64_t>());
-
-    // If there is a 0 component in 'newShape', resolve the placeholder as 0.
-    if (totalSizeNoPlaceholder == 0)
-      return 0;
-
-    // Resolve the placeholder as the quotient between the total tensor size and
-    // the product of all other sizes.
-    return totalSize / totalSizeNoPlaceholder;
-  });
+  auto resultShape =
+      llvm::map_to_vector(newShape, [&](int64_t size) -> int64_t {
+        // If this is not a placeholder, do not change it.
+        if (size >= 0)
+          return size;
+
+        // If we do not know the total size of the tensor, keep this dimension
+        // dynamic in the result shape.
+        if (!inputIsStatic)
+          return ShapedType::kDynamic;
+
+        // Calculate the product of all elements in 'newShape' except for the -1
+        // placeholder, which we discard by negating the result.
+        int64_t totalSizeNoPlaceholder = -std::accumulate(
+            newShape.begin(), newShape.end(), 1, std::multiplies<int64_t>());
+
+        // If there is a 0 component in 'newShape', resolve the placeholder as
+        // 0.
+        if (totalSizeNoPlaceholder == 0)
+          return 0;
+
+        // Resolve the placeholder as the quotient between the total tensor size
+        // and the product of all other sizes.
+        return totalSize / totalSizeNoPlaceholder;
+      });
 
   bool resultIsStatic = !ShapedType::isDynamicShape(resultShape);
 
@@ -108,7 +110,8 @@ TensorType inferReshapeCollapsedType(TensorType lhsType, TensorType rhsType) {
   if (lhsShape.empty() || rhsShape.empty())
     return lhsType.clone(ArrayRef<int64_t>{});
 
-  if (ShapedType::isDynamicShape(lhsShape) || ShapedType::isDynamicShape(rhsShape))
+  if (ShapedType::isDynamicShape(lhsShape) ||
+      ShapedType::isDynamicShape(rhsShape))
     return lhsType.clone({ShapedType::kDynamic});
 
   SmallVector<int64_t> intermediateShape;
@@ -150,14 +153,16 @@ TensorType inferReshapeCollapsedType(TensorType lhsType, TensorType rhsType) {
 }
 
 SmallVector<ReassociationExprs>
-createReassociationMapForCollapse(OpBuilder &builder, Type srcType, Type dstType) {
+createReassociationMapForCollapse(OpBuilder &builder, Type srcType,
+                                  Type dstType) {
   auto srcShape = cast<TensorType>(srcType).getShape();
   auto dstShape = cast<TensorType>(dstType).getShape();
 
   if (srcShape.empty() || dstShape.empty())
     return {};
 
-  if (ShapedType::isDynamicShape(srcShape) || ShapedType::isDynamicShape(dstShape)) {
+  if (ShapedType::isDynamicShape(srcShape) ||
+      ShapedType::isDynamicShape(dstShape)) {
     assert(dstShape.size() == 1);
     SmallVector<AffineExpr, 2> exprs;
    for (auto i : llvm::seq<int64_t>(srcShape.size()))
@@ -249,14 +254,16 @@ class ReshapeConverter : public OpConversionPattern<tosa::ReshapeOp> {
     auto collapsedType = inferReshapeCollapsedType(inputType, expandedType);
 
     // Cast input if needed
-    auto castInput = rewriter.createOrFold<tensor::CastOp>(loc, inputType, input);
+    auto castInput =
+        rewriter.createOrFold<tensor::CastOp>(loc, inputType, input);
 
     // Emit collapse-expand pair
     auto collapsed = createCollapse(rewriter, loc, collapsedType, castInput);
     auto expanded = createExpand(rewriter, loc, expandedType, collapsed);
 
     // Cast to final result type if needed
-    auto result = rewriter.createOrFold<tensor::CastOp>(loc, resultType, expanded);
+    auto result =
+        rewriter.createOrFold<tensor::CastOp>(loc, resultType, expanded);
     rewriter.replaceOp(reshape, result);
     return success();
   }
@@ -350,29 +357,12 @@ class PadConverter : public OpConversionPattern<tosa::PadOp> {
     }
 
     ShapedType inputTy = cast<ShapedType>(input.getType());
-    Type elementTy = inputTy.getElementType();
     int64_t rank = inputTy.getRank();
 
     // Setup the default constantAttr.
 
-    Value padConstant;
-
-    if (padOp.getPadConst()) {
-      padConstant = rewriter.createOrFold<tensor::ExtractOp>(
-          loc, padOp.getPadConst(), ValueRange({}));
-    } else {
-      TypedAttr constantAttr;
-      if (isa<FloatType>(elementTy)) {
-        constantAttr = rewriter.getFloatAttr(elementTy, 0.0);
-      } else if (isa<IntegerType>(elementTy) && !padOp.getInputZpAttr()) {
-        constantAttr = rewriter.getIntegerAttr(elementTy, 0);
-      } else if (isa<IntegerType>(elementTy) && padOp.getInputZpAttr()) {
-        int64_t value = padOp.getInputZpAttr().getInt();
-        constantAttr = rewriter.getIntegerAttr(elementTy, value);
-      }
-      if (constantAttr)
-        padConstant = rewriter.create<arith::ConstantOp>(loc, constantAttr);
-    }
+    Value padConstant = rewriter.createOrFold<tensor::ExtractOp>(
+        loc, padOp.getPadConst(), ValueRange({}));
 
     if (!padConstant) {
       return rewriter.notifyMatchFailure(
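The placeholder resolution in `inferReshapeExpandedType` above is easiest to see on a concrete case. A hypothetical example, not part of the patch: with 12 input elements and a requested shape of [3, -1], the product of the non-placeholder sizes is 3, so the -1 resolves to 12 / 3 = 4:

```mlir
// totalSizeNoPlaceholder = -(3 * -1) = 3; the placeholder becomes 12 / 3 = 4,
// so the inferred expanded type is tensor<3x4xf32>.
%shape = tosa.const_shape {value = dense<[3, -1]> : tensor<2xindex>} : () -> !tosa.shape<2>
%r = tosa.reshape %arg0, %shape : (tensor<2x6xf32>, !tosa.shape<2>) -> tensor<3x4xf32>
```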
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 363b5958bc0fd..2c0376134b599 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -175,53 +175,6 @@ void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
   results.add<ConsolidateTransposeOptimization, TransposeIsReshape>(context);
 }
 
-struct MaterializePadValue : public OpRewritePattern<tosa::PadOp> {
-  using OpRewritePattern::OpRewritePattern;
-
-  LogicalResult matchAndRewrite(tosa::PadOp op,
-                                PatternRewriter &rewriter) const override {
-    if (op.getPadConst())
-      return failure();
-
-    auto input = op.getInput1();
-    auto padding = op.getPadding();
-
-    ShapedType inputTy = llvm::cast<ShapedType>(input.getType());
-    Type elementTy = inputTy.getElementType();
-
-    Attribute constantAttr;
-    if (llvm::isa<FloatType>(elementTy)) {
-      constantAttr = rewriter.getFloatAttr(elementTy, 0.0);
-    } else if (llvm::isa<IntegerType>(elementTy) && !op.getInputZpAttr()) {
-      constantAttr = rewriter.getIntegerAttr(elementTy, 0);
-    } else if (llvm::isa<IntegerType>(elementTy) && op.getInputZpAttr()) {
-      int64_t value = op.getInputZpAttr().getInt();
-      constantAttr = rewriter.getIntegerAttr(elementTy, value);
-    }
-
-    if (!constantAttr) {
-      return rewriter.notifyMatchFailure(
-          op,
-          "tosa.pad to linalg lowering encountered an unknown element type");
-    }
-
-    auto denseAttr = DenseElementsAttr::get(
-        RankedTensorType::get({1}, elementTy), constantAttr);
-    auto constantVal = rewriter.create<tosa::ConstOp>(
-        op.getLoc(), denseAttr.getType(), denseAttr);
-
-    rewriter.replaceOpWithNewOp<tosa::PadOp>(
-        op, op.getType(), ValueRange{input, padding, constantVal},
-        op->getAttrs());
-    return success();
-  }
-};
-
-void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
-                                        MLIRContext *context) {
-  results.add<MaterializePadValue>(context);
-}
-
 struct MaxPool2dIsNoOp : public OpRewritePattern<tosa::MaxPool2dOp> {
   using OpRewritePattern::OpRewritePattern;
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 1050f3f30fe98..a21f329380561 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -214,6 +214,22 @@ void mlir::tosa::printTypeOrAttr(OpAsmPrinter &p, Operation *op, TypeAttr type,
   }
 }
 
+// Create a pad-const tensor with value `val` of the required data type.
+Value mlir::tosa::createPadConstTensor(OpBuilder &builder, Location loc,
+                                       Value src, int32_t val) {
+  const auto srcType = getElementTypeOrSelf(src);
+  const auto srcElemType = getElementTypeOrSelf(src);
+  const auto padConstType = mlir::RankedTensorType::get({1}, srcType);
+  const auto padConstEType = mlir::RankedTensorType::get({1}, srcElemType);
+  const auto padConstAttr{
+      llvm::isa<FloatType>(srcElemType)
+          ? DenseElementsAttr::get(padConstEType,
+                                   builder.getFloatAttr(srcElemType, val))
+          : DenseElementsAttr::get(padConstEType,
+                                   builder.getIntegerAttr(srcElemType, val))};
+  return builder.create<mlir::tosa::ConstOp>(loc, padConstType, padConstAttr);
+}
+
 //===----------------------------------------------------------------------===//
 // Tosa utilities.
 //===----------------------------------------------------------------------===//
@@ -679,30 +695,14 @@ static void buildUnaryOpWithQuantInfo(OpBuilder &builder,
 static void buildPadOpWithQuantInfo(OpBuilder &builder, OperationState &result,
                                     Type outputType, Value input,
                                     Value paddings) {
-  result.addOperands({input, paddings});
-  auto quantAttr = buildPadOpQuantizationAttr(builder, input);
+  const Location loc{result.location};
+  int32_t zp{0};
+  const auto quantAttr = buildPadOpQuantizationAttr(builder, input);
   if (quantAttr) {
-    result.addAttribute("input_zp",
-                        builder.getI32IntegerAttr(
-                            static_cast<int32_t>(quantAttr.getInputZp())));
-  }
-  result.types.push_back(outputType);
-}
-
-/// This builder is called on TOSA pad operator when an explicit pad_const
-/// value is passed in. It also optionally constructs quantization_attr.
-static void buildExplicitValuePadOpWithQuantInfo(OpBuilder &builder,
-                                                 OperationState &result,
-                                                 Type outputType, Value input,
-                                                 Value paddings,
-                                                 Value padConst) {
-  result.addOperands({input, paddings, padConst});
-  auto quantAttr = buildPadOpQuantizationAttr(builder, input);
-  if (quantAttr) {
-    result.addAttribute("input_zp",
-                        builder.getI32IntegerAttr(
-                            static_cast<int32_t>(quantAttr.getInputZp())));
+    zp = static_cast<int32_t>(quantAttr.getInputZp());
   }
+  const auto padConstOp{createPadConstTensor(builder, loc, input, zp)};
+  result.addOperands({input, paddings, padConstOp});
   result.types.push_back(outputType);
 }
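For illustration, this is roughly the IR the reworked `buildPadOpWithQuantInfo` now produces for a quantized i8 input (a sketch; the -22 zero point and the shapes are borrowed from the transpose-conv test further down, the rest is assumed):

```mlir
// createPadConstTensor materializes the zero point as a one-element
// tosa.const, which then feeds the pad_const operand instead of an
// input_zp attribute on tosa.pad.
%zp = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}> : () -> tensor<1xi8>
%p = tosa.pad %arg0, %padding, %zp : (tensor<2x17x15x3xi8>, !tosa.shape<8>, tensor<1xi8>) -> tensor<2x19x17x3xi8>
```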
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index 83bdbce5d1857..70c6ab23ffd54 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -148,16 +148,16 @@ class TransposeConvStridedConverter
       return rewriter.notifyMatchFailure(
           op, "zero point must be zero for non-int8 integer types");
 
-    if (weightZpVal != 0) {
-      weight = CreateOpAndInferShape<tosa::PadOp>(
-          rewriter, loc, UnrankedTensorType::get(weightETy), weight,
-          weightPaddingVal, nullptr, rewriter.getI32IntegerAttr(weightZpVal));
-
-    } else {
-      weight = CreateOpAndInferShape<tosa::PadOp>(
-          rewriter, loc, UnrankedTensorType::get(weightETy), weight,
-          weightPaddingVal);
-    }
+    // Construct pad_const values from the zero-point values.
+    ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
+    const Value inputPadConst =
+        createPadConstTensor(builder, op->getLoc(), input, inputZpVal);
+    const Value weightPadConst =
+        createPadConstTensor(builder, op->getLoc(), input, weightZpVal);
+
+    weight = CreateOpAndInferShape<tosa::PadOp>(
+        rewriter, loc, UnrankedTensorType::get(weightETy), weight,
+        weightPaddingVal, weightPadConst);
 
     weightTy = cast<ShapedType>(weight.getType());
     weightHeight = weightTy.getDimSize(1);
@@ -169,7 +169,6 @@ class TransposeConvStridedConverter
       stride[0],      weightWidth / stride[1],
       stride[1],      inputChannels};
 
-    ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
     weight = CreateOpAndInferShape<tosa::ReshapeOp>(
         builder, UnrankedTensorType::get(weightETy), weight,
        getTosaConstShape(rewriter, loc, weightReshapeDims0));
@@ -206,15 +205,9 @@ class TransposeConvStridedConverter
     Value inputPaddingVal =
         getTosaConstShape(rewriter, op->getLoc(), inputPadding);
 
-    if (inputZpVal != 0) {
-      input = CreateOpAndInferShape<tosa::PadOp>(
-          rewriter, loc, UnrankedTensorType::get(inputETy), input,
-          inputPaddingVal, nullptr, rewriter.getI32IntegerAttr(inputZpVal));
-    } else {
-      input = CreateOpAndInferShape<tosa::PadOp>(
-          rewriter, loc, UnrankedTensorType::get(inputETy), input,
-          inputPaddingVal);
-    }
+    input = CreateOpAndInferShape<tosa::PadOp>(
+        rewriter, loc, UnrankedTensorType::get(inputETy), input,
+        inputPaddingVal, inputPadConst);
 
     // We use a zero bias as we need to broadcast the bias.
     auto zeroBias = rewriter.create<tosa::ConstOp>(
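Sketched in IR, the net effect on the decomposition (values taken from the `transpose_conv2d_strided_quantized` test below; the padded shape follows from the [0, 0, 0, 1, 0, 1, 0, 0] padding):

```mlir
// The weight zero point is threaded in as an explicit pad_const tensor
// rather than as an {input_zp = 42 : i32} attribute on tosa.pad.
%weight_zp = "tosa.const"() <{value = dense<42> : tensor<1xi8>}> : () -> tensor<1xi8>
%padded = tosa.pad %arg1, %weight_padding, %weight_zp : (tensor<5x3x5x3xi8>, !tosa.shape<8>, tensor<1xi8>) -> tensor<5x4x6x3xi8>
```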
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
index 6b7f622d3303f..c7a689f5a9ae9 100644
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -498,35 +498,38 @@ func.func @slice_dyn(%arg0: tensor) -> (tensor) {
 // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
 func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
   // CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
   // CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
-  // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
+  // CHECK-DAG: [[CST:%.+]] = arith.constant 3.140000e+00 : f32
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}[[INDEX1]], [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, !tosa.shape<4>) -> (tensor<4x9xf32>)
+  %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> (tensor<4x9xf32>)
   return %1 : tensor<4x9xf32>
 }
 
 // -----
 
 func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  // CHECK: [[CST:%.+]] = arith.constant 0 : i32
+  %pad_const = "tosa.const"() {value = dense<3> : tensor<1xi32>} : () -> tensor<1xi32>
+  // CHECK: [[CST:%.+]] = arith.constant 3 : i32
   // CHECK: tensor.pad
   // CHECK:   tensor.yield [[CST]]
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xi32>, !tosa.shape<4>) -> (tensor<4x9xi32>)
+  %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<1x2xi32>, !tosa.shape<4>, tensor<1xi32>) -> (tensor<4x9xi32>)
   return %1 : tensor<4x9xi32>
 }
 
 // -----
 
 func.func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  // CHECK: [[CST:%.+]] = arith.constant 42 : i32
+  %pad_const = "tosa.const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
+  // CHECK: [[CST:%.+]] = arith.constant 0 : i32
   // CHECK: tensor.pad
   // CHECK:   tensor.yield [[CST]]
-  %1 = "tosa.pad"(%arg0, %0) {input_zp = 42 : i32} : (tensor<1x2xi32>, !tosa.shape<4>) -> (tensor<4x9xi32>)
+  %1 = "tosa.pad"(%arg0, %0, %pad_const) {input_zp = 42 : i32} : (tensor<1x2xi32>, !tosa.shape<4>, tensor<1xi32>) -> (tensor<4x9xi32>)
   return %1 : tensor<4x9xi32>
 }
 
@@ -551,30 +554,32 @@ func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
 
 func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
   // CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
   // CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
-  // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
+  // CHECK-DAG: [[CST:%.+]] = arith.constant 3.140000e+00 : f32
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}[[INDEX1]], [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<?x2xf32> to tensor<?x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<?x2xf32>, !tosa.shape<4>) -> (tensor<?x9xf32>)
+  %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<?x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> (tensor<?x9xf32>)
   return %1 : tensor<?x9xf32>
 }
 
 // -----
 
 func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
   %0 = tosa.const_shape {value = dense<[-1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant -1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
   // CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
   // CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
-  // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
+  // CHECK-DAG: [[CST:%.+]] = arith.constant 3.140000e+00 : f32
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}[[INDEX1]], [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<?x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, !tosa.shape<4>) -> (tensor<?x9xf32>)
+  %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> (tensor<?x9xf32>)
   return %1 : tensor<?x9xf32>
 }
diff --git a/mlir/test/Dialect/Tosa/availability.mlir b/mlir/test/Dialect/Tosa/availability.mlir
index 98290c7b9eedd..e351148705644 100644
--- a/mlir/test/Dialect/Tosa/availability.mlir
+++ b/mlir/test/Dialect/Tosa/availability.mlir
@@ -512,9 +512,10 @@ func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -
 // CHECK-LABEL: pad
 func.func @test_pad(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
   %padding = tosa.const_shape {value = dense<0> : tensor<6xindex>} : () -> !tosa.shape<6>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [fp8e4m3, fp8e5m2, bf16] ]
-  %0 = tosa.pad %arg0, %padding : (tensor<13x21x3xf32>, !tosa.shape<6>) -> tensor<13x21x3xf32>
+  %0 = tosa.pad %arg0, %padding, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21x3xf32>
   return %0 : tensor<13x21x3xf32>
 }
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index a0184e2d82704..87f766916485a 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -258,7 +258,8 @@ func.func @max_pool2d_is_noop(%arg0: tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf3
 func.func @pad_noop(%arg0: tensor<?x2xf32>) -> tensor<?x2xf32> {
   // CHECK: return %arg0
   %0 = tosa.const_shape { value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %1 = tosa.pad %arg0, %0 : (tensor<?x2xf32>, !tosa.shape<4>) -> tensor<?x2xf32>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %1 = tosa.pad %arg0, %0, %pad_const : (tensor<?x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x2xf32>
   return %1 : tensor<?x2xf32>
 }
 
@@ -269,7 +270,8 @@ func.func @pad_noop_padding_mismatch_nofold(%arg0: tensor<?x2xf32>) -> tensor<?x2xf32> {
   // CHECK: %[[PAD:.+]] = tosa.pad
   // CHECK: return %[[PAD]]
   %shape = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %1 = tosa.pad %arg0, %shape : (tensor<?x2xf32>, !tosa.shape<4>) -> tensor<?x2xf32>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %1 = tosa.pad %arg0, %shape, %pad_const : (tensor<?x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x2xf32>
   return %1 : tensor<?x2xf32>
 }
 
@@ -280,7 +282,8 @@ func.func @pad_noop_type_mismatch_nofold(%arg0: tensor<10xf32>) -> tensor<?xf32> {
   // CHECK: %[[PAD:.+]] = tosa.pad
   // CHECK: return %[[PAD]]
   %shape = tosa.const_shape { value = dense<[1, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
-  %0 = tosa.pad %arg0, %shape : (tensor<10xf32>, !tosa.shape<2>) -> tensor<?xf32>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %0 = tosa.pad %arg0, %shape, %pad_const : (tensor<10xf32>, !tosa.shape<2>, tensor<1xf32>) -> tensor<?xf32>
   return %0 : tensor<?xf32>
 }
 
@@ -291,8 +294,9 @@ func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>
   // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<1xi32>}
   // CHECK-DAG: %[[PADDING:.+]] = tosa.const_shape {value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
   // CHECK: tosa.pad %arg0, %[[PADDING]], %[[ZERO]]
+  %pad_const = "tosa.const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
   %0 = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %1 = tosa.pad %arg0, %0 : (tensor<?x?xi32>, !tosa.shape<4>) -> tensor<?x?xi32>
+  %1 = tosa.pad %arg0, %0, %pad_const : (tensor<?x?xi32>, !tosa.shape<4>, tensor<1xi32>) -> tensor<?x?xi32>
   return %1 : tensor<?x?xi32>
 }
 
@@ -300,11 +304,12 @@ func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>
 // CHECK-LABEL: @pad_determine_val_f32
 func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xf32> {
-  // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<1xf32>}
+  // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<3.140000e+00> : tensor<1xf32>}
   // CHECK-DAG: %[[PADDING:.+]] = tosa.const_shape {value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
   // CHECK: tosa.pad %arg0, %[[PADDING]], %[[ZERO]]
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   %0 = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+  %1 = tosa.pad %arg0, %0, %pad_const : (tensor<?x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
 
@@ -312,11 +317,12 @@ func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>
 // CHECK-LABEL: @pad_determine_val_quant
 func.func @pad_determine_val_quant(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
-  // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi32>}
+  // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<3> : tensor<1xi32>}
   // CHECK-DAG: %[[PADDING:.+]] = tosa.const_shape {value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
   // CHECK: tosa.pad %arg0, %[[PADDING]], %[[ZERO]]
+  %pad_const = "tosa.const"() {value = dense<3> : tensor<1xi32>} : () -> tensor<1xi32>
   %0 = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %1 = tosa.pad %arg0, %0 {input_zp = 42 : i32} : (tensor<?x?xi32>, !tosa.shape<4>) -> tensor<?x?xi32>
+  %1 = tosa.pad %arg0, %0, %pad_const {input_zp = 42 : i32} : (tensor<?x?xi32>, !tosa.shape<4>, tensor<1xi32>) -> tensor<?x?xi32>
   return %1 : tensor<?x?xi32>
 }
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
index dc556f7486774..594cb12474b3b 100644
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -211,8 +211,9 @@ func.func @test_concat_element_type_mismatch(%arg0 : tensor<1x2xf32>, %arg1 : te
 // -----
 
 func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: !tosa.shape<6>) -> tensor<13x21x3xf32> {
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // expected-error@+1 {{'tosa.pad' op shape operand is not compile time resolvable}}
-  %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, !tosa.shape<6>) -> tensor<13x21x3xf32>
+  %0 = tosa.pad %arg0, %arg1, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21x3xf32>
   return %0 : tensor<13x21x3xf32>
 }
 
@@ -228,18 +229,19 @@ func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<1xi8>) ->
 // -----
 
 func.func @test_pad_io_rank_mismatch(%arg0: tensor<13x21xf32>) {
-  %padding = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // expected-error@+1 {{'tosa.pad' op expect same input and output tensor rank.}}
-  %1 = tosa.pad %arg0, %padding : (tensor<13x21xf32>, !tosa.shape<4>) -> tensor<13x21x3xf32>
-  return
+  %1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<13x21x3xf32>
 }
 
 // -----
 
 func.func @test_pad_invalid_padding_rank(%arg0: tensor<13x21xf32>) {
   %0 = tosa.const_shape {value = dense<1> : tensor<6xindex>} : () -> !tosa.shape<6>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // expected-error@+1 {{'tosa.pad' op expected padding tensor dim 0 to have size 4 (2*rank(shape1)) but got size 6}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21xf32>, !tosa.shape<6>) -> tensor<13x21xf32>
+  %1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21xf32>
   return
 }
 
@@ -256,9 +258,10 @@ func.func @test_pad_invalid_padConst_rank(%arg0: tensor<13x21xf32>, %arg1: tenso
 // -----
 
 func.func @test_pad_padding_shape_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
-  %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4> 
+  %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // expected-error@+1 {{'tosa.pad' op expected padding tensor dim 0 to have size 6 (2*rank(shape1)) but got size 4}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21x3xf32>, !tosa.shape<4>) -> tensor<13x21x3xf32>
+  %1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<13x21x3xf32>
   return %1 : tensor<13x21x3xf32>
 }
"tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32> %padding = tosa.const_shape {value = dense<0> : tensor<14xindex>} : () -> !tosa.shape<14> // expected-error@+1 {{'tosa.pad' op failed level check: operand rank(shape) <= MAX_RANK}} - %0 = tosa.pad %arg0, %padding : (tensor<1x1x1x1x13x21x3xf32>, !tosa.shape<14>) -> tensor<1x1x1x1x13x21x3xf32> + %0 = tosa.pad %arg0, %padding, %pad_const : (tensor<1x1x1x1x13x21x3xf32>, !tosa.shape<14>, tensor<1xf32>) -> tensor<1x1x1x1x13x21x3xf32> return %0 : tensor<1x1x1x1x13x21x3xf32> } diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir index 45a87b97125f7..7bf97fd441b9c 100644 --- a/mlir/test/Dialect/Tosa/ops.mlir +++ b/mlir/test/Dialect/Tosa/ops.mlir @@ -580,14 +580,6 @@ func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) - return %0 : tensor<26x21x3xf32> } -// ----- -// CHECK-LABEL: pad -func.func @test_pad(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %padding = tosa.const_shape {value = dense<0> : tensor<6xindex>} : () -> !tosa.shape<6> - %0 = tosa.pad %arg0, %padding : (tensor<13x21x3xf32>, !tosa.shape<6>) -> tensor<13x21x3xf32> - return %0 : tensor<13x21x3xf32> -} - // ----- // CHECK-LABEL: pad_explicit_value func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir index 0167bf10ed0ae..0a41931b24523 100644 --- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir @@ -93,8 +93,10 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor< func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1: tensor<5x3x5x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x35x47x5xi32>) { // Manipulate the weight matrix to handle striding. + // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}> : () -> tensor<1xi8> + // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>}> : () -> tensor<1xi8> // CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> - // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] {input_zp = 42 : i32} + // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]], %[[WEIGHT_ZP]] // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> : tensor<6xindex>} // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]] // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]] {perms = array} @@ -102,30 +104,28 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]] // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32} // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32} - // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> - // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> // Pad out the input matrix to handle the transpose conv. 
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
index 0167bf10ed0ae..0a41931b24523 100644
--- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
@@ -93,8 +93,10 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor<
 func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1: tensor<5x3x5x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x35x47x5xi32>) {
   // Manipulate the weight matrix to handle striding.
+  // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}> : () -> tensor<1xi8>
+  // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>}> : () -> tensor<1xi8>
   // CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
-  // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] {input_zp = 42 : i32}
+  // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]], %[[WEIGHT_ZP]]
   // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> : tensor<6xindex>}
   // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]]
   // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]] {perms = array<i32: 2, 4, 0, 1, 3, 5>}
@@ -102,30 +104,28 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1
   // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]]
   // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32}
   // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32}
-  // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4>
 
   // Pad out the input matrix to handle the transpose conv.
   // CHECK-DAG: %[[PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
-  // CHECK-DAG: %[[NEWINPUT:.+]] = tosa.pad %arg0, %[[PAD]] {input_zp = -22 : i32}
+  // CHECK-DAG: %[[NEWINPUT:.+]] = tosa.pad %arg0, %[[PAD]], %[[INPUT_ZP]]
 
   // Manipulate the final shape.
   // CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() <{value = dense<0> : tensor<30xi32>}
-  // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}
-  // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>}
   // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}
-  // CHECK-DAG: %[[CONV_NEW_SHAPE:.*]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>}
-  // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONV_NEW_SHAPE]]
+  // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>}
+  // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONST6]]
   // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]] {perms = array<i32: 0, 1, 3, 2, 4, 5>}
-  // CHECK-DAG: %[[TRANS_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex>}
-  // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[TRANS_NEW_SHAPE]]
-  // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]]
-  // CHECK-DAG: %[[ARG2_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>}
-  // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[ARG2_NEW_SHAPE]]
+  // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex>}
+  // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[CONST8]]
+  // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>}
+  // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>}
+  // CHECK-DAG: %[[SLICE:.*]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]]
+  // CHECK-DAG: %[[CONST9:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>}
+  // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[CONST9]]
   // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]]
-  %input_zp = "tosa.const"() {value = dense<-22> : tensor<1xi8>} : () -> tensor<1xi8>
-  %weight_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8>
-  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = i32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<2x35x47x5xi32>
+  %input_zp = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %weight_zp = "tosa.const"() <{value = dense<42> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = i32, out_pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<2x35x47x5xi32>
   return %0 : tensor<2x35x47x5xi32>
 }
@@ -133,24 +133,25 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1
 
 // CHECK-LABEL: @transpose_conv2d_strided_overpad
 func.func @transpose_conv2d_strided_overpad(%arg0 : tensor<1x16x1x1xi8>, %arg1 : tensor<1x2x1x1xi8>, %arg2 : tensor<1xi32>) -> (tensor<1x19x2x1xi32>) {
-  // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>}
+  // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
   // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[1, 2, 1, 1, 2, 1]> : tensor<6xindex>}
+  // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-103> : tensor<1xi8>}> : () -> tensor<1xi8>
+  // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<93> : tensor<1xi8>}> : () -> tensor<1xi8>
   // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[2, 2, 1, 1]> : tensor<4xindex>}
-  // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>}
+  // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
   // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<2xi32>}
   // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[1, 17, 1, 1, 2, 1]> : tensor<6xindex>}
   // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[1, 17, 2, 1]> : tensor<4xindex>}
-  // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>}
+  // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
   // CHECK-DAG: %[[CONST10:.+]] = tosa.const_shape {value = dense<1> : tensor<4xindex>}
-  // CHECK-DAG: %[[INPUT_ZP:.*]] = "tosa.const"() <{value = dense<-103> : tensor<1xi8>}>
-  // CHECK-DAG: %[[WEIGHT_ZP:.*]] = "tosa.const"() <{value = dense<93> : tensor<1xi8>}>
-  // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]] {input_zp = 93 : i32}
+  // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]], %[[WEIGHT_ZP]]
   // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]], %[[CONST1]]
   // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = tosa.transpose %[[RESHAPE_WEIGHT_0]] {perms = array<i32: 2, 4, 0, 1, 3, 5>}
   // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]], %[[CONST3]]
   // CHECK: %[[REVERSE:.+]] = tosa.reverse %[[RESHAPE_WEIGHT_1]] {axis = 1 : i32}
-  // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]] {input_zp = -103 : i32}
-  // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}
+  // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]], %[[INPUT_ZP]]
+  // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]], %[[INPUT_ZP]], %[[WEIGHT_ZP]]
+  // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
   // CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]], %[[CONST6]]
   // CHECK: %[[TRANSPOSE_RESULT:.+]] = tosa.transpose %[[RESHAPE_RESULT_0]] {perms = array<i32: 0, 1, 3, 2, 4, 5>}
   // CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]], %[[CONST8]]
diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
index 77d77ba957621..68ccff886329c 100644
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -469,8 +469,9 @@ func.func @test_concat_axis_1(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>)
 // CHECK-LABEL: @test_padding_dynamic_input
 func.func @test_padding_dynamic_input(%arg0 : tensor<1x?xf32>) -> () {
   %0 = tosa.const_shape { value = dense<[1, 2, 3, 4]> : tensor<4xindex> } : () -> !tosa.shape<4>
-  // CHECK: tosa.pad %arg0, %0 : (tensor<1x?xf32>, !tosa.shape<4>) -> tensor<4x?xf32>
-  %1 = tosa.pad %arg0, %0 : (tensor<1x?xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+  %1 = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  // CHECK: tosa.pad %arg0, %0, %1 : (tensor<1x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<4x?xf32>
+  %2 = tosa.pad %arg0, %0, %1 : (tensor<1x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
   return
 }
 
@@ -479,8 +480,9 @@ func.func @test_padding_dynamic_input(%arg0 : tensor<1x?xf32>) -> () {
 
 // CHECK-LABEL: @test_padding_simple
 func.func @test_padding_simple(%arg0 : tensor<1x2xf32>) -> () {
   %0 = tosa.const_shape { value = dense<[1, 2, 3, 4]> : tensor<4xindex> } : () -> !tosa.shape<4>
-  // CHECK: tosa.pad %arg0, %0 : (tensor<1x2xf32>, !tosa.shape<4>) -> tensor<4x9xf32>
-  %1 = tosa.pad %arg0, %0 : (tensor<1x2xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+  %1 = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  // CHECK: tosa.pad %arg0, %0, %1 : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<4x9xf32>
+  %2 = tosa.pad %arg0, %0, %1 : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
   return
 }