Commit 7e67d67

[mlir][tosa] Add missing verifier for tosa.pad
This PR adds a missing verifier for `tosa.pad`, ensuring that the padding shape matches [2*rank(input)], as required by the TOSA v1.0 specification.
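For illustration, a minimal sketch of the constraint the new verifier enforces, assuming an input `%arg0 : tensor<13x21x3xf32>` and a mis-sized padding value `%bad_padding : tensor<4xi32>` (names and shapes borrowed from the updated tests, not actual test code):

```mlir
// For a rank-3 input, the padding operand must be a 1-D tensor with
// 2 * rank(input) = 6 elements, laid out as [low0, high0, low1, high1, low2, high2].
%padding = "tosa.const"() {value = dense<[0, 0, 0, 1, 0, 1]> : tensor<6xi32>} : () -> tensor<6xi32>
%padded = tosa.pad %arg0, %padding : (tensor<13x21x3xf32>, tensor<6xi32>) -> tensor<13x22x4xf32>

// Rejected by the new verifier, since dim 0 of the padding tensor is 4, not 6:
// error: 'tosa.pad' op expected padding tensor dim 0 to have size 6 (2*rank(input)) but got size 4
%bad = tosa.pad %arg0, %bad_padding : (tensor<13x21x3xf32>, tensor<4xi32>) -> tensor<13x21x3xf32>
```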
1 parent 7b23f41 commit 7e67d67

14 files changed: +88 additions, -82 deletions


mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td

Lines changed: 5 additions & 5 deletions
@@ -1552,21 +1552,21 @@ def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad"> {
     Example:
 
     ```mlir
-    %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
-    tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>)
+    %0 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
+    tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<4xi32>) -> (tensor<4x9xf32>)
     ```
 
     Example 2:
 
     ```mlir
-    %0 = arith.constant dense<[[-1, 2], [3, 4]]> : tensor<2x2xi32>
-    tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<?x9xf32>)
+    %0 = arith.constant dense<[-1, 2, 3, 4]> : tensor<4xi32>
+    tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<4xi32>) -> (tensor<?x9xf32>)
     ```
   }];
 
   let arguments = (ins
     Tosa_RankedTensor:$input1,
-    Tosa_Int32Or64Tensor:$padding,
+    1DTensorOf<[Tosa_Int32Or64]>:$padding,
     Optional<Tosa_ScalarTensor>:$pad_const,
     OptionalAttr<Tosa_PadOpQuantizationAttr>:$quantization_info
   );

mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp

Lines changed: 6 additions & 8 deletions
@@ -338,23 +338,21 @@ class PadConverter : public OpConversionPattern<tosa::PadOp> {
           padOp, "tosa.pad was unable to determine the pad constant value.");
     }
 
-    Value lowIndex =
-        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(0));
-    Value highIndex =
-        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(1));
-
     SmallVector<OpFoldResult, 3> lowValues;
     SmallVector<OpFoldResult, 3> highValues;
 
     lowValues.reserve(rank);
     highValues.reserve(rank);
 
     for (int i = 0; i < rank; i++) {
-      Value inputIndex = rewriter.create<arith::ConstantIndexOp>(loc, i);
+      Value lowIndex =
+          rewriter.create<arith::ConstantIndexOp>(loc, 2 * i);
+      Value highIndex =
+          rewriter.create<arith::ConstantIndexOp>(loc, 2 * i + 1);
       Value lowVal = rewriter.createOrFold<tensor::ExtractOp>(
-          loc, padding, ValueRange({inputIndex, lowIndex}));
+          loc, padding, ValueRange({lowIndex}));
       Value highVal = rewriter.createOrFold<tensor::ExtractOp>(
-          loc, padding, ValueRange({inputIndex, highIndex}));
+          loc, padding, ValueRange({highIndex}));
 
       lowVal = rewriter.createOrFold<arith::IndexCastOp>(
           loc, rewriter.getIndexType(), lowVal);

mlir/lib/Dialect/Tosa/IR/TosaOps.cpp

Lines changed: 7 additions & 4 deletions
@@ -787,7 +787,7 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
     return success();
   }
 
-  outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
+  outputShape.resize(paddingShape.getDimSize(0) / 2, ShapedType::kDynamic);
   inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
   return success();
 }
@@ -823,13 +823,16 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
 LogicalResult tosa::PadOp::verify() {
   RankedTensorType inputType = getInput1().getType();
   RankedTensorType outputType = getOutput().getType();
-  TensorType paddingType = getPadding().getType();
+  RankedTensorType paddingType = getPadding().getType();
 
   if (inputType.getRank() != outputType.getRank())
     return emitOpError() << "expect same input and output tensor rank.";
 
-  if (paddingType.hasRank() && paddingType.getRank() != 2)
-    return emitOpError() << "expect 'padding' tensor rank equal to 2.";
+  if (!paddingType.isDynamicDim(0) &&
+      paddingType.getDimSize(0) != inputType.getRank() * 2)
+    return emitOpError() << "expected padding tensor dim 0 to have size "
+                         << inputType.getRank() * 2 << " (2*rank(input)) but got size "
+                         << paddingType.getDimSize(0);
 
   return success();
 }

mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
       }
     }
 
-    auto padSizeTy = RankedTensorType::get({4, 2}, rewriter.getI64Type());
+    auto padSizeTy = RankedTensorType::get({8}, rewriter.getI64Type());
     auto padSize =
        DenseIntElementsAttr::get(padSizeTy, ArrayRef<int64_t>(pad));
     Value padSizeVal =

mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
      }
    }
 
-    auto padSizeTy = RankedTensorType::get({5, 2}, rewriter.getI64Type());
+    auto padSizeTy = RankedTensorType::get({10}, rewriter.getI64Type());
    auto padSize =
        DenseIntElementsAttr::get(padSizeTy, ArrayRef<int64_t>(pad));
    Value padSizeVal =

mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ class TransposeConvStridedConverter
     weightPadding[5] =
         (weightWidth % stride[1]) ? (stride[1] - weightWidth % stride[1]) : 0;
     DenseElementsAttr weightPaddingAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({4, 2}, rewriter.getI32Type()), weightPadding);
+        RankedTensorType::get({8}, rewriter.getI32Type()), weightPadding);
     Value weightPaddingVal = CreateOpAndInferShape<tosa::ConstOp>(
         rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);

mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir

Lines changed: 12 additions & 12 deletions
@@ -459,7 +459,7 @@ func.func @slice_dyn(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
 // CHECK-LABEL: @pad_float
 // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
 func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
+  %0 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -469,32 +469,32 @@ func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK: tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>)
+  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<4xi32>) -> (tensor<4x9xf32>)
   return %1 : tensor<4x9xf32>
 }
 
 func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
+  %0 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
   // CHECK: [[CST:%.+]] = arith.constant 0 : i32
   // CHECK: tensor.pad
   // CHECK: tensor.yield [[CST]]
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xi32>, tensor<2x2xi32>) -> (tensor<4x9xi32>)
+  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xi32>, tensor<4xi32>) -> (tensor<4x9xi32>)
   return %1 : tensor<4x9xi32>
 }
 
 func.func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
+  %0 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
   // CHECK: [[CST:%.+]] = arith.constant 42 : i32
   // CHECK: tensor.pad
   // CHECK: tensor.yield [[CST]]
-  %1 = "tosa.pad"(%arg0, %0) {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<1x2xi32>, tensor<2x2xi32>) -> (tensor<4x9xi32>)
+  %1 = "tosa.pad"(%arg0, %0) {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<1x2xi32>, tensor<4xi32>) -> (tensor<4x9xi32>)
   return %1 : tensor<4x9xi32>
 }
 
 // -----
 
 func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
+  %0 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -505,14 +505,14 @@ func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   // CHECK: tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
   %1 = arith.constant dense<42.0> : tensor<f32>
-  %2 = "tosa.pad"(%arg0, %0, %1) : (tensor<1x2xf32>, tensor<2x2xi32>, tensor<f32>) -> (tensor<4x9xf32>)
+  %2 = "tosa.pad"(%arg0, %0, %1) : (tensor<1x2xf32>, tensor<4xi32>, tensor<f32>) -> (tensor<4x9xf32>)
   return %2 : tensor<4x9xf32>
 }
 
 // -----
 
 func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
+  %0 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -522,12 +522,12 @@ func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK: tensor.yield [[CST]]
   // CHECK: } : tensor<?x2xf32> to tensor<?x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<?x2xf32>, tensor<2x2xi32>) -> (tensor<?x9xf32>)
+  %1 = "tosa.pad"(%arg0, %0) : (tensor<?x2xf32>, tensor<4xi32>) -> (tensor<?x9xf32>)
   return %1 : tensor<?x9xf32>
 }
 
 func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
-  %0 = arith.constant dense<[[-1, 2], [3, 4]]> : tensor<2x2xi32>
+  %0 = arith.constant dense<[-1, 2, 3, 4]> : tensor<4xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -537,7 +537,7 @@ func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK: tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<?x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<?x9xf32>)
+  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<4xi32>) -> (tensor<?x9xf32>)
   return %1 : tensor<?x9xf32>
 }
 

mlir/test/Dialect/Tosa/canonicalize.mlir

Lines changed: 12 additions & 15 deletions
@@ -210,8 +210,8 @@ func.func @max_pool2d_is_noop(%arg0: tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf3
 // CHECK-LABEL: @pad_noop
 func.func @pad_noop(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
   // CHECK: return %arg0
-  %0 = "tosa.const"() { value = dense<0> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
+  %0 = "tosa.const"() { value = dense<0> : tensor<4xi32>} : () -> tensor<4xi32>
+  %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, tensor<4xi32>) -> tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
 
@@ -221,8 +221,8 @@ func.func @pad_noop(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
 func.func @pad_noop_padding_mismatch_nofold(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
   // CHECK: %[[PAD:.+]] = tosa.pad
   // CHECK: return %[[PAD]]
-  %0 = "tosa.const"() { value = dense_resource<__elided__> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
+  %0 = "tosa.const"() { value = dense_resource<__elided__> : tensor<4xi32>} : () -> tensor<4xi32>
+  %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, tensor<4xi32>) -> tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
 
@@ -234,42 +234,39 @@ func.func @pad_noop_type_mismatch_nofold(%arg0: tensor<10xf32>) -> tensor<?xf32>
   // CHECK: return %[[PAD]]
 
   %c0_i32 = arith.constant 0 : i32
-  %shape = tensor.from_elements %c0_i32, %c0_i32 : tensor<1x2xi32>
+  %shape = tensor.from_elements %c0_i32, %c0_i32 : tensor<2xi32>
 
-  %0 = tosa.pad %arg0, %shape : (tensor<10xf32>, tensor<1x2xi32>) -> tensor<?xf32>
+  %0 = tosa.pad %arg0, %shape : (tensor<10xf32>, tensor<2xi32>) -> tensor<?xf32>
   return %0 : tensor<?xf32>
 }
 
 // -----
 
 // CHECK-LABEL: @pad_determine_val_i32
-func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
+func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<4xi32>) -> tensor<?x?xi32> {
   // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<i32>}
   // CHECK: tosa.pad %arg0, %arg1, %[[ZERO]]
-  %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = tosa.pad %arg0, %arg1 : (tensor<?x?xi32>, tensor<2x2xi32>) -> tensor<?x?xi32>
+  %1 = tosa.pad %arg0, %arg1 : (tensor<?x?xi32>, tensor<4xi32>) -> tensor<?x?xi32>
   return %1 : tensor<?x?xi32>
 }
 
 // -----
 
 // CHECK-LABEL: @pad_determine_val_f32
-func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xf32> {
+func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<4xi32>) -> tensor<?x?xf32> {
   // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<f32>}
   // CHECK: tosa.pad %arg0, %arg1, %[[ZERO]]
-  %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = tosa.pad %arg0, %arg1 : (tensor<?x?xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
+  %1 = tosa.pad %arg0, %arg1 : (tensor<?x?xf32>, tensor<4xi32>) -> tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
 
 // -----
 
 // CHECK-LABEL: @pad_determine_val_quant
-func.func @pad_determine_val_quant(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
+func.func @pad_determine_val_quant(%arg0: tensor<?x?xi32>, %arg1 : tensor<4xi32>) -> tensor<?x?xi32> {
   // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<42> : tensor<i32>}
   // CHECK: tosa.pad %arg0, %arg1, %[[ZERO]]
-  %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = tosa.pad %arg0, %arg1 {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<?x?xi32>, tensor<2x2xi32>) -> tensor<?x?xi32>
+  %1 = tosa.pad %arg0, %arg1 {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<?x?xi32>, tensor<4xi32>) -> tensor<?x?xi32>
   return %1 : tensor<?x?xi32>
 }
 

mlir/test/Dialect/Tosa/invalid.mlir

Lines changed: 19 additions & 11 deletions
@@ -77,48 +77,56 @@ func.func @test_concat_element_type_mismatch(%arg0 : tensor<1x2xf32>, %arg1 : te
 
 // -----
 
-func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
+func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<6xi32>) -> tensor<13x21x3xf32> {
   // expected-error@+1 {{'tosa.pad' op padding of pad is not constant}}
-  %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
+  %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<6xi32>) -> tensor<13x21x3xf32>
   return %0 : tensor<13x21x3xf32>
 }
 
 // -----
 
 func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<i8>) -> tensor<13x21x3xi8> {
-  %0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
+  %0 = "tosa.const"() {value = dense<[0, 0, 0, 1, 0, 1]> : tensor<6xi32>} : () -> tensor<6xi32>
   // expected-error@+1 {{'tosa.pad' op pad_const of pad is not constant}}
-  %1 = tosa.pad %arg0, %0, %arg1 : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor<i8>) -> tensor<13x21x3xi8>
+  %1 = tosa.pad %arg0, %0, %arg1 : (tensor<13x21x3xi8>, tensor<6xi32>, tensor<i8>) -> tensor<13x21x3xi8>
   return %1 : tensor<13x21x3xi8>
 }
 
 // -----
 
-func.func @test_pad_io_rank_mismatch(%arg0: tensor<13x21xf32>, %arg1: tensor<2x2xi32>) {
+func.func @test_pad_io_rank_mismatch(%arg0: tensor<13x21xf32>, %arg1: tensor<4xi32>) {
   // expected-error@+1 {{'tosa.pad' op expect same input and output tensor rank.}}
-  %1 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<13x21x3xf32>
+  %1 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<4xi32>) -> tensor<13x21x3xf32>
   return
 }
 
 // -----
 
-func.func @test_pad_invalid_padding_rank(%arg0: tensor<13x21xf32>, %arg1: tensor<2xi32>) {
-  // expected-error@+1 {{'tosa.pad' op expect 'padding' tensor rank equal to 2.}}
-  %1 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<2xi32>) -> tensor<13x21xf32>
+func.func @test_pad_invalid_padding_rank(%arg0: tensor<13x21xf32>, %arg1: tensor<2x2xi32>) {
+  // expected-error@+1 {{'tosa.pad' op operand #1 must be 1D tensor of 32-bit signless integer or 64-bit signless integer values, but got 'tensor<2x2xi32>'}}
+  %1 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<13x21xf32>
   return
 }
 
 // -----
 
-func.func @test_pad_invalid_padConst_rank(%arg0: tensor<13x21xf32>, %arg1: tensor<2x2xi32>) {
+func.func @test_pad_invalid_padConst_rank(%arg0: tensor<13x21xf32>, %arg1: tensor<4xi32>) {
   %0 = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // expected-error@+1 {{'tosa.pad' op operand #2 must be 0D tensor of number values, but got 'tensor<1xf32>'}}
-  %1 = tosa.pad %arg0, %arg1, %0 : (tensor<13x21xf32>, tensor<2x2xi32>, tensor<1xf32>) -> tensor<13x21xf32>
+  %1 = tosa.pad %arg0, %arg1, %0 : (tensor<13x21xf32>, tensor<4xi32>, tensor<1xf32>) -> tensor<13x21xf32>
   return
 }
 
 // -----
 
+func.func @test_pad_padding_shape_mismatch(%arg0: tensor<13x21x3xf32>, %arg1: tensor<4xi32>) -> tensor<13x21x3xf32> {
+  // expected-error@+1 {{'tosa.pad' op expected padding tensor dim 0 to have size 6 (2*rank(input)) but got size 4}}
+  %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<4xi32>) -> tensor<13x21x3xf32>
+  return %0 : tensor<13x21x3xf32>
+}
+
+// -----
+
 func.func @test_transpose_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3xi32>) -> tensor<3x13x21xf32> {
   // expected-error@+1 {{'tosa.transpose' op perms of transpose is not constant}}
   %0 = tosa.transpose %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<3xi32>) -> tensor<3x13x21xf32>

mlir/test/Dialect/Tosa/ops.mlir

Lines changed: 4 additions & 4 deletions
@@ -525,16 +525,16 @@ func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -
 
 // -----
 // CHECK-LABEL: pad
-func.func @test_pad(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
-  %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
+func.func @test_pad(%arg0: tensor<13x21x3xf32>, %arg1: tensor<6xi32>) -> tensor<13x21x3xf32> {
+  %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<6xi32>) -> tensor<13x21x3xf32>
   return %0 : tensor<13x21x3xf32>
 }
 
 // -----
 // CHECK-LABEL: pad_explicit_value
-func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
+func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>, %arg1: tensor<6xi32>) -> tensor<13x21x3xf32> {
   %0 = "tosa.const"() {value = dense<3.14> : tensor<f32>} : () -> tensor<f32>
-  %1 = tosa.pad %arg0, %arg1, %0 : (tensor<13x21x3xf32>, tensor<3x2xi32>, tensor<f32>) -> tensor<13x21x3xf32>
+  %1 = tosa.pad %arg0, %arg1, %0 : (tensor<13x21x3xf32>, tensor<6xi32>, tensor<f32>) -> tensor<13x21x3xf32>
   return %1 : tensor<13x21x3xf32>
 }
 
