|
1 | | -// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-tosa --cse %s -split-input-file | FileCheck %s |
| 1 | +// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-tosa --canonicalize --cse %s -split-input-file | FileCheck %s |
2 | 2 |
|
3 | 3 | func.func @test_pad_f32(%arg0: tensor<20x16x44x32xf32>) -> tensor<24x22x52x42xf32> { |
4 | 4 | %noval = "onnx.NoValue"() {value} : () -> none |
@@ -160,3 +160,59 @@ func.func @test_pad_f16_constant_none(%arg0: tensor<256x1x1x5x1xf16>) -> tensor< |
160 | 160 | // CHECK: %[[VAR2:.*]] = tosa.pad %arg0, %[[VAR0]], %[[VAR1]] : (tensor<256x1x1x5x1xf16>, !tosa.shape<10>, tensor<f16>) -> tensor<256x1x1x5x2xf16> |
161 | 161 | // CHECK: return %[[VAR2]] : tensor<256x1x1x5x2xf16> |
162 | 162 | } |
| 163 | + |
| 164 | +// ----- |
| 165 | + |
// Non-constant (runtime) scalar f32 pad value: the pad-value function argument
// is forwarded unchanged as the third operand of tosa.pad, while the constant
// ONNX pads [b0,b1,b2,b3,e0,e1,e2,e3] are interleaved into TOSA's per-axis
// [b0,e0,b1,e1,...] order via tosa.const_shape.
| 166 | +func.func @test_pad_f32_non_constant_padval(%arg0: tensor<20x16x44x32xf32>, %arg1: tensor<f32>) -> tensor<24x22x52x42xf32> { |
| 167 | + %noval = "onnx.NoValue"() {value} : () -> none |
| 168 | + %0 = "onnx.Constant"() {value = dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>} : () -> tensor<8xi64> |
| 169 | + %2 = "onnx.Pad"(%arg0, %0, %arg1, %noval) {mode = "constant"} : (tensor<20x16x44x32xf32>, tensor<8xi64>, tensor<f32>, none) -> tensor<24x22x52x42xf32> |
| 170 | + return %2 : tensor<24x22x52x42xf32> |
| 171 | +// CHECK-LABEL: func.func @test_pad_f32_non_constant_padval |
| 172 | +// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<20x16x44x32xf32>, [[PARAM_1_:%.+]]: tensor<f32>) -> tensor<24x22x52x42xf32> { |
| 173 | +// CHECK: [[VAR_0_:%.+]] = tosa.const_shape {value = dense<[0, 4, 1, 5, 2, 6, 3, 7]> : tensor<8xindex>} : () -> !tosa.shape<8> |
| 174 | +// CHECK: [[VAR_1_:%.+]] = tosa.pad [[PARAM_0_]], [[VAR_0_]], [[PARAM_1_]] : (tensor<20x16x44x32xf32>, !tosa.shape<8>, tensor<f32>) -> tensor<24x22x52x42xf32> |
| 175 | +// CHECK: return [[VAR_1_]] : tensor<24x22x52x42xf32> |
| 176 | +} |
| 177 | + |
| 178 | +// ----- |
| 179 | + |
// Non-constant 1-D (tensor<1xf32>) pad value: it is first reshaped to a rank-0
// scalar (tosa.reshape with an empty new_shape) before feeding tosa.pad, since
// tosa.pad takes a scalar pad constant. Capture names follow the file-wide
// VAR_n_ convention (previously this test mixed in VAL_1_).
| 180 | +func.func @test_pad_f32_non_constant_1Dpadval(%arg0: tensor<20x16x44x32xf32>, %arg1: tensor<1xf32>) -> tensor<24x22x52x42xf32> { |
| 181 | + %noval = "onnx.NoValue"() {value} : () -> none |
| 182 | + %0 = "onnx.Constant"() {value = dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>} : () -> tensor<8xi64> |
| 183 | + %2 = "onnx.Pad"(%arg0, %0, %arg1, %noval) {mode = "constant"} : (tensor<20x16x44x32xf32>, tensor<8xi64>, tensor<1xf32>, none) -> tensor<24x22x52x42xf32> |
| 184 | + return %2 : tensor<24x22x52x42xf32> |
| 185 | +// CHECK-LABEL: func.func @test_pad_f32_non_constant_1Dpadval |
| 186 | +// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<20x16x44x32xf32>, [[PARAM_1_:%.+]]: tensor<1xf32>) -> tensor<24x22x52x42xf32> { |
| 187 | +// CHECK-DAG: [[VAR_0_:%.+]] = tosa.const_shape {value = dense<[0, 4, 1, 5, 2, 6, 3, 7]> : tensor<8xindex>} : () -> !tosa.shape<8> |
| 188 | +// CHECK-DAG: [[VAR_1_:%.+]] = tosa.reshape [[PARAM_1_]] {new_shape = array<i64>} : (tensor<1xf32>) -> tensor<f32> |
| 189 | +// CHECK: [[VAR_2_:%.+]] = tosa.pad [[PARAM_0_]], [[VAR_0_]], [[VAR_1_]] : (tensor<20x16x44x32xf32>, !tosa.shape<8>, tensor<f32>) -> tensor<24x22x52x42xf32> |
| 190 | +// CHECK: return [[VAR_2_]] : tensor<24x22x52x42xf32> |
| 191 | +} |
| 192 | + |
| 193 | +// ----- |
| 194 | + |
// Integer (i64) variant: a non-constant scalar i64 pad value is forwarded
// directly to tosa.pad, exercising the same lowering path as the f32 test
// with an integer element type.
| 195 | +func.func @test_pad_i64_non_constant_padval(%arg0: tensor<20x16x44x32xi64>, %arg1: tensor<i64>) -> tensor<24x22x52x42xi64> { |
| 196 | + %noval = "onnx.NoValue"() {value} : () -> none |
| 197 | + %0 = "onnx.Constant"() {value = dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>} : () -> tensor<8xi64> |
| 198 | + %2 = "onnx.Pad"(%arg0, %0, %arg1, %noval) {mode = "constant"} : (tensor<20x16x44x32xi64>, tensor<8xi64>, tensor<i64>, none) -> tensor<24x22x52x42xi64> |
| 199 | + return %2 : tensor<24x22x52x42xi64> |
| 200 | +// CHECK-LABEL: func.func @test_pad_i64_non_constant_padval |
| 201 | +// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<20x16x44x32xi64>, [[PARAM_1_:%.+]]: tensor<i64>) -> tensor<24x22x52x42xi64> { |
| 202 | +// CHECK: [[VAR_0_:%.+]] = tosa.const_shape {value = dense<[0, 4, 1, 5, 2, 6, 3, 7]> : tensor<8xindex>} : () -> !tosa.shape<8> |
| 203 | +// CHECK: [[VAR_1_:%.+]] = tosa.pad [[PARAM_0_]], [[VAR_0_]], [[PARAM_1_]] : (tensor<20x16x44x32xi64>, !tosa.shape<8>, tensor<i64>) -> tensor<24x22x52x42xi64> |
| 204 | +// CHECK: return [[VAR_1_]] : tensor<24x22x52x42xi64> |
| 205 | +} |
| 206 | + |
| 207 | +// ----- |

// Half-precision (f16) variant: a non-constant scalar f16 pad value is
// forwarded directly to tosa.pad. (Blank line added after the split marker
// to match the separator style used by every other test in this file.)
func.func @test_pad_f16_non_constant_padval(%arg0: tensor<20x16x44x32xf16>, %arg1: tensor<f16>) -> tensor<24x22x52x42xf16> { |
| 209 | + %noval = "onnx.NoValue"() {value} : () -> none |
| 210 | + %0 = "onnx.Constant"() {value = dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>} : () -> tensor<8xi64> |
| 211 | + %2 = "onnx.Pad"(%arg0, %0, %arg1, %noval) {mode = "constant"} : (tensor<20x16x44x32xf16>, tensor<8xi64>, tensor<f16>, none) -> tensor<24x22x52x42xf16> |
| 212 | + return %2 : tensor<24x22x52x42xf16> |
| 213 | +// CHECK-LABEL: func.func @test_pad_f16_non_constant_padval |
| 214 | +// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<20x16x44x32xf16>, [[PARAM_1_:%.+]]: tensor<f16>) -> tensor<24x22x52x42xf16> { |
| 215 | +// CHECK: [[VAR_0_:%.+]] = tosa.const_shape {value = dense<[0, 4, 1, 5, 2, 6, 3, 7]> : tensor<8xindex>} : () -> !tosa.shape<8> |
| 216 | +// CHECK: [[VAR_1_:%.+]] = tosa.pad [[PARAM_0_]], [[VAR_0_]], [[PARAM_1_]] : (tensor<20x16x44x32xf16>, !tosa.shape<8>, tensor<f16>) -> tensor<24x22x52x42xf16> |
| 217 | +// CHECK: return [[VAR_1_]] : tensor<24x22x52x42xf16> |
| 218 | +} |
0 commit comments