Skip to content

Commit 4bc2be2

Browse files
committed
test: add testcase
1 parent 23803bf commit 4bc2be2

File tree

1 file changed

+24
-0
lines changed

1 file changed

+24
-0
lines changed

test/mlir/onnx/onnx_canonicalization.mlir

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1387,6 +1387,30 @@ func.func @expand_pow_into_constant(%arg0: tensor<3x4x5xf32>) -> tensor<3x4x5xf3
13871387
// CHECK: onnx.Return [[VAR_0_]] : tensor<3x4x5xf32>
13881388
// CHECK: }
13891389
}
// -----

// Checks that onnx.Pow whose exponent dequantizes to exactly 2.0
// (constant 64 * scale 3.125e-02 = 2.0) is canonicalized into a single
// onnx.Mul of the dequantized input with itself, while the surrounding
// QuantizeLinear/DequantizeLinear (QDQ) ops on the data path are preserved.
// The DequantizeLinear feeding the exponent becomes dead and is removed.
func.func @test_pow_into_mul_with_qdq(%arg0: tensor<1x3x80x80x2xi8>) -> tensor<1x3x80x80x2xi8> {
  %0 = onnx.Constant dense<2.500000e-01> : tensor<f32>
  %1 = onnx.Constant dense<3.125000e-02> : tensor<f32>
  %2 = onnx.Constant dense<64> : tensor<i8>
  %3 = onnx.Constant dense<0> : tensor<i8>
  %6 = "onnx.DequantizeLinear"(%arg0, %0, %3) {axis = 1 : si64, block_size = 0 : si64} : (tensor<1x3x80x80x2xi8>, tensor<f32>, tensor<i8>) -> tensor<1x3x80x80x2xf32>
  %7 = "onnx.DequantizeLinear"(%2, %1, %3) {axis = 1 : si64, block_size = 0 : si64} : (tensor<i8>, tensor<f32>, tensor<i8>) -> tensor<f32>
  %8 = "onnx.Pow"(%6, %7) : (tensor<1x3x80x80x2xf32>, tensor<f32>) -> tensor<1x3x80x80x2xf32>
  %9 = "onnx.QuantizeLinear"(%8, %1, %3) {axis = 1 : si64, block_size = 0 : si64, output_dtype = 0 : si64, saturate = 1 : si64} : (tensor<1x3x80x80x2xf32>, tensor<f32>, tensor<i8>) -> tensor<1x3x80x80x2xi8>
  return %9 : tensor<1x3x80x80x2xi8>

// CHECK-LABEL:  func.func @test_pow_into_mul_with_qdq
// CHECK-SAME:   ([[PARAM_0_:%.+]]: tensor<1x3x80x80x2xi8>) -> tensor<1x3x80x80x2xi8> {
// CHECK:        [[VAR_0_:%.+]] = onnx.Constant dense<2.500000e-01> : tensor<f32>
// CHECK:        [[VAR_1_:%.+]] = onnx.Constant dense<3.125000e-02> : tensor<f32>
// CHECK:        [[VAR_2_:%.+]] = onnx.Constant dense<0> : tensor<i8>
// CHECK:        [[VAR_3_:%.+]] = "onnx.DequantizeLinear"([[PARAM_0_]], [[VAR_0_]], [[VAR_2_]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<1x3x80x80x2xi8>, tensor<f32>, tensor<i8>) -> tensor<1x3x80x80x2xf32>
// CHECK:        [[VAR_4_:%.+]] = "onnx.Mul"([[VAR_3_]], [[VAR_3_]]) : (tensor<1x3x80x80x2xf32>, tensor<1x3x80x80x2xf32>) -> tensor<1x3x80x80x2xf32>
// CHECK:        [[VAR_5_:%.+]] = "onnx.QuantizeLinear"([[VAR_4_]], [[VAR_1_]], [[VAR_2_]]) {axis = 1 : si64, block_size = 0 : si64, output_dtype = 0 : si64, saturate = 1 : si64} : (tensor<1x3x80x80x2xf32>, tensor<f32>, tensor<i8>) -> tensor<1x3x80x80x2xi8>
// CHECK:        return [[VAR_5_]] : tensor<1x3x80x80x2xi8>
// CHECK:        }
}
13901414

13911415
// -----
13921416

0 commit comments

Comments (0)