Commit 21cb45e

Merge branch 'feature/onnx-to-tosa' into xiao.add_remove_qdq_aroundop
2 parents: c78aca7 + 8aaef29

2 files changed (+132, -3 lines)

src/Dialect/ONNX/Transforms/Decompose.cpp

Lines changed: 4 additions & 3 deletions
@@ -1549,9 +1549,10 @@ Value decomposeIntoPhasedConvs(PatternRewriter &rewriter, Location loc,
   }
   auto onnxPadsValueConstant =
       getONNXConstOpFromVector(rewriter, loc, weightsPadValue);
-  RankedTensorType scalarTy = RankedTensorType::get({}, elementType);
-  Value onnxPaddingConstantZero = create.onnx.constant(
-      DenseElementsAttr::get(scalarTy, rewriter.getZeroAttr(elementType)));
+  auto weightsElementType = weightsType.getElementType();
+  RankedTensorType scalarTy = RankedTensorType::get({}, weightsElementType);
+  Value onnxPaddingConstantZero = create.onnx.constant(DenseElementsAttr::get(
+      scalarTy, rewriter.getZeroAttr(weightsElementType)));
 
   auto onnxAxisValueConstantNone = create.onnx.none();
   auto wts_shape = weightsType.getShape();
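
The change above is a type fix: the rank-0 zero constant fed to onnx.Pad as the weights' pad value is now built from the weight tensor's element type (`weightsType.getElementType()`) instead of the earlier `elementType`, so the pad stays well-typed when the weights are quantized (i8 in the new test below). A minimal standalone sketch of that construction, using only core MLIR APIs; the helper name and includes are illustrative, not onnx-mlir's own builder API:

// Sketch only: onnx-mlir builds this constant via its OnnxBuilder
// (create.onnx.constant); here we only show the attribute construction.
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"

static mlir::DenseElementsAttr makeZeroPadValueAttr(
    mlir::PatternRewriter &rewriter, mlir::RankedTensorType weightsType) {
  // Derive the scalar type from the weights (f32 in float graphs, i8 in QDQ
  // graphs) so the onnx.Pad pad value matches the tensor being padded.
  mlir::Type weightsElementType = weightsType.getElementType();
  auto scalarTy = mlir::RankedTensorType::get({}, weightsElementType);
  // Rank-0 splat holding zero of the matching element type.
  return mlir::DenseElementsAttr::get(
      scalarTy, rewriter.getZeroAttr(weightsElementType));
}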

test/mlir/onnx/onnx_decompose_convtranspose_phased_conv_qdq.mlir

Lines changed: 128 additions & 0 deletions
@@ -66,6 +66,134 @@ func.func @test_convtrans_stride11(%arg0: tensor<1x1x12x44xf32>, %arg1: tensor<1
 
 // -----
 
+func.func @test_convtrans_4phase_pads_0011(%arg0: tensor<1x128x10x16xf32>) -> tensor<1x32x20x32xf32> {
+  %0 = onnx.Constant dense<5.000000e-01> : tensor<f32>
+  %1 = onnx.Constant dense<1.000000e+00> : tensor<f32>
+  %2 = onnx.Constant dense<1.22070313E-4> : tensor<f32>
+  %3 = onnx.Constant dense<2> : tensor<i8>
+  %4 = onnx.Constant dense<2> : tensor<128x32x3x3xi8>
+  %5 = onnx.Constant dense<3.125000e-02> : tensor<f32>
+  %6 = onnx.Constant dense<2> : tensor<32xi8>
+  %7 = "onnx.DequantizeLinear"(%6, %5, %3) {axis = 1 : si64} : (tensor<32xi8>, tensor<f32>, tensor<i8>) -> tensor<32xf32>
+  %8 = "onnx.DequantizeLinear"(%4, %2, %3) {axis = 1 : si64} : (tensor<128x32x3x3xi8>, tensor<f32>, tensor<i8>) -> tensor<128x32x3x3xf32>
+  %9 = "onnx.QuantizeLinear"(%arg0, %1, %3) {axis = 1 : si64, saturate = 1 : si64} : (tensor<1x128x10x16xf32>, tensor<f32>, tensor<i8>) -> tensor<1x128x10x16xi8>
+  %10 = "onnx.DequantizeLinear"(%9, %1, %3) {axis = 1 : si64} : (tensor<1x128x10x16xi8>, tensor<f32>, tensor<i8>) -> tensor<1x128x10x16xf32>
+  %11 = "onnx.ConvTranspose"(%10, %8, %7) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], strides = [2, 2]} : (tensor<1x128x10x16xf32>, tensor<128x32x3x3xf32>, tensor<32xf32>) -> tensor<1x32x20x32xf32>
+  %12 = "onnx.QuantizeLinear"(%11, %0, %3) {axis = 1 : si64, saturate = 1 : si64} : (tensor<1x32x20x32xf32>, tensor<f32>, tensor<i8>) -> tensor<1x32x20x32xi8>
+  %13 = "onnx.DequantizeLinear"(%12, %0, %3) {axis = 1 : si64} : (tensor<1x32x20x32xi8>, tensor<f32>, tensor<i8>) -> tensor<1x32x20x32xf32>
+  onnx.Return %13 : tensor<1x32x20x32xf32>
+// CHECK-LABEL: func.func @test_convtrans_4phase_pads_0011(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x128x10x16xf32>) -> tensor<1x32x20x32xf32> {
+// CHECK: %[[VAL_1:.*]] = onnx.Constant dense<[1, 32, 20, 32]> : tensor<4xi64>
+// CHECK: %[[VAL_2:.*]] = onnx.Constant dense<[2, 2, 32, 10, 16]> : tensor<5xi64>
+// CHECK: %[[VAL_3:.*]] = onnx.Constant dense<[10, 18]> : tensor<2xi64>
+// CHECK: %[[VAL_4:.*]] = onnx.Constant dense<[12, 16]> : tensor<2xi64>
+// CHECK: %[[VAL_5:.*]] = onnx.Constant dense<[10, 16]> : tensor<2xi64>
+// CHECK: %[[VAL_6:.*]] = onnx.Constant dense<[12, 18]> : tensor<2xi64>
+// CHECK: %[[VAL_7:.*]] = onnx.Constant dense<5> : tensor<2xi64>
+// CHECK: %[[VAL_8:.*]] = onnx.Constant dense<1> : tensor<2xi64>
+// CHECK: %[[VAL_9:.*]] = onnx.Constant dense<[4, 5]> : tensor<2xi64>
+// CHECK: %[[VAL_10:.*]] = onnx.Constant dense<[0, 1]> : tensor<2xi64>
+// CHECK: %[[VAL_11:.*]] = onnx.Constant dense<[5, 4]> : tensor<2xi64>
+// CHECK: %[[VAL_12:.*]] = onnx.Constant dense<[1, 0]> : tensor<2xi64>
+// CHECK: %[[VAL_13:.*]] = onnx.Constant dense<4> : tensor<2xi64>
+// CHECK: %[[VAL_14:.*]] = onnx.Constant dense<0> : tensor<2xi64>
+// CHECK: %[[VAL_15:.*]] = onnx.Constant dense<2> : tensor<2xi64>
+// CHECK: %[[VAL_16:.*]] = onnx.Constant dense<[2, 3]> : tensor<2xi64>
+// CHECK: %[[VAL_17:.*]] = "onnx.NoValue"() {value} : () -> none
+// CHECK: %[[VAL_18:.*]] = onnx.Constant dense<0> : tensor<i8>
+// CHECK: %[[VAL_19:.*]] = onnx.Constant dense<[0, 0, 0, 0, 0, 0, 1, 1]> : tensor<8xi64>
+// CHECK: %[[VAL_20:.*]] = onnx.Constant dense<3> : tensor<3xi64>
+// CHECK: %[[VAL_21:.*]] = onnx.Constant dense<5.000000e-01> : tensor<f32>
+// CHECK: %[[VAL_22:.*]] = onnx.Constant dense<1.000000e+00> : tensor<f32>
+// CHECK: %[[VAL_23:.*]] = onnx.Constant dense<1.22070313E-4> : tensor<f32>
+// CHECK: %[[VAL_24:.*]] = onnx.Constant dense<2> : tensor<i8>
+// CHECK: %[[VAL_25:.*]] = onnx.Constant dense<2> : tensor<128x32x3x3xi8>
+// CHECK: %[[VAL_26:.*]] = onnx.Constant dense<3.125000e-02> : tensor<f32>
+// CHECK: %[[VAL_27:.*]] = onnx.Constant dense<2> : tensor<32xi8>
+// CHECK: %[[VAL_28:.*]] = "onnx.DequantizeLinear"(%[[VAL_27]], %[[VAL_26]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32xi8>, tensor<f32>, tensor<i8>) -> tensor<32xf32>
+// CHECK: %[[VAL_29:.*]] = "onnx.QuantizeLinear"(%[[VAL_0]], %[[VAL_22]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64, output_dtype = 0 : si64, saturate = 1 : si64} : (tensor<1x128x10x16xf32>, tensor<f32>, tensor<i8>) -> tensor<1x128x10x16xi8>
+// CHECK: %[[VAL_30:.*]] = "onnx.DequantizeLinear"(%[[VAL_29]], %[[VAL_22]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<1x128x10x16xi8>, tensor<f32>, tensor<i8>) -> tensor<1x128x10x16xf32>
+// CHECK: %[[VAL_31:.*]] = "onnx.Transpose"(%[[VAL_25]]) {perm = [2, 3, 0, 1]} : (tensor<128x32x3x3xi8>) -> tensor<3x3x128x32xi8>
+// CHECK: %[[VAL_32:.*]] = "onnx.ReverseSequence"(%[[VAL_31]], %[[VAL_20]]) {batch_axis = 1 : si64, time_axis = 0 : si64} : (tensor<3x3x128x32xi8>, tensor<3xi64>) -> tensor<3x3x128x32xi8>
+// CHECK: %[[VAL_33:.*]] = "onnx.ReverseSequence"(%[[VAL_32]], %[[VAL_20]]) {batch_axis = 0 : si64, time_axis = 1 : si64} : (tensor<3x3x128x32xi8>, tensor<3xi64>) -> tensor<3x3x128x32xi8>
+// CHECK: %[[VAL_34:.*]] = "onnx.Transpose"(%[[VAL_33]]) {perm = [2, 3, 0, 1]} : (tensor<3x3x128x32xi8>) -> tensor<128x32x3x3xi8>
+// CHECK: %[[VAL_35:.*]] = "onnx.Transpose"(%[[VAL_34]]) {perm = [1, 0, 2, 3]} : (tensor<128x32x3x3xi8>) -> tensor<32x128x3x3xi8>
+// CHECK: %[[VAL_36:.*]] = "onnx.Pad"(%[[VAL_35]], %[[VAL_19]], %[[VAL_18]], %[[VAL_17]]) {mode = "constant"} : (tensor<32x128x3x3xi8>, tensor<8xi64>, tensor<i8>, none) -> tensor<32x128x4x4xi8>
+// CHECK: %[[VAL_37:.*]] = "onnx.Slice"(%[[VAL_36]], %[[VAL_14]], %[[VAL_13]], %[[VAL_16]], %[[VAL_15]]) : (tensor<32x128x4x4xi8>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<32x128x2x2xi8>
+// CHECK: %[[VAL_38:.*]] = "onnx.Slice"(%[[VAL_36]], %[[VAL_12]], %[[VAL_11]], %[[VAL_16]], %[[VAL_15]]) : (tensor<32x128x4x4xi8>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<32x128x2x2xi8>
+// CHECK: %[[VAL_39:.*]] = "onnx.Slice"(%[[VAL_36]], %[[VAL_10]], %[[VAL_9]], %[[VAL_16]], %[[VAL_15]]) : (tensor<32x128x4x4xi8>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<32x128x2x2xi8>
+// CHECK: %[[VAL_40:.*]] = "onnx.Slice"(%[[VAL_36]], %[[VAL_8]], %[[VAL_7]], %[[VAL_16]], %[[VAL_15]]) : (tensor<32x128x4x4xi8>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<32x128x2x2xi8>
+// CHECK: %[[VAL_41:.*]] = "onnx.DequantizeLinear"(%[[VAL_40]], %[[VAL_23]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CHECK: %[[VAL_42:.*]] = "onnx.Conv"(%[[VAL_30]], %[[VAL_41]], %[[VAL_28]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CHECK: %[[VAL_43:.*]] = "onnx.DequantizeLinear"(%[[VAL_37]], %[[VAL_23]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CHECK: %[[VAL_44:.*]] = "onnx.Conv"(%[[VAL_30]], %[[VAL_43]], %[[VAL_28]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CHECK: %[[VAL_45:.*]] = "onnx.DequantizeLinear"(%[[VAL_38]], %[[VAL_23]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CHECK: %[[VAL_46:.*]] = "onnx.Conv"(%[[VAL_30]], %[[VAL_45]], %[[VAL_28]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CHECK: %[[VAL_47:.*]] = "onnx.DequantizeLinear"(%[[VAL_39]], %[[VAL_23]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CHECK: %[[VAL_48:.*]] = "onnx.Conv"(%[[VAL_30]], %[[VAL_47]], %[[VAL_28]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CHECK: %[[VAL_49:.*]] = "onnx.Slice"(%[[VAL_42]], %[[VAL_8]], %[[VAL_6]], %[[VAL_16]], %[[VAL_8]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CHECK: %[[VAL_50:.*]] = "onnx.Slice"(%[[VAL_44]], %[[VAL_14]], %[[VAL_5]], %[[VAL_16]], %[[VAL_8]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CHECK: %[[VAL_51:.*]] = "onnx.Slice"(%[[VAL_46]], %[[VAL_12]], %[[VAL_4]], %[[VAL_16]], %[[VAL_8]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CHECK: %[[VAL_52:.*]] = "onnx.Slice"(%[[VAL_48]], %[[VAL_10]], %[[VAL_3]], %[[VAL_16]], %[[VAL_8]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CHECK: %[[VAL_53:.*]] = "onnx.Concat"(%[[VAL_50]], %[[VAL_52]], %[[VAL_51]], %[[VAL_49]]) {axis = 1 : si64} : (tensor<1x32x10x16xf32>, tensor<1x32x10x16xf32>, tensor<1x32x10x16xf32>, tensor<1x32x10x16xf32>) -> tensor<1x128x10x16xf32>
+// CHECK: %[[VAL_54:.*]] = "onnx.Reshape"(%[[VAL_53]], %[[VAL_2]]) {allowzero = 0 : si64} : (tensor<1x128x10x16xf32>, tensor<5xi64>) -> tensor<2x2x32x10x16xf32>
+// CHECK: %[[VAL_55:.*]] = "onnx.Transpose"(%[[VAL_54]]) {perm = [2, 3, 0, 4, 1]} : (tensor<2x2x32x10x16xf32>) -> tensor<32x10x2x16x2xf32>
+// CHECK: %[[VAL_56:.*]] = "onnx.Reshape"(%[[VAL_55]], %[[VAL_1]]) {allowzero = 0 : si64} : (tensor<32x10x2x16x2xf32>, tensor<4xi64>) -> tensor<1x32x20x32xf32>
+// CHECK: %[[VAL_57:.*]] = "onnx.QuantizeLinear"(%[[VAL_56]], %[[VAL_21]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64, output_dtype = 0 : si64, saturate = 1 : si64} : (tensor<1x32x20x32xf32>, tensor<f32>, tensor<i8>) -> tensor<1x32x20x32xi8>
+// CHECK: %[[VAL_58:.*]] = "onnx.DequantizeLinear"(%[[VAL_57]], %[[VAL_21]], %[[VAL_24]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<1x32x20x32xi8>, tensor<f32>, tensor<i8>) -> tensor<1x32x20x32xf32>
+// CHECK: onnx.Return %[[VAL_58]] : tensor<1x32x20x32xf32>
+// CHECK: }
+// CONSTPROP-LABEL: func.func @test_convtrans_4phase_pads_0011(
+// CONSTPROP-SAME: %[[VAL_0:.*]]: tensor<1x128x10x16xf32>) -> tensor<1x32x20x32xf32> {
+// CONSTPROP: %[[VAL_1:.*]] = onnx.Constant dense<{{.*}}> : tensor<32x128x2x2xi8>
+// CONSTPROP: %[[VAL_2:.*]] = onnx.Constant dense<{{.*}}> : tensor<32x128x2x2xi8>
+// CONSTPROP: %[[VAL_3:.*]] = onnx.Constant dense<{{.*}}> : tensor<32x128x2x2xi8>
+// CONSTPROP: %[[VAL_4:.*]] = onnx.Constant dense<2> : tensor<32x128x2x2xi8>
+// CONSTPROP: %[[VAL_5:.*]] = onnx.Constant dense<[1, 32, 20, 32]> : tensor<4xi64>
+// CONSTPROP: %[[VAL_6:.*]] = onnx.Constant dense<[2, 2, 32, 10, 16]> : tensor<5xi64>
+// CONSTPROP: %[[VAL_7:.*]] = onnx.Constant dense<[10, 18]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_8:.*]] = onnx.Constant dense<[12, 16]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_9:.*]] = onnx.Constant dense<[10, 16]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_10:.*]] = onnx.Constant dense<[12, 18]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_11:.*]] = onnx.Constant dense<1> : tensor<2xi64>
+// CONSTPROP: %[[VAL_12:.*]] = onnx.Constant dense<[0, 1]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_13:.*]] = onnx.Constant dense<[1, 0]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_14:.*]] = onnx.Constant dense<0> : tensor<2xi64>
+// CONSTPROP: %[[VAL_15:.*]] = onnx.Constant dense<[2, 3]> : tensor<2xi64>
+// CONSTPROP: %[[VAL_16:.*]] = onnx.Constant dense<5.000000e-01> : tensor<f32>
+// CONSTPROP: %[[VAL_17:.*]] = onnx.Constant dense<1.000000e+00> : tensor<f32>
+// CONSTPROP: %[[VAL_18:.*]] = onnx.Constant dense<1.22070313E-4> : tensor<f32>
+// CONSTPROP: %[[VAL_19:.*]] = onnx.Constant dense<2> : tensor<i8>
+// CONSTPROP: %[[VAL_20:.*]] = onnx.Constant dense<3.125000e-02> : tensor<f32>
+// CONSTPROP: %[[VAL_21:.*]] = onnx.Constant dense<2> : tensor<32xi8>
+// CONSTPROP: %[[VAL_22:.*]] = "onnx.DequantizeLinear"(%[[VAL_21]], %[[VAL_20]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32xi8>, tensor<f32>, tensor<i8>) -> tensor<32xf32>
+// CONSTPROP: %[[VAL_23:.*]] = "onnx.QuantizeLinear"(%[[VAL_0]], %[[VAL_17]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64, output_dtype = 0 : si64, saturate = 1 : si64} : (tensor<1x128x10x16xf32>, tensor<f32>, tensor<i8>) -> tensor<1x128x10x16xi8>
+// CONSTPROP: %[[VAL_24:.*]] = "onnx.DequantizeLinear"(%[[VAL_23]], %[[VAL_17]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<1x128x10x16xi8>, tensor<f32>, tensor<i8>) -> tensor<1x128x10x16xf32>
+// CONSTPROP: %[[VAL_25:.*]] = "onnx.DequantizeLinear"(%[[VAL_1]], %[[VAL_18]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CONSTPROP: %[[VAL_26:.*]] = "onnx.Conv"(%[[VAL_24]], %[[VAL_25]], %[[VAL_22]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CONSTPROP: %[[VAL_27:.*]] = "onnx.DequantizeLinear"(%[[VAL_4]], %[[VAL_18]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CONSTPROP: %[[VAL_28:.*]] = "onnx.Conv"(%[[VAL_24]], %[[VAL_27]], %[[VAL_22]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CONSTPROP: %[[VAL_29:.*]] = "onnx.DequantizeLinear"(%[[VAL_3]], %[[VAL_18]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CONSTPROP: %[[VAL_30:.*]] = "onnx.Conv"(%[[VAL_24]], %[[VAL_29]], %[[VAL_22]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CONSTPROP: %[[VAL_31:.*]] = "onnx.DequantizeLinear"(%[[VAL_2]], %[[VAL_18]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<32x128x2x2xi8>, tensor<f32>, tensor<i8>) -> tensor<32x128x2x2xf32>
+// CONSTPROP: %[[VAL_32:.*]] = "onnx.Conv"(%[[VAL_24]], %[[VAL_31]], %[[VAL_22]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<1x128x10x16xf32>, tensor<32x128x2x2xf32>, tensor<32xf32>) -> tensor<1x32x11x17xf32>
+// CONSTPROP: %[[VAL_33:.*]] = "onnx.Slice"(%[[VAL_26]], %[[VAL_11]], %[[VAL_10]], %[[VAL_15]], %[[VAL_11]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CONSTPROP: %[[VAL_34:.*]] = "onnx.Slice"(%[[VAL_28]], %[[VAL_14]], %[[VAL_9]], %[[VAL_15]], %[[VAL_11]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CONSTPROP: %[[VAL_35:.*]] = "onnx.Slice"(%[[VAL_30]], %[[VAL_13]], %[[VAL_8]], %[[VAL_15]], %[[VAL_11]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CONSTPROP: %[[VAL_36:.*]] = "onnx.Slice"(%[[VAL_32]], %[[VAL_12]], %[[VAL_7]], %[[VAL_15]], %[[VAL_11]]) : (tensor<1x32x11x17xf32>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<1x32x10x16xf32>
+// CONSTPROP: %[[VAL_37:.*]] = "onnx.Concat"(%[[VAL_34]], %[[VAL_36]], %[[VAL_35]], %[[VAL_33]]) {axis = 1 : si64} : (tensor<1x32x10x16xf32>, tensor<1x32x10x16xf32>, tensor<1x32x10x16xf32>, tensor<1x32x10x16xf32>) -> tensor<1x128x10x16xf32>
+// CONSTPROP: %[[VAL_38:.*]] = "onnx.Reshape"(%[[VAL_37]], %[[VAL_6]]) {allowzero = 0 : si64} : (tensor<1x128x10x16xf32>, tensor<5xi64>) -> tensor<2x2x32x10x16xf32>
+// CONSTPROP: %[[VAL_39:.*]] = "onnx.Transpose"(%[[VAL_38]]) {perm = [2, 3, 0, 4, 1]} : (tensor<2x2x32x10x16xf32>) -> tensor<32x10x2x16x2xf32>
+// CONSTPROP: %[[VAL_40:.*]] = "onnx.Reshape"(%[[VAL_39]], %[[VAL_5]]) {allowzero = 0 : si64} : (tensor<32x10x2x16x2xf32>, tensor<4xi64>) -> tensor<1x32x20x32xf32>
+// CONSTPROP: %[[VAL_41:.*]] = "onnx.QuantizeLinear"(%[[VAL_40]], %[[VAL_16]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64, output_dtype = 0 : si64, saturate = 1 : si64} : (tensor<1x32x20x32xf32>, tensor<f32>, tensor<i8>) -> tensor<1x32x20x32xi8>
+// CONSTPROP: %[[VAL_42:.*]] = "onnx.DequantizeLinear"(%[[VAL_41]], %[[VAL_16]], %[[VAL_19]]) {axis = 1 : si64, block_size = 0 : si64} : (tensor<1x32x20x32xi8>, tensor<f32>, tensor<i8>) -> tensor<1x32x20x32xf32>
+// CONSTPROP: onnx.Return %[[VAL_42]] : tensor<1x32x20x32xf32>
+// CONSTPROP: }
+}
+
+// -----
+
 func.func @test_convtrans_stride11_with_relu(%arg0: tensor<1x1x12x44xf32>, %arg1: tensor<1x1x4x16xf32>) -> tensor<1x1x13x57xf32> {
   %0 = onnx.Constant dense<5.000000e-01> : tensor<f32>
   %1 = onnx.Constant dense<1.000000e+00> : tensor<f32>
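
As a sanity check on the shapes the CHECK/CONSTPROP lines expect: they follow from the standard ONNX output-size formulas. The ConvTranspose in the test (10x16 input, 3x3 kernel, strides [2, 2], pads [0, 0, 1, 1]) yields 20x32, and each phase Conv (2x2 kernel, pads [1, 1, 1, 1], stride 1) yields 11x17 before being sliced back to 10x16. A small standalone C++ sketch, illustrative only and independent of onnx-mlir, that reproduces those numbers:

#include <cstdio>

// Standard ONNX output-size formulas (NOTSET auto_pad, dilation 1, no output_padding).
int convTransposeOut(int in, int stride, int kernel, int padBegin, int padEnd) {
  return (in - 1) * stride + kernel - padBegin - padEnd;
}
int convOut(int in, int kernel, int padBegin, int padEnd, int stride) {
  return (in + padBegin + padEnd - kernel) / stride + 1;
}

int main() {
  // ConvTranspose in the test: H/W input 10x16, kernel 3x3, strides [2, 2],
  // pads = [0, 0, 1, 1], i.e. H: begin 0 / end 1, W: begin 0 / end 1.
  int outH = convTransposeOut(10, 2, 3, 0, 1); // 20
  int outW = convTransposeOut(16, 2, 3, 0, 1); // 32
  // Each phase Conv: kernel 2x2, pads [1, 1, 1, 1], stride 1 on the 10x16 input.
  int phaseH = convOut(10, 2, 1, 1, 1); // 11, later sliced to 10
  int phaseW = convOut(16, 2, 1, 1, 1); // 17, later sliced to 16
  std::printf("ConvTranspose: %dx%d, phase Conv: %dx%d\n", outH, outW, phaseH, phaseW);
  return 0;
}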
