@@ -1329,6 +1329,85 @@ func.func @test_convtranspose(%arg0: !torch.vtensor<[1,1,3,3],f32>, %arg1: !torc
 
 // -----
 
+// CHECK-LABEL: @test_convtranspose_autopad_same_upper
+func.func @test_convtranspose_autopad_same_upper(%arg0: !torch.vtensor<[1,1,3,3],f32>, %arg1: !torch.vtensor<[1,2,4,4],f32>) -> !torch.vtensor<[1,2,6,6],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "user-test", torch.onnx_meta.producer_version = ""} {
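+  // For auto_pad = SAME_UPPER, ONNX targets output = input * stride = 3 * 2 = 6 per
+  // spatial dim, so total padding = stride*(in - 1) + (kernel - 1)*dilation + 1 - out
+  // = 2*2 + 4 - 6 = 2, which splits evenly into the symmetric [1, 1] padding below.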
+  // CHECK: %[[C1:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_0:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_1:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_2:.*]] = torch.constant.int 1
+  // CHECK: %[[C2:.*]] = torch.constant.int 2
+  // CHECK: %[[C2_3:.*]] = torch.constant.int 2
+  // CHECK: %[[C0:.*]] = torch.constant.int 0
+  // CHECK: %[[C0_4:.*]] = torch.constant.int 0
+  // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[C1]], %[[C1_0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[DILATIONS:.*]] = torch.prim.ListConstruct %[[C1_1]], %[[C1_2]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[C2]], %[[C2_3]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[OUTPUT_PADDING:.*]] = torch.prim.ListConstruct %[[C0]], %[[C0_4]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[TRANSPOSED:.*]] = torch.constant.bool true
+  // CHECK: %[[BIAS:.*]] = torch.constant.none
+  // CHECK: %[[GROUPS:.*]] = torch.constant.int 1
+  // CHECK: torch.aten.convolution %arg0, %arg1, %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],f32>, !torch.vtensor<[1,2,4,4],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2,6,6],f32>
+  %4 = torch.operator "onnx.ConvTranspose"(%arg0, %arg1) {torch.onnx.auto_pad = "SAME_UPPER", torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,1,3,3],f32>, !torch.vtensor<[1,2,4,4],f32>) -> !torch.vtensor<[1,2,6,6],f32>
+  return %4 : !torch.vtensor<[1,2,6,6],f32>
+}
+
+// -----
+
+// CHECK-LABEL: @test_convtranspose_autopad_same_lower
+func.func @test_convtranspose_autopad_same_lower(%arg0: !torch.vtensor<[1,1,3,3],f32>, %arg1: !torch.vtensor<[1,2,4,4],f32>) -> !torch.vtensor<[1,2,6,6],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "user-test", torch.onnx_meta.producer_version = ""} {
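+  // SAME_LOWER computes the same total padding per dim (2) as SAME_UPPER but places the
+  // larger half first; with an even split, both modes lower to identical [1, 1] padding.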
+  // CHECK: %[[C1:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_0:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_1:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_2:.*]] = torch.constant.int 1
+  // CHECK: %[[C2:.*]] = torch.constant.int 2
+  // CHECK: %[[C2_3:.*]] = torch.constant.int 2
+  // CHECK: %[[C0:.*]] = torch.constant.int 0
+  // CHECK: %[[C0_4:.*]] = torch.constant.int 0
+  // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[C1]], %[[C1_0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[DILATIONS:.*]] = torch.prim.ListConstruct %[[C1_1]], %[[C1_2]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[C2]], %[[C2_3]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[OUTPUT_PADDING:.*]] = torch.prim.ListConstruct %[[C0]], %[[C0_4]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[TRANSPOSED:.*]] = torch.constant.bool true
+  // CHECK: %[[BIAS:.*]] = torch.constant.none
+  // CHECK: %[[GROUPS:.*]] = torch.constant.int 1
+  // CHECK: torch.aten.convolution %arg0, %arg1, %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],f32>, !torch.vtensor<[1,2,4,4],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2,6,6],f32>
+  %4 = torch.operator "onnx.ConvTranspose"(%arg0, %arg1) {torch.onnx.auto_pad = "SAME_LOWER", torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,1,3,3],f32>, !torch.vtensor<[1,2,4,4],f32>) -> !torch.vtensor<[1,2,6,6],f32>
+  return %4 : !torch.vtensor<[1,2,6,6],f32>
+}
+
+// -----
+
+// CHECK-LABEL: @test_convtranspose_autopad_valid
+func.func @test_convtranspose_autopad_valid(%arg0: !torch.vtensor<[1,1,3,3],f32>, %arg1: !torch.vtensor<[1,2,4,4],f32>) -> !torch.vtensor<[1,2,8,8],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "user-test", torch.onnx_meta.producer_version = ""} {
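+  // auto_pad = VALID adds no padding, so output = (in - 1)*stride + kernel = 2*2 + 4 = 8,
+  // matching the [1,2,8,8] result type and the all-zero padding list below.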
+  // CHECK: %[[C0:.*]] = torch.constant.int 0
+  // CHECK: %[[C0_0:.*]] = torch.constant.int 0
+  // CHECK: %[[C1:.*]] = torch.constant.int 1
+  // CHECK: %[[C1_1:.*]] = torch.constant.int 1
+  // CHECK: %[[C2:.*]] = torch.constant.int 2
+  // CHECK: %[[C2_2:.*]] = torch.constant.int 2
+  // CHECK: %[[C0_3:.*]] = torch.constant.int 0
+  // CHECK: %[[C0_4:.*]] = torch.constant.int 0
+  // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[C0]], %[[C0_0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[DILATIONS:.*]] = torch.prim.ListConstruct %[[C1]], %[[C1_1]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[C2]], %[[C2_2]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[OUTPUT_PADDING:.*]] = torch.prim.ListConstruct %[[C0_3]], %[[C0_4]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[TRANSPOSED:.*]] = torch.constant.bool true
+  // CHECK: %[[BIAS:.*]] = torch.constant.none
+  // CHECK: %[[GROUPS:.*]] = torch.constant.int 1
+  // CHECK: torch.aten.convolution %arg0, %arg1, %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],f32>, !torch.vtensor<[1,2,4,4],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2,8,8],f32>
+  %4 = torch.operator "onnx.ConvTranspose"(%arg0, %arg1) {torch.onnx.auto_pad = "VALID", torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,1,3,3],f32>, !torch.vtensor<[1,2,4,4],f32>) -> !torch.vtensor<[1,2,8,8],f32>
+  return %4 : !torch.vtensor<[1,2,8,8],f32>
+}
+
+// -----
+
 // CHECK-LABEL: @test_batchnorm_epsilon
 func.func @test_batchnorm_epsilon(%arg0: !torch.vtensor<[2,3,4,5],f32>, %arg1: !torch.vtensor<[3],f32>, %arg2: !torch.vtensor<[3],f32>, %arg3: !torch.vtensor<[3],f32>, %arg4: !torch.vtensor<[3],f32>) -> !torch.vtensor<[2,3,4,5],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 15 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
   // CHECK: %[[FALSE:.*]] = torch.constant.bool false