@@ -3712,3 +3712,30 @@ func.func @test_qlinearadd(%arg0: !torch.vtensor<[1,4096],ui8>, %arg1: !torch.vt
   // CHECK: return %[[OUT]]
   return %0 : !torch.vtensor<[1,4096],ui8>
 }
+
+// -----
+
+// CHECK-LABEL: @test_qlinearleakyrelu(
+// CHECK-SAME: %[[X:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[?,32,?,?],ui8>,
+// CHECK-SAME: %[[X_SCALE:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],f32>,
+// CHECK-SAME: %[[X_ZERO_POINT:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],ui8>,
+// CHECK-SAME: %[[Y_SCALE:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],f32>,
+// CHECK-SAME: %[[Y_ZERO_POINT:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[],ui8>) -> !torch.vtensor<[?,32,?,?],ui8>
+func.func @test_qlinearleakyrelu(%arg0: !torch.vtensor<[?,32,?,?],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[],f32>, %arg4: !torch.vtensor<[],ui8>) -> !torch.vtensor<[?,32,?,?],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  %0 = torch.operator "onnx.QLinearLeakyRelu"(%arg0, %arg1, %arg2, %arg3, %arg4) {torch.onnx.alpha = 1.000000e-01 : f32} : (!torch.vtensor<[?,32,?,?],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[?,32,?,?],ui8>
+  // CHECK-DAG: %[[EMPTY:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
+  // CHECK-DAG: %[[XSCALE:.+]] = torch.aten.item %[[X_SCALE]] : !torch.vtensor<[],f32> -> !torch.float
+  // CHECK-DAG: %[[XZP:.+]] = torch.aten.item %[[X_ZERO_POINT]] : !torch.vtensor<[],ui8> -> !torch.int
+  // CHECK-DAG: %[[EMPTY_0:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
+  // CHECK-DAG: %[[YSCALE:.+]] = torch.aten.item %[[Y_SCALE]] : !torch.vtensor<[],f32> -> !torch.float
+  // CHECK-DAG: %[[YZP:.+]] = torch.aten.item %[[Y_ZERO_POINT]] : !torch.vtensor<[],ui8> -> !torch.int
+  // CHECK-DAG: %[[X_QUANT:.+]] = torch.aten._make_per_tensor_quantized_tensor %[[X]], %[[XSCALE]], %[[XZP]] : !torch.vtensor<[?,32,?,?],ui8>, !torch.float, !torch.int -> !torch.vtensor<[?,32,?,?],!torch.quint8>
+  // CHECK: %[[X_F32:.+]] = torch.aten.dequantize.self %[[X_QUANT]] : !torch.vtensor<[?,32,?,?],!torch.quint8> -> !torch.vtensor<[?,32,?,?],f32>
+  // CHECK: %[[ALPHA:.+]] = torch.constant.float 0.10000000149011612
+  // CHECK: %[[LEAKYRELU:.+]] = torch.aten.leaky_relu %[[X_F32]], %[[ALPHA]] : !torch.vtensor<[?,32,?,?],f32>, !torch.float -> !torch.vtensor<[?,32,?,?],f32>
+  // CHECK: %[[DTY:.+]] = torch.constant.int 13
+  // CHECK: %[[QO:.+]] = torch.aten.quantize_per_tensor %[[LEAKYRELU]], %[[YSCALE]], %[[YZP]], %[[DTY]] : !torch.vtensor<[?,32,?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,32,?,?],!torch.quint8>
+  // CHECK: %[[OUT:.+]] = torch.aten.int_repr %[[QO]] : !torch.vtensor<[?,32,?,?],!torch.quint8> -> !torch.vtensor<[?,32,?,?],ui8>
+  // CHECK: return %[[OUT]]
+  return %0 : !torch.vtensor<[?,32,?,?],ui8>
+}
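
For intuition, the pattern these CHECK lines verify is the standard QLinear lowering: reinterpret the raw ui8 input as a per-tensor quantized tensor, dequantize to f32, apply leaky_relu with the op's alpha, then requantize with the output scale/zero point (the constant 13 is PyTorch's ScalarType code for quint8) and strip back to raw ui8. A minimal eager-mode PyTorch sketch of the same computation, using the real torch quantization APIs the lowering targets (the helper name and example qparams are illustrative, not part of the patch):

```python
import torch
import torch.nn.functional as F

def qlinear_leaky_relu(x_u8, x_scale, x_zp, y_scale, y_zp, alpha=0.1):
    # Reinterpret the raw ui8 payload as a quantized tensor, mirroring
    # torch.aten._make_per_tensor_quantized_tensor in the CHECK lines.
    x_q = torch._make_per_tensor_quantized_tensor(x_u8, x_scale, x_zp)
    # Dequantize to f32 and apply leaky_relu in floating point.
    y_f32 = F.leaky_relu(torch.dequantize(x_q), negative_slope=alpha)
    # Requantize with the output qparams (dtype 13 == torch.quint8) and
    # drop the quantization wrapper back to raw ui8, like int_repr above.
    y_q = torch.quantize_per_tensor(y_f32, y_scale, y_zp, torch.quint8)
    return y_q.int_repr()

x = torch.randint(0, 256, (1, 32, 8, 8), dtype=torch.uint8)
y = qlinear_leaky_relu(x, 0.02, 128, 0.02, 128)
```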