@@ -325,6 +325,46 @@ def test_shape_qlinear_softmax(self):
325325 [1 , 32 ],
326326 "Wrong shape inferred for quantized network output" )
327327
328+ def test_shape_qlinear_conv_transpose (self ):
329+ model = self .get_model (
330+ helper .make_node (
331+ "QLinearConvTranspose" ,
332+ inputs = [
333+ "input" ,
334+ "input_scale" ,
335+ "input_zero_point" ,
336+ "conv_transpose_wt_quantized" ,
337+ "weight_scale" ,
338+ "weight_zero_point" ,
339+ "conv_transpose_out_scale" ,
340+ "conv_transpose_out_zero_point" ,
341+ "conv_transpose_bias"
342+ ],
343+ outputs = ["output" ],
344+ name = "quant_node" ,
345+ domain = "com.microsoft" ,
346+ auto_pad = b'NOTSET' ,
347+ dilations = [1 , 1 ],
348+ group = 1 ,
349+ kernel_shape = [2 , 2 ],
350+ pads = [0 , 0 , 0 , 0 ],
351+ strides = [2 , 2 ]
352+ ),
353+ [1 , 32 , 14 , 14 ],
354+ [
355+ numpy_helper .from_array (np .array (0.007874015718698502 , dtype = "float32" ), name = "input_scale" ),
356+ numpy_helper .from_array (np .array (0 , dtype = "int8" ), name = "input_zero_point" ),
357+ numpy_helper .from_array (np .ones ([32 , 64 , 2 , 2 ]).astype ("int8" ), name = "conv_transpose_wt_quantized" ),
358+ numpy_helper .from_array (np .array (0.007874015718698502 , dtype = "float32" ), name = "weight_scale" ),
359+ numpy_helper .from_array (np .array (0 , dtype = "int8" ), name = "weight_zero_point" ),
360+ numpy_helper .from_array (np .array (0.007874015718698502 , dtype = "float32" ), name = "conv_transpose_out_scale" ),
361+ numpy_helper .from_array (np .array (0 , dtype = "int8" ), name = "conv_transpose_out_zero_point" ),
362+ numpy_helper .from_array (np .ones ([64 ]).astype ("int32" ), name = "conv_transpose_bias" ),
363+ ]
364+ )
365+ self .assertEqual (self .infer_out_shape (model ),
366+ [1 , 64 , 28 , 28 ],
367+ "Wrong shape inferred for quantized network output" )
328368
if __name__ == "__main__":
    # Run the full test suite when this file is executed directly.
    unittest.main()
0 commit comments