@@ -26,7 +26,7 @@ def symbolic_fn(g, input, output_size, *args):
         if scales is None:
             if 'ONNX_BACKEND' in os.environ and os.environ[
                     'ONNX_BACKEND'] == 'TensorRT':
-                input_size = input.type().sizes()
+                input_size = input.scalar_type().sizes()
                 # slice the first two dim
                 input_size = input_size[:2]
                 # convert output_size to int type
@@ -132,13 +132,13 @@ def constant_pad_nd(g, input, padding, value=None):
     mode = 'constant'
     value = sym_help._maybe_get_scalar(value)
     value = sym_help._if_scalar_type_as(g, value, input)
-    pad = _prepare_onnx_paddings(g, input.type().dim(), padding)
+    pad = _prepare_onnx_paddings(g, input.scalar_type().dim(), padding)
     return g.op('Pad', input, pad, value, mode_s=mode)


 def reflection_pad(g, input, padding):
     mode = 'reflect'
-    paddings = _prepare_onnx_paddings(g, input.type().dim(), padding)
+    paddings = _prepare_onnx_paddings(g, input.scalar_type().dim(), padding)
     return g.op('Pad', input, paddings, mode_s=mode)


@@ -294,7 +294,7 @@ def one_hot(g, self, num_classes):

 @parse_args('v', 'i', 'none')
 def softmax(g, input, dim, dtype=None):
-    input_dim = input.type().dim()
+    input_dim = input.scalar_type().dim()
     if input_dim:
         # TODO: remove this as onnx opset 11 spec allows negative axes
         if dim < 0:
@@ -332,7 +332,7 @@ def symbolic_fn(g, input, output_size):
                 return g.op('GlobalMaxPool', input), None
             raise NotImplementedError(
                 '[Adaptive pool]:input size not accessible')
-        dim = input.type().sizes()[2:]
+        dim = input.scalar_type().sizes()[2:]
         if output_size == [1] * len(output_size) and type == 'MaxPool':
             return g.op('GlobalMaxPool', input), None

@@ -375,7 +375,7 @@ def new_full(g,
             pin_memory=False):
     from torch.onnx.symbolic_opset9 import full
     if dtype is None and self.isCompleteTensor():
-        dtype = self.type().scalarType()
+        dtype = self.scalar_type().scalarType()
         dtype = sym_help.scalar_type_to_onnx.index(
             sym_help.cast_pytorch_to_onnx[dtype])
     return full(g, size, fill_value, dtype, layout, device, pin_memory)
0 commit comments