@@ -604,7 +604,7 @@ def relu6_op(ctx, node, name, args):
     # since onnx does not have relu6, compose it with multiple ops.
     old_output = node.output[0]
     dtype = ctx.get_dtype(node.input[0])
-    dtype = utils.ONNX_TO_NUMPY_DTYPE[dtype] if dtype else np.float32
+    dtype = utils.map_onnx_to_numpy_type(dtype) if dtype else np.float32
     shape = ctx.get_shape(node.input[0])
     if -1 in shape:
         # if the shape has unknown dims we need to do something like this for opset < 8 (=no broadcast for min/max):
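The recurring change in this patch swaps direct `ONNX_TO_NUMPY_DTYPE[...]` dict lookups for a call to `utils.map_onnx_to_numpy_type(...)`. A minimal sketch of what such a wrapper could look like, assuming the same mapping table sits behind it; the table subset and error handling here are illustrative, not the actual tf2onnx internals:

    import numpy as np
    from onnx import TensorProto

    # Illustrative subset of the onnx-to-numpy dtype table.
    _ONNX_TO_NUMPY = {
        TensorProto.FLOAT: np.float32,
        TensorProto.DOUBLE: np.float64,
        TensorProto.INT32: np.int32,
        TensorProto.INT64: np.int64,
    }

    def map_onnx_to_numpy_type(onnx_dtype):
        # A function gives one central place to validate the dtype, instead of
        # a bare KeyError escaping from every call site that indexed the dict.
        try:
            return _ONNX_TO_NUMPY[onnx_dtype]
        except KeyError:
            raise ValueError("unsupported onnx dtype: {}".format(onnx_dtype))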
@@ -651,7 +651,7 @@ def relu6_op8(ctx, node, name, args):
     # since onnx does not have relu6, compose it with multiple ops.
     old_output = node.output[0]
     dtype = ctx.get_dtype(node.input[0])
-    dtype = utils.ONNX_TO_NUMPY_DTYPE[dtype] if dtype else np.float32
+    dtype = utils.map_onnx_to_numpy_type(dtype) if dtype else np.float32
     node.type = "Max"
     # const tensor 6
     six_name = utils.make_name(node.name)
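Both relu6 handlers note that onnx has no Relu6 op, so it is composed from Max against a const 0 and Min against a const 6. The same composition in plain numpy, as a quick sanity check:

    import numpy as np

    def relu6(x):
        # clamp to [0, 6]: Max(x, 0) followed by Min(., 6)
        return np.minimum(np.maximum(x, 0.0), 6.0)

    print(relu6(np.array([-1.0, 3.0, 8.0], dtype=np.float32)))  # [0. 3. 6.]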
@@ -687,7 +687,7 @@ def sign_op(ctx, node, name, args):
     utils.make_sure(node_dtype, "Dtype of {} is None".format(node.name))
     if node_dtype in [onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]:
         raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")
-    input_tensor_type = utils.ONNX_TO_NUMPY_DTYPE[node_dtype]
+    input_tensor_type = utils.map_onnx_to_numpy_type(node_dtype)
     zero_name = utils.make_name("{}_zero".format(node.name))
     ctx.make_const(zero_name, np.array(0, dtype=input_tensor_type))
     greater_node = ctx.make_node("Greater", [node.input[0], zero_name])
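The zero constant fed into `Greater` suggests the standard sign composition, sign(x) = (x > 0) - (x < 0); a sketch of that identity in numpy (the converter's exact node layout is not shown in this hunk):

    import numpy as np

    def sign(x):
        zero = np.array(0, dtype=x.dtype)
        # Greater/Less yield booleans; cast back to the input dtype and subtract.
        return (x > zero).astype(x.dtype) - (x < zero).astype(x.dtype)

    print(sign(np.array([-5.0, 0.0, 2.0])))  # [-1.  0.  1.]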
@@ -1280,7 +1280,7 @@ def fused_batchnorm_op7(ctx, node, name, args):
     scale_shape = ctx.get_shape(node.input[1])
     mean_shape = ctx.get_shape(node.input[3])
     var_shape = ctx.get_shape(node.input[4])
-    val_type = utils.ONNX_TO_NUMPY_DTYPE[ctx.get_dtype(node.input[1])]
+    val_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[1]))
 
     if mean_shape != scale_shape:
         new_mean_value = np.array(np.resize(node.inputs[3].get_tensor_value(as_list=False), scale_shape),
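When the recorded mean shape disagrees with the scale shape, the handler resizes the stored statistics to match. `np.resize` repeats the data cyclically to fill the target shape, e.g. (values hypothetical):

    import numpy as np

    scale_shape = [4]
    mean = np.array([0.5, 1.5], dtype=np.float32)  # hypothetical stats
    new_mean = np.resize(mean, scale_shape)        # cycles values to fit
    print(new_mean)  # [0.5 1.5 0.5 1.5]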
@@ -1387,11 +1387,11 @@ def fill_op(ctx, node, name, args):
     # In onnx the value is an attribute so we need to fetch the value as const which
     # sooner or later will be a problem for tensorflow-onnx.
     # ConstantOfShape in onnxruntime only support int64, so insert cast op
-    input_dtype_is_int64 = utils.ONNX_TO_NUMPY_DTYPE[ctx.get_dtype(node.input[0])] == np.int64
+    input_dtype_is_int64 = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0])) == np.int64
     if not input_dtype_is_int64:
         cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.INT64)
     dtype = ctx.get_dtype(node.output[0])
-    value = np.array([node.inputs[1].get_tensor_value()]).astype(utils.ONNX_TO_NUMPY_DTYPE[dtype])
+    value = np.array([node.inputs[1].get_tensor_value()]).astype(utils.map_onnx_to_numpy_type(dtype))
     value_proto = numpy_helper.from_array(value)
     node.set_attr("value", value_proto)
     del node.input[1]
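`ConstantOfShape` takes its fill value as a one-element tensor attribute, and the comment above notes that onnxruntime only accepts an int64 shape input, hence the inserted Cast. A numpy equivalent of the lowered Fill (values illustrative):

    import numpy as np

    dims = np.array([2, 3], dtype=np.int32)    # tf Fill dims are often int32
    value = np.array([7.0], dtype=np.float32)  # one-element value tensor
    shape64 = dims.astype(np.int64)            # the Cast inserted above
    print(np.full(shape64, value[0]))          # 2x3 array filled with 7.0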
@@ -1601,7 +1601,7 @@ def zeroslike_op(ctx, node, name, args):
     # when params "dtype" used, tf will call another op "Fill" instead, so Cast is not needed here.
     input_dtype = ctx.get_dtype(node.input[0])
     node_name = utils.make_name("zero")
-    const_zero = ctx.make_const(node_name, np.array(0).astype(utils.ONNX_TO_NUMPY_DTYPE[input_dtype]))
+    const_zero = ctx.make_const(node_name, np.array(0).astype(utils.map_onnx_to_numpy_type(input_dtype)))
     shapes = node.output_shapes
     dtypes = node.output_dtypes
     ctx.remove_node(name)
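The zero constant built here is presumably multiplied with the input to emulate ZerosLike (the node construction is outside this hunk); the identity it would rely on, keeping the input's shape and dtype:

    import numpy as np

    def zeros_like_via_mul(x):
        zero = np.array(0).astype(x.dtype)  # scalar zero in the input's dtype
        return x * zero                     # broadcasts to x's shape

    print(zeros_like_via_mul(np.array([[1, 2], [3, 4]], dtype=np.int32)))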
@@ -2051,15 +2051,15 @@ def rewrite_constant_fold(g, ops):
                 log.info("folding node type=%s, name=%s" % (op.type, op.name))
                 if op.type == "Cast":
                     dst = op.get_attr_int("to")
-                    np_type = tf2onnx.utils.ONNX_TO_NUMPY_DTYPE[dst]
+                    np_type = tf2onnx.utils.map_onnx_to_numpy_type(dst)
                     val = np.cast[np_type](*inputs)
                 elif op.type == "ConcatV2":
                     axis = inputs[-1]
                     values = inputs[:-1]
                     val = func(tuple(values), axis)
                 elif op.type == "ListDiff":
                     out_type = op.get_attr_int("out_idx")
-                    np_type = tf2onnx.utils.ONNX_TO_NUMPY_DTYPE[out_type]
+                    np_type = tf2onnx.utils.map_onnx_to_numpy_type(out_type)
                     val = func(*inputs)
                     val = val.astype(np_type)
                 elif op.type in ["Pack"]:
@@ -2068,7 +2068,7 @@ def rewrite_constant_fold(g, ops):
                     val = func(inputs, axis=axis)
                 elif op.type == "Range":
                     dtype = op.get_attr_int("Tidx")
-                    np_type = tf2onnx.utils.ONNX_TO_NUMPY_DTYPE[dtype]
+                    np_type = tf2onnx.utils.map_onnx_to_numpy_type(dtype)
                     val = func(*inputs, dtype=np_type)
                 else:
                     val = func(*inputs)
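rewrite_constant_fold evaluates foldable nodes by dispatching on the tf op type to a matching numpy function, with per-op special cases for attributes such as the concat axis or the Range dtype. A self-contained sketch of that dispatch shape (the map entries here are illustrative, not the converter's full table):

    import numpy as np

    func_map = {"Add": np.add, "Neg": np.negative, "ConcatV2": np.concatenate}

    def fold(op_type, inputs):
        func = func_map[op_type]
        if op_type == "ConcatV2":
            axis = int(inputs[-1])  # tf passes the axis as the last input
            return func(tuple(inputs[:-1]), axis)
        return func(*inputs)

    print(fold("Add", [np.array([1, 2]), np.array([3, 4])]))    # [4 6]
    print(fold("ConcatV2", [np.array([1]), np.array([2]), 0]))  # [1 2]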