@@ -436,6 +436,29 @@ def convert_elementwise_sub(
     layers[scope_name] = sub([model0, model1])
 
 
+def convert_sum(
+    params, w_name, scope_name, inputs, layers, weights
+):
+    """
+    Convert sum.
+
+    Args:
+        params: dictionary with layer parameters
+        w_name: name prefix in state_dict
+        scope_name: pytorch scope name
+        inputs: pytorch node inputs
+        layers: dictionary with keras tensors
+        weights: pytorch state_dict
+    """
+    print('Converting Sum ...')
+
+    def target_layer(x):
+        return keras.backend.sum(x)
+
+    lambda_layer = keras.layers.Lambda(target_layer)
+    layers[scope_name] = lambda_layer(layers[inputs[0]])
+
+
 def convert_concat(params, w_name, scope_name, inputs, layers, weights):
     """
     Convert concatenation.
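
For reference, a minimal standalone sketch of what the new convert_sum wraps in its Lambda layer: keras.backend.sum with no axis argument reduces over every axis of the tensor (assumes a Keras 2.x backend; the array below is illustrative, not from the commit):

    import numpy as np
    import keras.backend as K

    # K.sum with no axis collapses all axes, which is what convert_sum's
    # target_layer applies to the incoming Keras tensor.
    x = K.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
    print(K.eval(K.sum(x)))  # 15.0
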
@@ -469,6 +492,7 @@ def convert_relu(params, w_name, scope_name, inputs, layers, weights):
     """
     print('Converting relu ...')
 
+    print(w_name, scope_name)
     tf_name = w_name + str(random.random())
     relu = keras.layers.Activation('relu', name=tf_name)
     layers[scope_name] = relu(layers[inputs[0]])
@@ -570,7 +594,6 @@ def convert_selu(params, w_name, scope_name, inputs, layers, weights):
     layers[scope_name] = selu(layers[inputs[0]])
 
 
-
 def convert_transpose(params, w_name, scope_name, inputs, layers, weights):
     """
     Convert transpose layer.
@@ -705,7 +728,9 @@ def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights):
 
     keepdims = params['keepdims'] > 0
     axis = np.array(params['axes'])
-    target_layer = lambda x: keras.backend.sum(x, keepdims=keepdims, axis=axis)
+
+    def target_layer(x, keepdims=keepdims, axis=axis):
+        return keras.backend.sum(x, keepdims=keepdims, axis=axis)
 
     lambda_layer = keras.layers.Lambda(target_layer)
     layers[scope_name] = lambda_layer(layers[inputs[0]])
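
Here the lambda is replaced by a named def with keepdims and axis bound as default arguments, which freezes the per-layer values at definition time instead of relying on the enclosing scope. A standalone sketch of the same pattern (values are illustrative, not taken from the commit):

    import numpy as np
    import keras.backend as K

    keepdims = True
    axis = np.array([1])

    # Default arguments capture keepdims/axis now; later changes to the
    # local variables no longer affect the function the Lambda will call.
    def target_layer(x, keepdims=keepdims, axis=axis):
        return K.sum(x, keepdims=keepdims, axis=axis)

    x = K.constant(np.ones((2, 3), dtype=np.float32))
    print(K.eval(target_layer(x)))  # [[3.], [3.]]
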
@@ -725,7 +750,9 @@ def convert_constant(params, w_name, scope_name, inputs, layers, weights):
     """
     print('Converting constant ...')
 
-    target_layer = lambda x: keras.backend.constant(np.float32(params['value']))
+    def target_layer(x, params=params):
+        return keras.backend.constant(np.float32(params['value']))
+
     lambda_layer = keras.layers.Lambda(target_layer)
     layers[scope_name] = lambda_layer(layers[inputs[0]])
 
@@ -782,7 +809,7 @@ def convert_padding(params, w_name, scope_name, inputs, layers, weights):
         padding_name = tf_name + '_pad'
         padding_layer = keras.layers.ZeroPadding2D(
             padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])),
-            name=tf_name
+            name=padding_name
         )
 
         layers[scope_name] = padding_layer(layers[inputs[0]])
@@ -801,6 +828,7 @@ def convert_padding(params, w_name, scope_name, inputs, layers, weights):
     'onnx::Add': convert_elementwise_add,
     'onnx::Mul': convert_elementwise_mul,
     'onnx::Sub': convert_elementwise_sub,
+    'onnx::Sum': convert_sum,
     'onnx::Concat': convert_concat,
     'onnx::Relu': convert_relu,
     'onnx::LeakyRelu': convert_lrelu,
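
With 'onnx::Sum' registered, graph traversal can resolve Sum nodes by op type through plain dictionary dispatch. A self-contained sketch with hypothetical stand-in names (CONVERTERS, convert_sum_stub) rather than the real registry:

    # Each ONNX op type maps to one conversion function; unknown types fail fast.
    def convert_sum_stub(params, w_name, scope_name, inputs, layers, weights):
        print('Converting Sum ...')

    CONVERTERS = {'onnx::Sum': convert_sum_stub}

    node_type = 'onnx::Sum'
    if node_type not in CONVERTERS:
        raise ValueError('{} is not supported'.format(node_type))
    CONVERTERS[node_type]({}, 'sum0', 'Sum_0', ['input_0'], {}, {})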