@@ -634,75 +634,11 @@ def pool_op(ctx, node, name, args):
634
634
635
635
def relu6_op(ctx, node, name, args):
    """Map TF Relu6 onto ONNX Relu + Clip.

    relu6 = min(max(features, 0), 6).  ONNX has no Relu6 operator, but the
    same result is obtained by a Relu followed by Clip(min=0, max=6), which
    avoids the older multi-op Min/Max composition entirely.

    Returns the list of nodes that now implement the op: the (retyped)
    original node and the inserted Clip node.
    """
    # Reuse the incoming node as the Relu; its inputs/outputs stay wired up.
    node.type = "Relu"
    # Insert a Clip on the Relu output to cap values at 6 (and floor at 0).
    # NOTE(review): min/max as attributes is the pre-opset-11 Clip form —
    # confirm the target opset if this converter is ever bumped past 10.
    clip_name = utils.make_name(node.name)
    clip_node = ctx.insert_new_node_on_output("Clip", node.output[0], name=clip_name, min=0.0, max=6.0)
    # The clipped tensor has the same shape as the original output.
    ctx.copy_shape(node.output[0], clip_node.output[0])
    return [node, clip_node]
706
642
707
643
708
644
def squareddifference_op (ctx , node , name , args ):
@@ -1901,7 +1837,6 @@ def where_op(ctx, node, name, args):
1901
1837
}
1902
1838
1903
1839
# Ops that need opset >= 8 features (e.g. broadcasting, Scan).
# "Relu6" is intentionally absent: it is handled for all opsets via
# Relu + Clip and no longer needs the opset-8 min/max broadcast path.
_OPSET_8 = {
    "ReverseSequence": (reverse_op8, []),  # make use of scan
    "Select": (select_op8, []),
}
0 commit comments