@@ -799,8 +799,10 @@ def test_relu(self):
799
799
800
800
@skip_caffe2_backend ("fails on caffe2 with dim issue" )
801
801
@check_onnxruntime_incompatibility ("Mul" )
802
- def test_leaky_relu (self ):
803
- x_types = [np .float32 , np .int32 , np .int64 ]
802
+ @check_tf_min_version ("1.6" )
803
+ def test_leaky_relu_int (self ):
804
+ # starting from tf 1.6, leaky_relu supports `feature` x of int type
805
+ x_types = [np .int32 , np .int64 ]
804
806
for x_type in x_types :
805
807
x_val = 1000 * np .random .random_sample ([1000 , 100 ]).astype (x_type )
806
808
for alpha in [0.1 , - 0.1 , 1.0 , - 1.0 ]:
@@ -810,6 +812,17 @@ def test_leaky_relu(self):
810
812
self ._run_test_case ([_OUTPUT ], {_INPUT : x_val })
811
813
tf .reset_default_graph ()
812
814
815
@skip_caffe2_backend("fails on caffe2 with dim issue")
@check_onnxruntime_incompatibility("Mul")
def test_leaky_relu_float(self):
    """Check ONNX conversion of leaky_relu on a float32 input.

    Runs the same [1000, 100] random float32 tensor through
    tf.nn.leaky_relu for several alpha values (positive, negative,
    and +/-1.0) and compares the converted graph's output against
    TensorFlow via _run_test_case.
    """
    x_val = 1000 * np.random.random_sample([1000, 100]).astype(np.float32)
    for slope in (0.1, -0.1, 1.0, -1.0):
        # Build a fresh graph per alpha and reset afterwards so each
        # conversion run is independent of the previous one.
        x = tf.placeholder(x_val.dtype, [None] * x_val.ndim, name=_TFINPUT)
        x_ = tf.nn.leaky_relu(x, slope)
        _ = tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case([_OUTPUT], {_INPUT: x_val})
        tf.reset_default_graph()
813
826
@check_onnxruntime_incompatibility ("Elu" )
814
827
def test_elu (self ):
815
828
x_val = np .array ([0.5 , 1.0 , - 0.5 , - 1.0 ], dtype = np .float32 ).reshape ((2 , 2 ))
0 commit comments