@@ -1366,6 +1366,15 @@ def func(x):
             return tf.identity(x_, name=_TFOUTPUT)
         self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
 
+    @check_opset_min_version(13, "Split")
+    def test_split_nonconst(self):
+        x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
+        y_val = np.array([4, 15, 11], np.int32)
+        def func(x, y):
+            x_, _, _ = tf.split(x, y, 1)
+            return tf.identity(x_, name=_TFOUTPUT)
+        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
+
     def test_split_with_more_outputs(self):
         x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
         def func(x):
@@ -1387,6 +1396,24 @@ def func(x):
             return tf.identity(x_, name=_TFOUTPUT)
         self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
 
+    @check_opset_min_version(13, "ReduceSum")
+    def test_reducesum_nonconst_axis(self):
+        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2))
+        y_val = np.array([1, 2], dtype=np.int32)
+        def func(x, y):
+            x_ = tf.reduce_sum(x, axis=y)
+            return tf.identity(x_, name=_TFOUTPUT)
+        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
+
+    @check_opset_min_version(13, "ReduceSum")
+    def test_reducesum_empty_axis(self):
+        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2))
+        y_val = np.array([], dtype=np.int32)
+        def func(x, y):
+            x_ = tf.reduce_sum(x, axis=y)
+            return tf.identity(x_, name=_TFOUTPUT)
+        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
+
     @check_opset_min_version(9, "OneHot")
     def test_segment_sum_data_vector(self):
         segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
@@ -2866,6 +2893,16 @@ def func(x):
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
+    @check_opset_min_version(11, "ReduceSum")
+    @check_tf_min_version("1.15")
+    def test_reduce_any_empty_axis(self):
+        input_val = np.random.randint(0, 2, (10, 20)).astype(np.bool)
+        def func(x):
+            res = tf.reduce_any(input_tensor=x, keepdims=False)
+            res1 = tf.reduce_any(input_tensor=x, axis=[], keepdims=False)
+            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
+        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
+
     @check_opset_min_version(7, "fill")
     def test_zeros_like(self):
         input_x = np.random.random_sample([10, 20]).astype(np.float32)
@@ -3289,6 +3326,20 @@ def func(x):
             return tf.identity(y, name=_TFOUTPUT)
         self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
 
+    def test_softmax(self):
+        x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 1, 8])
+        def func(x):
+            y = tf.nn.softmax(x)
+            return tf.identity(y, name=_TFOUTPUT)
+        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
+
+    def test_log_softmax(self):
+        x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 1, 8])
+        def func(x):
+            y = tf.nn.log_softmax(x)
+            return tf.identity(y, name=_TFOUTPUT)
+        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
+
     # test for gemm pattern0: alpha*A*B + beta*C
     def test_gemm_pattern0(self):
         max_number = 10