@@ -35,7 +35,7 @@ class StringOpsTests(Tf2OnnxBackendTestBase):
 
     @requires_custom_ops("StringRegexReplace")
     def test_static_regex_replace(self):
-        text_val = np.array([["Hello world!", "Test 1 2 3"], ["Hi there", "test test"]], dtype=np.str)
+        text_val = np.array([["Hello world!", "Test 1 2 3"], ["Hi there", "test test"]], dtype=str)
         def func(text):
             x_ = tf.strings.regex_replace(text, " ", "_", replace_global=True)
             return tf.identity(x_, name=_TFOUTPUT)
@@ -44,9 +44,9 @@ def func(text):
     @requires_custom_ops("StringJoin")
     @check_opset_min_version(8, "Expand")
     def test_string_join(self):
-        text_val1 = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=np.str)
-        text_val2 = np.array([["b", "Test 1 2 3"], ["Hi there", "suits ♠♣♥♦"]], dtype=np.str)
-        text_val3 = np.array("Some scalar text", dtype=np.str)
+        text_val1 = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=str)
+        text_val2 = np.array([["b", "Test 1 2 3"], ["Hi there", "suits ♠♣♥♦"]], dtype=str)
+        text_val3 = np.array("Some scalar text", dtype=str)
         def func(text1, text2, text3):
             x_ = tf.strings.join([text1, text2, text3], separator="±")
             return tf.identity(x_, name=_TFOUTPUT)
@@ -55,7 +55,7 @@ def func(text1, text2, text3):
     @requires_custom_ops("StringSplit")
     @check_tf_min_version("2.0", "result is sparse not ragged in tf1")
     def test_string_split(self):
-        text_val = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=np.str)
+        text_val = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=str)
         def func(text):
             x = tf.strings.split(text, sep=' ').flat_values
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -64,7 +64,7 @@ def func(text):
 
     @requires_custom_ops("StringToHashBucketFast")
     def test_string_to_hash_bucket_fast(self):
-        text_val = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
+        text_val = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
         def func(text):
             x = tf.strings.to_hash_bucket_fast(text, 20)
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -73,8 +73,8 @@ def func(text):
 
     @requires_custom_ops("StringEqual")
     def test_string_equal(self):
-        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
-        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=np.str)
+        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
+        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=str)
         def func(text1, text2):
             x = tf.equal(text1, text2)
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -83,8 +83,8 @@ def func(text1, text2):
 
     @requires_custom_ops("StringNotEqual")
     def test_string_not_equal(self):
-        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
-        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=np.str)
+        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
+        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=str)
         def func(text1, text2):
             x = tf.not_equal(text1, text2)
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -116,7 +116,7 @@ def test_regex_split_with_offsets(self):
         from tensorflow_text.python.ops.regex_split_ops import (
             gen_regex_split_ops as lib_gen_regex_split_ops)
         text_val = np.array(["a Test 1 2 3 ♠♣",
-                             "Hi there test test ♥♦"], dtype=np.str)
+                             "Hi there test test ♥♦"], dtype=str)
         def func(text):
             tokens, begin_offsets, end_offsets, row_splits = lib_gen_regex_split_ops.regex_split_with_offsets(
                 text, "(\\s)", "")
@@ -153,7 +153,7 @@ def _CreateTable(vocab, num_oov=1):
             init, num_oov, lookup_key_dtype=tf.string)
 
         vocab = _CreateTable(["great", "they", "the", "##'", "##re", "##est"])
-        text_val = np.array(["they're", "the", "greatest"], dtype=np.str)
+        text_val = np.array(["they're", "the", "greatest"], dtype=str)
 
         def func(text):
             inputs = ragged_tensor.convert_to_tensor_or_ragged_tensor(text)
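Every hunk above makes the same substitution: np.str was never a distinct NumPy dtype, only an alias for the builtin str, and the alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, so dtype=str is the forward-compatible spelling and yields the same unicode array dtype. A minimal sketch of the equivalence (the array contents here are illustrative, taken from the first test, not a claim about the test harness):

import numpy as np

# dtype=str asks NumPy for its fixed-width unicode dtype; element width
# is sized to the longest string in the input.
text_val = np.array([["Hello world!", "Test 1 2 3"],
                     ["Hi there", "test test"]], dtype=str)
print(text_val.dtype)       # <U12 -- unicode, width of "Hello world!"
print(text_val.dtype.kind)  # 'U'

On NumPy versions before 1.24 the old spelling dtype=np.str produced an identical array (with a DeprecationWarning from 1.20 on); on 1.24 and later it raises AttributeError, which is what this change guards against.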