@@ -9,7 +9,7 @@

 import json

-from core import TEMPLATE, URL, build_request_payload, send_request
+from core import TEMPLATE, URL, build_request_payload, get_token_list, send_request


 def test_stream_response():
@@ -76,18 +76,24 @@ def test_logprobs_enabled():
 def test_stop_sequence():
     data = {
         "stream": False,
-        "stop": ["果冻"],
+        "stop": ["。"],
         "messages": [
-            {"role": "user", "content": "你要严格按照我接下来的话输出,输出冒号后面的内容,请输出:这是第一段。果冻这是第二段啦啦啦啦啦。"},
+            {
+                "role": "user",
+                "content": "你要严格按照我接下来的话输出,输出冒号后面的内容,请输出:这是第一段。这是第二段啦啦啦啦啦。",
+            },
         ],
         "max_tokens": 20,
         "top_p": 0,
     }
     payload = build_request_payload(TEMPLATE, data)
     resp = send_request(URL, payload).json()
     content = resp["choices"][0]["message"]["content"]
+    token_list = get_token_list(resp)
     print("截断输出:", content)
     assert "第二段" not in content
+    assert "第二段" not in token_list
+    assert "。" in token_list, "没有找到。符号"


 def test_sampling_parameters():
@@ -125,7 +131,7 @@ def test_multi_turn_conversation():


 def test_bad_words_filtering():
-    banned_tokens = ["和", "呀"]
+    banned_tokens = ["香"]

     data = {
         "stream": False,
@@ -140,36 +146,14 @@ def test_bad_words_filtering():

     payload = build_request_payload(TEMPLATE, data)
     response = send_request(URL, payload).json()
-
     content = response["choices"][0]["message"]["content"]
     print("生成内容:", content)
+    token_list = get_token_list(response)

     for word in banned_tokens:
-        assert word not in content, f"bad_word '{word}' 不应出现在生成结果中"
+        assert word not in token_list, f"bad_word '{word}' 不应出现在生成结果中"

-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
-
-    data = {
-        "stream": False,
-        "messages": [
-            {"role": "system", "content": "你是一个助手,回答简洁清楚"},
-            {"role": "user", "content": "请输出冒号后面的字,一模一样: 我爱吃果冻,苹果,香蕉,和荔枝呀呀呀"},
-        ],
-        "top_p": 0,
-        "max_tokens": 69,
-        # "bad_words": banned_tokens,
-    }
-
-    payload = build_request_payload(TEMPLATE, data)
-    response = send_request(URL, payload).json()
-
-    content = response["choices"][0]["message"]["content"]
-    print("生成内容:", content)
-
-    for word in banned_tokens:
-        assert word not in content, f"bad_word '{word}' 不应出现在生成结果中"
-
-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
+    print("test_bad_words_filtering 正例验证通过")


 def test_bad_words_filtering1():
@@ -195,8 +179,10 @@ def test_bad_words_filtering1():
     for word in banned_tokens:
         assert word not in content, f"bad_word '{word}' 不应出现在生成结果中"

-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
-    word = "呀呀"
+    print("test_bad_words_filtering1 通过:生成结果未包含被禁词")
+
+    # 正例验证
+    word = "呀"
     data = {
         "stream": False,
         "messages": [
@@ -212,7 +198,7 @@ def test_bad_words_filtering1():

     content = response["choices"][0]["message"]["content"]
     print("生成内容:", content)
+    token_list = get_token_list(response)
+    assert word in token_list, f"'{word}' 应出现在生成结果中"

-    assert word in content, f"'{word}' 应出现在生成结果中"
-
-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
+    print("test_bad_words_filtering1 正例验证通过")
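Note: get_token_list is imported from core but its implementation is not part of this diff. A minimal sketch of what such a helper might look like, assuming an OpenAI-style response where per-token entries are exposed under choices[0]["logprobs"]["content"] (the schema and key names here are assumptions, not taken from this repository):

def get_token_list(response: dict) -> list[str]:
    # Hypothetical helper sketch: gather the generated tokens from an
    # OpenAI-style logprobs payload. Adjust the keys if the server's
    # actual response schema differs.
    logprobs = response["choices"][0].get("logprobs") or {}
    entries = logprobs.get("content") or []
    return [entry.get("token", "") for entry in entries]

With a helper of this shape, assertions such as `"。" in token_list` check the individual generated tokens rather than the concatenated message string.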