@@ -256,7 +256,7 @@ def test_openai_v1_completions(
         "prompt": prompt,
         "max_tokens": max_tokens,
         "stream": stream,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -347,7 +347,7 @@ def test_openai_v1_completions_echo(
         "max_tokens": max_tokens,
         "echo": True,
         "stream": stream,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -398,7 +398,7 @@ def test_openai_v1_completions_suffix(
         "max_tokens": max_tokens,
         "suffix": suffix,
         "stream": stream,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -498,7 +498,7 @@ def test_openai_v1_completions_temperature(
         "max_tokens": max_tokens,
         "stream": stream,
         "temperature": 0.0,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -652,7 +652,7 @@ def test_openai_v1_completions_logit_bias(
         "max_tokens": max_tokens,
         "stream": stream,
         "logit_bias": {338: -100},  # 338 is " is" in Llama tokenizer.
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -699,7 +699,7 @@ def test_openai_v1_completions_presence_frequency_penalty(
         "stream": stream,
         "frequency_penalty": 2.0,
         "presence_penalty": 2.0,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -743,7 +743,7 @@ def test_openai_v1_completions_seed(
         "max_tokens": max_tokens,
         "stream": False,
         "seed": 233,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response1 = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
@@ -1207,7 +1207,7 @@ def test_openai_v1_chat_completions_ignore_eos(
         "messages": messages,
         "stream": stream,
         "max_tokens": max_tokens,
-        "ignore_eos": True,
+        "debug_config": {"ignore_eos": True},
     }
 
     response = requests.post(OPENAI_V1_CHAT_COMPLETION_URL, json=payload, timeout=180)
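Every hunk above makes the same change: ignore_eos moves out of the top-level request body and into a nested debug_config object. A minimal sketch of the new payload shape follows, assuming a locally served OpenAI-compatible completions endpoint; the URL, model id, and prompt below are placeholders for illustration, not values taken from this test suite, which derives them from its own fixtures.

import requests

# Placeholder endpoint and model for illustration only.
OPENAI_V1_COMPLETION_URL = "http://127.0.0.1:8000/v1/completions"

payload = {
    "model": "my-served-model",  # placeholder model id
    "prompt": "What is the meaning of life?",
    "max_tokens": 32,
    "stream": False,
    # "ignore_eos" now lives inside the nested "debug_config" object
    # rather than at the top level of the request body, so generation
    # continues past EOS until max_tokens is exhausted.
    "debug_config": {"ignore_eos": True},
}

response = requests.post(OPENAI_V1_COMPLETION_URL, json=payload, timeout=180)
response.raise_for_status()
print(response.json()["choices"][0]["text"])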