@@ -284,6 +284,7 @@ def decode(self, *args, **kwargs):
284284 "fn_with_ui" : chatgpt_ui ,
285285 "fn_without_ui" : chatgpt_noui ,
286286 "endpoint" : openai_endpoint ,
287+ "can_multi_thread" : True ,
287288 "max_token" : 128000 ,
288289 "tokenizer" : tokenizer_gpt4 ,
289290 "token_cnt" : get_token_num_gpt4 ,
@@ -368,6 +369,50 @@ def decode(self, *args, **kwargs):
368369 "openai_force_temperature_one" : True ,
369370 },
370371
372+ "gpt-4.1" :{
373+ "fn_with_ui" : chatgpt_ui ,
374+ "fn_without_ui" : chatgpt_noui ,
375+ "has_multimodal_capacity" : True ,
376+ "endpoint" : openai_endpoint ,
377+ "max_token" : 828000 ,
378+ "tokenizer" : tokenizer_gpt4 ,
379+ "token_cnt" : get_token_num_gpt4 ,
380+ },
381+
382+ "gpt-4.1-mini" :{
383+ "fn_with_ui" : chatgpt_ui ,
384+ "fn_without_ui" : chatgpt_noui ,
385+ "has_multimodal_capacity" : True ,
386+ "endpoint" : openai_endpoint ,
387+ "max_token" : 828000 ,
388+ "tokenizer" : tokenizer_gpt4 ,
389+ "token_cnt" : get_token_num_gpt4 ,
390+ },
391+
392+ "o3" :{
393+ "fn_with_ui" : chatgpt_ui ,
394+ "fn_without_ui" : chatgpt_noui ,
395+ "has_multimodal_capacity" : True ,
396+ "endpoint" : openai_endpoint ,
397+ "max_token" : 828000 ,
398+ "tokenizer" : tokenizer_gpt4 ,
399+ "token_cnt" : get_token_num_gpt4 ,
400+ "openai_disable_system_prompt" : True ,
401+ "openai_disable_stream" : True ,
402+ "openai_force_temperature_one" : True ,
403+ },
404+
405+ "o4-mini" :{
406+ "fn_with_ui" : chatgpt_ui ,
407+ "fn_without_ui" : chatgpt_noui ,
408+ "has_multimodal_capacity" : True ,
409+ "can_multi_thread" : True ,
410+ "endpoint" : openai_endpoint ,
411+ "max_token" : 828000 ,
412+ "tokenizer" : tokenizer_gpt4 ,
413+ "token_cnt" : get_token_num_gpt4 ,
414+ },
415+
371416 "gpt-4-turbo" : {
372417 "fn_with_ui" : chatgpt_ui ,
373418 "fn_without_ui" : chatgpt_noui ,
@@ -878,7 +923,10 @@ def decode(self, *args, **kwargs):
878923 logger .error (trimmed_format_exc ())
879924
880925# -=-=-=-=-=-=- 阿里云百炼(通义)-在线模型 -=-=-=-=-=-=-
881- qwen_models = ["qwen-max-latest" , "qwen-max-2025-01-25" ,"qwen-max" ,"qwen-turbo" ,"qwen-plus" ,"dashscope-deepseek-r1" ,"dashscope-deepseek-v3" ]
926+ qwen_models = ["qwen-max-latest" , "qwen-max-2025-01-25" ,"qwen-max" ,"qwen-turbo" ,"qwen-plus" ,
927+ "dashscope-deepseek-r1" ,"dashscope-deepseek-v3" ,
928+ "dashscope-qwen3-14b" , "dashscope-qwen3-235b-a22b" , "dashscope-qwen3-32b" ,
929+ ]
882930if any (item in qwen_models for item in AVAIL_LLM_MODELS ):
883931 try :
884932 from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
@@ -947,6 +995,34 @@ def decode(self, *args, **kwargs):
947995 "max_token" : 57344 ,
948996 "tokenizer" : tokenizer_gpt35 ,
949997 "token_cnt" : get_token_num_gpt35 ,
998+ },
999+ "dashscope-qwen3-14b" : {
1000+ "fn_with_ui" : qwen_ui ,
1001+ "fn_without_ui" : qwen_noui ,
1002+ "enable_reasoning" : True ,
1003+ "can_multi_thread" : True ,
1004+ "endpoint" : None ,
1005+ "max_token" : 129024 ,
1006+ "tokenizer" : tokenizer_gpt35 ,
1007+ "token_cnt" : get_token_num_gpt35 ,
1008+ },
1009+ "dashscope-qwen3-235b-a22b" : {
1010+ "fn_with_ui" : qwen_ui ,
1011+ "fn_without_ui" : qwen_noui ,
1012+ "can_multi_thread" : True ,
1013+ "endpoint" : None ,
1014+ "max_token" : 129024 ,
1015+ "tokenizer" : tokenizer_gpt35 ,
1016+ "token_cnt" : get_token_num_gpt35 ,
1017+ },
1018+ "dashscope-qwen3-32b" : {
1019+ "fn_with_ui" : qwen_ui ,
1020+ "fn_without_ui" : qwen_noui ,
1021+ "can_multi_thread" : True ,
1022+ "endpoint" : None ,
1023+ "max_token" : 129024 ,
1024+ "tokenizer" : tokenizer_gpt35 ,
1025+ "token_cnt" : get_token_num_gpt35 ,
9501026 }
9511027 })
9521028 except :
0 commit comments