@@ -81,8 +81,8 @@ async def request_response(
8181 response_tokens = min (response_tokens , max_response_tokens )
8282
8383 if model not in MODELS and self .db .endpoint_override is None :
84- log .error (f"This model is no longer supported: { model } . Switching to gpt-5" )
85- model = "gpt-5"
84+ log .error (f"This model is no longer supported: { model } . Switching to gpt-5.1" )
85+ model = "gpt-5.1"
8686 await self .save_conf ()
8787
8888 response : ChatCompletion = await request_chat_completion_raw (
@@ -132,7 +132,7 @@ async def request_embedding(self, text: str, conf: GuildSettings) -> List[float]
132132 # -------------------------------------------------------
133133 # -------------------------------------------------------
134134
135- async def count_payload_tokens (self , messages : List [dict ], model : str = "gpt-5" ) -> int :
135+ async def count_payload_tokens (self , messages : List [dict ], model : str = "gpt-5.1" ) -> int :
136136 if not messages :
137137 return 0
138138
@@ -167,7 +167,7 @@ def _count_payload():
167167
168168 return await asyncio .to_thread (_count_payload )
169169
170- async def count_function_tokens (self , functions : List [dict ], model : str = "gpt-5" ) -> int :
170+ async def count_function_tokens (self , functions : List [dict ], model : str = "gpt-5.1" ) -> int :
171171 # Initialize function settings to 0
172172 func_init = 0
173173 prop_init = 0
@@ -205,6 +205,8 @@ async def count_function_tokens(self, functions: List[dict], model: str = "gpt-5
205205 "gpt-5-mini-2025-04-16" ,
206206 "gpt-5-nano" ,
207207 "gpt-5-nano-2025-04-16" ,
208+ "gpt-5.1" ,
209+ "gpt-5.1-2025-11-13" ,
208210 ]:
209211 # Set function settings for the above models
210212 func_init = 7
@@ -273,7 +275,7 @@ def _count_tokens():
273275
274276 return await asyncio .to_thread (_count_tokens )
275277
276- async def get_tokens (self , text : str , model : str = "gpt-5" ) -> list [int ]:
278+ async def get_tokens (self , text : str , model : str = "gpt-5.1" ) -> list [int ]:
277279 """Get token list from text"""
278280 if not text :
279281 log .debug ("No text to get tokens from!" )
@@ -358,7 +360,7 @@ async def cut_text_by_tokens(self, text: str, conf: GuildSettings, user: Optiona
358360 tokens = await self .get_tokens (text , conf .get_user_model (user ))
359361 return await self .get_text (tokens [: self .get_max_tokens (conf , user )], conf .get_user_model (user ))
360362
361- async def get_text (self , tokens : list , model : str = "gpt-5" ) -> str :
363+ async def get_text (self , tokens : list , model : str = "gpt-5.1" ) -> str :
362364 """Get text from token list"""
363365
364366 def _get_encoding ():
0 commit comments