
Commit 8c49ee3

Assistant - add support for gpt-5.1 model
1 parent 8472f42 commit 8c49ee3

6 files changed: +26 -14 lines changed


assistant/abc.py

Lines changed: 4 additions & 4 deletions
@@ -62,23 +62,23 @@ async def cut_text_by_tokens(self, text: str, conf: GuildSettings, user: Optiona
         raise NotImplementedError
 
     @abstractmethod
-    async def count_payload_tokens(self, messages: List[dict], model: str = "gpt-5") -> int:
+    async def count_payload_tokens(self, messages: List[dict], model: str = "gpt-5.1") -> int:
         raise NotImplementedError
 
     @abstractmethod
-    async def count_function_tokens(self, functions: List[dict], model: str = "gpt-5") -> int:
+    async def count_function_tokens(self, functions: List[dict], model: str = "gpt-5.1") -> int:
         raise NotImplementedError
 
     @abstractmethod
     async def count_tokens(self, text: str, model: str) -> int:
         raise NotImplementedError
 
     @abstractmethod
-    async def get_tokens(self, text: str, model: str = "gpt-5") -> list[int]:
+    async def get_tokens(self, text: str, model: str = "gpt-5.1") -> list[int]:
         raise NotImplementedError
 
     @abstractmethod
-    async def get_text(self, tokens: list, model: str = "gpt-5") -> str:
+    async def get_text(self, tokens: list, model: str = "gpt-5.1") -> str:
         raise NotImplementedError
 
     @abstractmethod
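Only the default value of the `model` parameter changes in these abstract signatures, so concrete implementations and callers that omit the argument now count tokens against gpt-5.1 instead of gpt-5; anything passing a model explicitly is unaffected. A minimal sketch of that pattern, using an illustrative class rather than the cog's actual mixin:

```python
# Illustrative stand-in for the cog's ABC; class and method bodies are hypothetical.
from abc import ABC, abstractmethod
from typing import List


class TokenCounterABC(ABC):
    @abstractmethod
    async def count_payload_tokens(self, messages: List[dict], model: str = "gpt-5.1") -> int:
        raise NotImplementedError


class NaiveTokenCounter(TokenCounterABC):
    async def count_payload_tokens(self, messages: List[dict], model: str = "gpt-5.1") -> int:
        # Crude word count as a placeholder; the real cog resolves a tiktoken
        # encoding for `model` and counts actual tokens.
        return sum(len(str(m.get("content", "")).split()) for m in messages)


# await NaiveTokenCounter().count_payload_tokens(messages)  # implicitly targets gpt-5.1
```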

assistant/assistant.py

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ class Assistant(
     """
 
     __author__ = "[vertyco](https://github.com/vertyco/vrt-cogs)"
-    __version__ = "6.18.11"
+    __version__ = "6.18.12"
 
     def format_help_for_context(self, ctx):
         helpcmd = super().format_help_for_context(ctx)

assistant/commands/base.py

Lines changed: 1 addition & 1 deletion
@@ -473,7 +473,7 @@ async def summarize_convo(
             response: ChatCompletionMessage = await self.request_response(
                 messages=payload,
                 conf=conf,
-                model_override="gpt-5",
+                model_override="gpt-5.1",
                 temperature_override=0.0,
             )
         except httpx.ReadTimeout:
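`summarize_convo` pins its request to gpt-5.1 via `model_override` rather than relying on the guild's configured model. A minimal sketch of what such a pinned request looks like against the official `openai` Python client (v1.x); the client setup and payload shape here are assumptions, not the cog's own `request_response` wiring:

```python
from openai import AsyncOpenAI

client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment


async def summarize(payload: list[dict]) -> str:
    # model="gpt-5.1" mirrors model_override="gpt-5.1" above. The command also
    # passes temperature_override=0.0; whether that is forwarded to the API is
    # left to the cog's request layer, so it is omitted here.
    response = await client.chat.completions.create(
        model="gpt-5.1",
        messages=payload,  # e.g. [{"role": "user", "content": "Summarize this conversation: ..."}]
    )
    return response.choices[0].message.content or ""
```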

assistant/common/api.py

Lines changed: 8 additions & 6 deletions
@@ -81,8 +81,8 @@ async def request_response(
         response_tokens = min(response_tokens, max_response_tokens)
 
         if model not in MODELS and self.db.endpoint_override is None:
-            log.error(f"This model is no longer supported: {model}. Switching to gpt-5")
-            model = "gpt-5"
+            log.error(f"This model is no longer supported: {model}. Switching to gpt-5.1")
+            model = "gpt-5.1"
             await self.save_conf()
 
         response: ChatCompletion = await request_chat_completion_raw(
@@ -132,7 +132,7 @@ async def request_embedding(self, text: str, conf: GuildSettings) -> List[float]
     # -------------------------------------------------------
     # -------------------------------------------------------
 
-    async def count_payload_tokens(self, messages: List[dict], model: str = "gpt-5") -> int:
+    async def count_payload_tokens(self, messages: List[dict], model: str = "gpt-5.1") -> int:
         if not messages:
             return 0
 
@@ -167,7 +167,7 @@ def _count_payload():
 
         return await asyncio.to_thread(_count_payload)
 
-    async def count_function_tokens(self, functions: List[dict], model: str = "gpt-5") -> int:
+    async def count_function_tokens(self, functions: List[dict], model: str = "gpt-5.1") -> int:
         # Initialize function settings to 0
         func_init = 0
         prop_init = 0
@@ -205,6 +205,8 @@ async def count_function_tokens(self, functions: List[dict], model: str = "gpt-5
             "gpt-5-mini-2025-04-16",
             "gpt-5-nano",
             "gpt-5-nano-2025-04-16",
+            "gpt-5.1",
+            "gpt-5.1-2025-11-13",
         ]:
             # Set function settings for the above models
             func_init = 7
@@ -273,7 +275,7 @@ def _count_tokens():
 
         return await asyncio.to_thread(_count_tokens)
 
-    async def get_tokens(self, text: str, model: str = "gpt-5") -> list[int]:
+    async def get_tokens(self, text: str, model: str = "gpt-5.1") -> list[int]:
         """Get token list from text"""
         if not text:
             log.debug("No text to get tokens from!")
@@ -358,7 +360,7 @@ async def cut_text_by_tokens(self, text: str, conf: GuildSettings, user: Optiona
         tokens = await self.get_tokens(text, conf.get_user_model(user))
         return await self.get_text(tokens[: self.get_max_tokens(conf, user)], conf.get_user_model(user))
 
-    async def get_text(self, tokens: list, model: str = "gpt-5") -> str:
+    async def get_text(self, tokens: list, model: str = "gpt-5.1") -> str:
        """Get text from token list"""
 
        def _get_encoding():
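These token helpers resolve a tiktoken encoding from the model name, so the new default means calls that omit `model` now tokenize as gpt-5.1. A freshly released name may not be known to the installed tiktoken version yet, so a fallback encoding is the usual pattern; a minimal sketch (the `o200k_base` fallback is an assumption, not necessarily what `_get_encoding` does in this cog):

```python
# Minimal sketch of model-name -> encoding resolution with tiktoken.
import tiktoken


def resolve_encoding(model: str = "gpt-5.1") -> tiktoken.Encoding:
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # tiktoken raises KeyError for model names it does not recognize yet.
        return tiktoken.get_encoding("o200k_base")  # assumed fallback


def count_tokens(text: str, model: str = "gpt-5.1") -> int:
    return len(resolve_encoding(model).encode(text))


def cut_text(text: str, limit: int, model: str = "gpt-5.1") -> str:
    # Mirrors the slice-then-decode approach of cut_text_by_tokens/get_text.
    enc = resolve_encoding(model)
    return enc.decode(enc.encode(text)[:limit])
```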

assistant/common/constants.py

Lines changed: 10 additions & 0 deletions
@@ -43,6 +43,8 @@
     "gpt-5-mini-2025-08-07": 400000,
     "gpt-5-nano": 400000,
     "gpt-5-nano-2025-08-07": 400000,
+    "gpt-5.1": 400000,
+    "gpt-5.1-2025-11-13": 400000,
 }
 PRICES = {  # Price per 1k tokens
     "gpt-3.5-turbo": [0.001, 0.0015],
@@ -86,6 +88,8 @@
     "gpt-5-mini-2025-08-07": [0.00025, 0.002],
     "gpt-5-nano": [0.00005, 0.0004],
     "gpt-5-nano-2025-08-07": [0.00005, 0.0004],
+    "gpt-5.1": [0.00125, 0.010],
+    "gpt-5.1-2025-11-13": [0.00125, 0.010],
     "o1": [0.015, 0.06],
     "o1-2024-12-17": [0.015, 0.06],
     "o1-preview": [0.015, 0.06],
@@ -166,6 +170,8 @@
     "gpt-5-mini-2025-08-07",
     "gpt-5-nano",
     "gpt-5-nano-2025-08-07",
+    "gpt-5.1",
+    "gpt-5.1-2025-11-13",
 ]
 NO_DEVELOPER_ROLE = [  # Also doesnt support system messages
     "o1-mini",
@@ -204,6 +210,8 @@
     "gpt-5-mini-2025-08-07",
     "gpt-5-nano",
     "gpt-5-nano-2025-08-07",
+    "gpt-5.1",
+    "gpt-5.1-2025-11-13",
 ]
 SUPPORTS_TOOLS = [
     "gpt-3.5-turbo-1106",
@@ -239,6 +247,8 @@
     "gpt-5-mini-2025-08-07",
     "gpt-5-nano",
     "gpt-5-nano-2025-08-07",
+    "gpt-5.1",
+    "gpt-5.1-2025-11-13",
 ]
 READ_EXTENSIONS = [
     ".txt",

assistant/common/models.py

Lines changed: 2 additions & 2 deletions
@@ -96,7 +96,7 @@ class GuildSettings(AssistantBaseModel):
     mention: bool = False
     mention_respond: bool = True
     enabled: bool = True  # Auto-reply channel
-    model: str = "gpt-5"
+    model: str = "gpt-5.1"
     embed_model: str = "text-embedding-3-small"  # Or text-embedding-3-large, text-embedding-ada-002
     collab_convos: bool = False
     reasoning_effort: str = "low"  # low, medium, high (or minimal for gpt-5)
@@ -106,7 +106,7 @@ class GuildSettings(AssistantBaseModel):
     auto_answer: bool = False  # Answer questions anywhere if one is detected and embedding is found for it
     auto_answer_threshold: float = 0.7  # 0.0 - 1.0 # Confidence threshold for auto-answer
     auto_answer_ignored_channels: t.List[int] = []  # Channel IDs to ignore auto-answer
-    auto_answer_model: str = "gpt-5"  # Model to use for auto-answer
+    auto_answer_model: str = "gpt-5.1"  # Model to use for auto-answer
 
     image_command: bool = True  # Allow image commands
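Because GuildSettings fields are typed defaults on a pydantic-style model, only newly created guild configs pick up gpt-5.1; guilds with a persisted model keep whatever value was saved. A small sketch of that behaviour with a stripped-down, illustrative stand-in (the real class derives from AssistantBaseModel and carries many more fields):

```python
from pydantic import BaseModel


class GuildSettingsSketch(BaseModel):  # illustrative stand-in, not the cog's class
    model: str = "gpt-5.1"
    auto_answer_model: str = "gpt-5.1"


fresh = GuildSettingsSketch()                 # a brand-new guild picks up the new defaults
print(fresh.model, fresh.auto_answer_model)   # gpt-5.1 gpt-5.1

stored = GuildSettingsSketch(model="gpt-4o")  # a persisted config keeps its saved value
print(stored.model)                           # gpt-4o
```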
