Skip to content

Commit ca09166

Browse files
committed
Support for gpt-3.5-turbo-0125
1 parent 10219c0 commit ca09166

File tree

3 files changed: +7 additions, -3 deletions

tests/test_openai.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,14 +26,15 @@ def test_token_counter(self):
2626
]
2727

2828
# check if test covers all models (increase if new models are added)
29-
assert len(OpenAIChatModel.__args__) == 16 #  type: ignore
29+
assert len(OpenAIChatModel.__args__) == 17 #  type: ignore
3030

3131
client = AsyncTypeOpenAI(api_key="mock")
3232

3333
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo") == 27
3434
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-0301") == 29
3535
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-0613") == 27
3636
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-1106") == 27
37+
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-0125") == 27
3738
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-16k") == 27
3839
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-16k-0613") == 27
3940
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4") == 27
@@ -49,14 +50,15 @@ def test_token_counter(self):
4950

5051
def test_max_token_counter(self):
5152
# check if test covers all models (increase if new models are added)
52-
assert len(OpenAIChatModel.__args__) == 16 #  type: ignore
53+
assert len(OpenAIChatModel.__args__) == 17 #  type: ignore
5354

5455
client = AsyncTypeOpenAI(api_key="mock")
5556

5657
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo") == 4096
5758
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-0301") == 4096
5859
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-0613") == 4096
5960
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-1106") == 16384
61+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-0125") == 16384
6062
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-16k") == 16384
6163
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-16k-0613") == 16384
6264
assert client.chat.completions.max_tokens_of_model("gpt-4") == 8192

typegpt/openai/base_chat_completion.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ def max_tokens_of_model(model: OpenAIChatModel) -> int:
99
match model:
1010
case "gpt-3.5-turbo" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613":
1111
return 4096
12-
case "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-16k-0613" | "gpt-3.5-turbo-1106":
12+
case "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-16k-0613" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-0125":
1313
return 16384
1414
case "gpt-4" | "gpt-4-0314" | "gpt-4-0613":
1515
return 8192
@@ -46,6 +46,7 @@ def num_tokens_from_messages(cls, messages: list[EncodedMessage], model: OpenAIC
4646
"gpt-4-0613",
4747
"gpt-3.5-turbo-0613",
4848
"gpt-3.5-turbo-1106",
49+
"gpt-3.5-turbo-0125",
4950
"gpt-3.5-turbo-16k-0613",
5051
"gpt-4-32k-0314",
5152
"gpt-4-32k-0613",

typegpt/openai/views.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
"gpt-3.5-turbo-0301",
77
"gpt-3.5-turbo-0613",
88
"gpt-3.5-turbo-1106",
9+
"gpt-3.5-turbo-0125",
910
"gpt-3.5-turbo-16k", # 3.5 turbo 16k
1011
"gpt-3.5-turbo-16k-0613",
1112
"gpt-4", # gpt-4

0 commit comments

Comments (0)