Skip to content

Commit a8f67aa

Browse files
committed
Support for new GPT-4 Turbo model
1 parent df9150b commit a8f67aa

File tree

3 files changed

+32
-3
lines changed

3 files changed

+32
-3
lines changed

tests/test_openai.py

Lines changed: 26 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -26,7 +26,7 @@ def test_token_counter(self):
2626
]
2727

2828
# check if test covers all models (increase if new models are added)
29-
assert len(OpenAIChatModel.__args__) == 14 #  type: ignore
29+
assert len(OpenAIChatModel.__args__) == 16 #  type: ignore
3030

3131
client = AsyncTypeOpenAI(api_key="mock")
3232

@@ -42,9 +42,34 @@ def test_token_counter(self):
4242
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-32k") == 27
4343
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-32k-0314") == 27
4444
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-32k-0613") == 27
45+
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-turbo-preview") == 27
4546
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-1106-preview") == 27
47+
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-0125-preview") == 27
4648
assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-vision-preview") == 27
4749

50+
def test_max_token_counter(self):
51+
# check if test covers all models (increase if new models are added)
52+
assert len(OpenAIChatModel.__args__) == 16 #  type: ignore
53+
54+
client = AsyncTypeOpenAI(api_key="mock")
55+
56+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo") == 4096
57+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-0301") == 4096
58+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-0613") == 4096
59+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-1106") == 16384
60+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-16k") == 16384
61+
assert client.chat.completions.max_tokens_of_model("gpt-3.5-turbo-16k-0613") == 16384
62+
assert client.chat.completions.max_tokens_of_model("gpt-4") == 8192
63+
assert client.chat.completions.max_tokens_of_model("gpt-4-0314") == 8192
64+
assert client.chat.completions.max_tokens_of_model("gpt-4-0613") == 8192
65+
assert client.chat.completions.max_tokens_of_model("gpt-4-32k") == 32768
66+
assert client.chat.completions.max_tokens_of_model("gpt-4-32k-0314") == 32768
67+
assert client.chat.completions.max_tokens_of_model("gpt-4-32k-0613") == 32768
68+
assert client.chat.completions.max_tokens_of_model("gpt-4-turbo-preview") == 128_000
69+
assert client.chat.completions.max_tokens_of_model("gpt-4-1106-preview") == 128_000
70+
assert client.chat.completions.max_tokens_of_model("gpt-4-0125-preview") == 128_000
71+
assert client.chat.completions.max_tokens_of_model("gpt-4-vision-preview") == 128_000
72+
4873
# -
4974

5075
@pytest.fixture

typegpt/openai/base_chat_completion.py

Lines changed: 3 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -15,7 +15,7 @@ def max_tokens_of_model(model: OpenAIChatModel) -> int:
1515
return 8192
1616
case "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613":
1717
return 32768
18-
case "gpt-4-1106-preview" | "gpt-4-vision-preview":
18+
case "gpt-4-turbo-preview" | "gpt-4-1106-preview" | "gpt-4-0125-preview" | "gpt-4-vision-preview":
1919
return 128_000
2020

2121
# copied from OpenAI: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
@@ -49,6 +49,8 @@ def num_tokens_from_messages(cls, messages: list[EncodedMessage], model: OpenAIC
4949
"gpt-3.5-turbo-16k-0613",
5050
"gpt-4-32k-0314",
5151
"gpt-4-32k-0613",
52+
"gpt-4-turbo-preview",
53+
"gpt-4-0125-preview",
5254
"gpt-4-1106-preview",
5355
"gpt-4-vision-preview",
5456
):

typegpt/openai/views.py

Lines changed: 3 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -14,7 +14,9 @@
1414
"gpt-4-32k", # gpt-4 32k
1515
"gpt-4-32k-0314",
1616
"gpt-4-32k-0613",
17-
"gpt-4-1106-preview", # gpt-4 turbo
17+
"gpt-4-turbo-preview", # gpt-4 turbo
18+
"gpt-4-1106-preview",
19+
"gpt-4-0125-preview",
1820
"gpt-4-vision-preview", # gpt-4 vision
1921
]
2022

0 commit comments

Comments (0)