Commit 12daba6

test(openai): raise token limit for o1 test (#33118)
`test_o1[False-False]` was sometimes failing because the OpenAI o1 model was hitting its token limit when capped at only 100 tokens.
1 parent eaf8dce commit 12daba6

File tree

1 file changed: +4 −2 lines
  • libs/partners/openai/tests/integration_tests/chat_models/test_base.py


libs/partners/openai/tests/integration_tests/chat_models/test_base.py

Lines changed: 4 additions & 2 deletions
@@ -1017,10 +1017,12 @@ async def test_astream_response_format() -> None:
 @pytest.mark.parametrize("use_responses_api", [False, True])
 @pytest.mark.parametrize("use_max_completion_tokens", [True, False])
 def test_o1(use_max_completion_tokens: bool, use_responses_api: bool) -> None:
+    # o1 models need higher token limits for reasoning
+    o1_token_limit = 1000
     if use_max_completion_tokens:
-        kwargs: dict = {"max_completion_tokens": MAX_TOKEN_COUNT}
+        kwargs: dict = {"max_completion_tokens": o1_token_limit}
     else:
-        kwargs = {"max_tokens": MAX_TOKEN_COUNT}
+        kwargs = {"max_tokens": o1_token_limit}
     response = ChatOpenAI(
         model="o1",
         reasoning_effort="low",
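
Why 100 tokens was too few: o1 is a reasoning model, so part of the completion budget is consumed by hidden reasoning tokens before any visible output, and a 100-token cap can be exhausted before the answer appears. Below is a minimal standalone sketch of the raised limit in use; the model name, `reasoning_effort`, and token value come from the test above, while the prompt and the surrounding script are illustrative assumptions (requires `langchain-openai` installed and `OPENAI_API_KEY` set).

```python
# Minimal sketch, not part of the commit: the o1 test's settings in a
# standalone call. The prompt is hypothetical; model, reasoning_effort,
# and the 1000-token cap mirror the test.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="o1",
    reasoning_effort="low",
    # Per the commit message, the previous cap (MAX_TOKEN_COUNT, 100 tokens)
    # was sometimes spent entirely on reasoning, truncating the answer.
    max_completion_tokens=1000,
)
response = llm.invoke("In one sentence, what is 7 * 8?")
print(response.content)
```

Note the test parametrizes over both `max_completion_tokens` and `max_tokens`: OpenAI's newer reasoning models expect `max_completion_tokens` (which counts reasoning tokens against the budget), so the test covers both spellings of the cap.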
