libs/partners/openai/tests/integration_tests/chat_models
1 file changed: 4 additions, 2 deletions

@@ -1017,10 +1017,12 @@ async def test_astream_response_format() -> None:
 @pytest.mark.parametrize("use_responses_api", [False, True])
 @pytest.mark.parametrize("use_max_completion_tokens", [True, False])
 def test_o1(use_max_completion_tokens: bool, use_responses_api: bool) -> None:
+    # o1 models need higher token limits for reasoning
+    o1_token_limit = 1000
     if use_max_completion_tokens:
-        kwargs: dict = {"max_completion_tokens": MAX_TOKEN_COUNT}
+        kwargs: dict = {"max_completion_tokens": o1_token_limit}
     else:
-        kwargs = {"max_tokens": MAX_TOKEN_COUNT}
+        kwargs = {"max_tokens": o1_token_limit}
     response = ChatOpenAI(
         model="o1",
         reasoning_effort="low",
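For context, a minimal sketch of what the updated test likely looks like after this change. The hunk cuts off at reasoning_effort="low", so the use_responses_api wiring, the prompt, and the final assertion below are assumptions, not part of the visible diff:

import pytest
from langchain_openai import ChatOpenAI


@pytest.mark.parametrize("use_responses_api", [False, True])
@pytest.mark.parametrize("use_max_completion_tokens", [True, False])
def test_o1(use_max_completion_tokens: bool, use_responses_api: bool) -> None:
    # o1 models need higher token limits for reasoning
    o1_token_limit = 1000
    if use_max_completion_tokens:
        kwargs: dict = {"max_completion_tokens": o1_token_limit}
    else:
        kwargs = {"max_tokens": o1_token_limit}
    response = ChatOpenAI(
        model="o1",
        reasoning_effort="low",
        use_responses_api=use_responses_api,  # assumed: how the parametrized flag is passed
        **kwargs,
    ).invoke("Say hello.")  # assumed prompt; the call site is not shown in the hunk
    assert response.content  # assumed assertion; not visible in the hunk

The substantive change is simply replacing the shared MAX_TOKEN_COUNT constant with a local o1_token_limit of 1000, since reasoning models consume part of the token budget on internal reasoning before producing output.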