
Commit bc9a2fd

Authored by DouweM, claude[bot], and Kludex
fix: Add gpt-5 models to reasoning model detection for temperature parameter handling (#2483)
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
Co-authored-by: Douwe Maan <[email protected]>
Co-authored-by: Marcelo Trylesinski <[email protected]>
1 parent 741c5c9 commit bc9a2fd

File tree

3 files changed (+87, -24 lines changed)

pydantic_ai_slim/pydantic_ai/profiles/openai.py

Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@ class OpenAIModelProfile(ModelProfile):

def openai_model_profile(model_name: str) -> ModelProfile:
    """Get the model profile for an OpenAI model."""
-    is_reasoning_model = model_name.startswith('o')
+    is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5')
    # Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later.
    # We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
    # when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
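The one-line change above widens the reasoning-model predicate so that gpt-5 family models are treated like the o-series models; per the commit message, this flag is what decides whether sampling settings such as temperature are forwarded to the API. The following is a minimal, hypothetical sketch of that pattern; `build_request_payload` is an illustrative helper, not pydantic-ai's actual internals.

# Illustrative sketch only: shows how a reasoning-model flag can gate sampling
# parameters such as `temperature` before a Chat Completions request is built.
from typing import Any


def is_reasoning_model(model_name: str) -> bool:
    # Same predicate as in the diff above: o-series and gpt-5 family models.
    return model_name.startswith('o') or model_name.startswith('gpt-5')


def build_request_payload(
    model_name: str,
    messages: list[dict[str, Any]],
    temperature: float | None = None,
) -> dict[str, Any]:
    payload: dict[str, Any] = {'model': model_name, 'messages': messages}
    # Reasoning models typically reject sampling settings, so omit them rather than erroring.
    if temperature is not None and not is_reasoning_model(model_name):
        payload['temperature'] = temperature
    return payload


assert 'temperature' not in build_request_payload('gpt-5', [], temperature=0.0)
assert build_request_payload('gpt-4o', [], temperature=0.0)['temperature'] == 0.0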
New file (a VCR-style cassette recording the gpt-5 request and response used by the new test)

Lines changed: 78 additions & 0 deletions

@@ -0,0 +1,78 @@
+interactions:
+- request:
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '104'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+    method: POST
+    parsed_body:
+      messages:
+      - content: What is the capital of France?
+        role: user
+      model: gpt-5
+      stream: false
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    headers:
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      connection:
+      - keep-alive
+      content-length:
+      - '772'
+      content-type:
+      - application/json
+      openai-organization:
+      - pydantic-28gund
+      openai-processing-ms:
+      - '2671'
+      openai-project:
+      - proj_dKobscVY9YJxeEaDJen54e3d
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      transfer-encoding:
+      - chunked
+    parsed_body:
+      choices:
+      - finish_reason: stop
+        index: 0
+        message:
+          annotations: []
+          content: Paris.
+          refusal: null
+          role: assistant
+      created: 1754902196
+      id: chatcmpl-C3IW4xlMbxWk92VDDKNyaEJjJrTmh
+      model: gpt-5-2025-08-07
+      object: chat.completion
+      service_tier: default
+      system_fingerprint: null
+      usage:
+        completion_tokens: 11
+        completion_tokens_details:
+          accepted_prediction_tokens: 0
+          audio_tokens: 0
+          reasoning_tokens: 0
+          rejected_prediction_tokens: 0
+        prompt_tokens: 13
+        prompt_tokens_details:
+          audio_tokens: 0
+          cached_tokens: 0
+        total_tokens: 24
+    status:
+      code: 200
+      message: OK
+version: 1
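The recorded request body above contains only `messages`, `model`, and `stream`, which confirms that no `temperature` was sent for gpt-5. A hedged sketch of inspecting such a cassette with PyYAML follows; the file path is hypothetical, since the diff view does not show where the new cassette lives.

# Sketch under assumptions: PyYAML is installed and `cassette_path` points at a
# cassette shaped like the recording above (the real path is not shown here).
import yaml

cassette_path = 'test_openai_model_settings_temperature_ignored_on_gpt_5.yaml'  # hypothetical path

with open(cassette_path) as f:
    cassette = yaml.safe_load(f)

request_body = cassette['interactions'][0]['request']['parsed_body']
assert request_body['model'] == 'gpt-5'
assert 'temperature' not in request_body  # the sampling setting was dropped client-side

response_body = cassette['interactions'][0]['response']['parsed_body']
assert response_body['choices'][0]['message']['content'] == 'Paris.'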

tests/models/test_openai.py

Lines changed: 8 additions & 23 deletions
@@ -776,7 +776,6 @@ async def test_image_url_input(allow_model_requests: None):
    )


-@pytest.mark.vcr()
async def test_openai_audio_url_input(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o-audio-preview', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(m)
@@ -787,7 +786,6 @@ async def test_openai_audio_url_input(allow_model_requests: None, openai_api_key
    )


-@pytest.mark.vcr()
async def test_document_url_input(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(m)
@@ -879,7 +877,6 @@ async def get_image() -> ImageUrl:
    )


-@pytest.mark.vcr()
async def test_image_as_binary_content_tool_response(
    allow_model_requests: None, image_content: BinaryContent, openai_api_key: str
):
@@ -960,7 +957,6 @@ async def get_image() -> BinaryContent:
    )


-@pytest.mark.vcr()
async def test_image_as_binary_content_input(
    allow_model_requests: None, image_content: BinaryContent, openai_api_key: str
):
@@ -981,7 +977,6 @@ async def test_audio_as_binary_content_input(
    assert result.output == snapshot('The name mentioned in the audio is Marcelo.')


-@pytest.mark.vcr()
async def test_document_as_binary_content_input(
    allow_model_requests: None, document_content: BinaryContent, openai_api_key: str
):
@@ -1045,15 +1040,13 @@ async def get_capital(country: str) -> str:
    assert result.output == snapshot('The capital of England is London.')


-@pytest.mark.vcr()
async def test_extra_headers(allow_model_requests: None, openai_api_key: str):
    # This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(m, model_settings=OpenAIModelSettings(extra_headers={'Extra-Header-Key': 'Extra-Header-Value'}))
    await agent.run('hello')


-@pytest.mark.vcr()
async def test_user_id(allow_model_requests: None, openai_api_key: str):
    # This test doesn't do anything, it's just here to ensure that calls with `user` don't cause errors, including type.
    # Since we use VCR, creating tests with an `httpx.Transport` is not possible.
@@ -1924,7 +1917,6 @@ async def get_temperature(city: str) -> float:
    )


-@pytest.mark.vcr()
async def test_openai_responses_model_thinking_part(allow_model_requests: None, openai_api_key: str):
    m = OpenAIResponsesModel('o3-mini', provider=OpenAIProvider(api_key=openai_api_key))
    settings = OpenAIResponsesModelSettings(openai_reasoning_effort='high', openai_reasoning_summary='detailed')
@@ -2009,7 +2001,6 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
    )


-@pytest.mark.vcr()
async def test_openai_model_thinking_part(allow_model_requests: None, openai_api_key: str):
    provider = OpenAIProvider(api_key=openai_api_key)
    responses_model = OpenAIResponsesModel('o3-mini', provider=provider)
@@ -2098,7 +2089,6 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
    )


-@pytest.mark.vcr()
async def test_openai_model_thinking_part_iter(allow_model_requests: None, openai_api_key: str):
    provider = OpenAIProvider(api_key=openai_api_key)
    responses_model = OpenAIResponsesModel('o3-mini', provider=provider)
@@ -2127,7 +2117,6 @@ async def test_openai_model_thinking_part_iter(allow_model_requests: None, opena
    )


-@pytest.mark.vcr()
async def test_openai_instructions_with_logprobs(allow_model_requests: None):
    # Create a mock response with logprobs
    c = completion_message(
@@ -2164,7 +2153,6 @@ async def test_openai_instructions_with_logprobs(allow_model_requests: None):
    ]


-@pytest.mark.vcr()
async def test_openai_web_search_tool_model_not_supported(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(
@@ -2175,7 +2163,6 @@ async def test_openai_web_search_tool_model_not_supported(allow_model_requests:
        await agent.run('What day is today?')


-@pytest.mark.vcr()
async def test_openai_web_search_tool(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o-search-preview', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(
@@ -2186,7 +2173,6 @@ async def test_openai_web_search_tool(allow_model_requests: None, openai_api_key
    assert result.output == snapshot('May 14, 2025, 8:51:29 AM ')


-@pytest.mark.vcr()
async def test_openai_web_search_tool_with_user_location(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o-search-preview', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(
@@ -2214,7 +2200,6 @@ async def test_openai_web_search_tool_with_user_location(allow_model_requests: N
    """)


-@pytest.mark.vcr()
async def test_reasoning_model_with_temperature(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('o3-mini', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(m, model_settings=OpenAIModelSettings(temperature=0.5))
@@ -2319,7 +2304,6 @@ def test_model_profile_strict_not_supported():
    )


-@pytest.mark.vcr
async def test_compatible_api_with_tool_calls_without_id(allow_model_requests: None, gemini_api_key: str):
    provider = OpenAIProvider(
        openai_client=AsyncOpenAI(
@@ -2358,7 +2342,6 @@ def test_openai_response_timestamp_milliseconds(allow_model_requests: None):
    assert response.timestamp == snapshot(datetime(2025, 6, 1, 3, 7, 48, tzinfo=timezone.utc))


-@pytest.mark.vcr()
async def test_openai_tool_output(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

@@ -2453,7 +2436,6 @@ async def get_user_country() -> str:
    )


-@pytest.mark.vcr()
async def test_openai_text_output_function(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

@@ -2533,7 +2515,6 @@ async def get_user_country() -> str:
    )


-@pytest.mark.vcr()
async def test_openai_native_output(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

@@ -2616,7 +2597,6 @@ async def get_user_country() -> str:
    )


-@pytest.mark.vcr()
async def test_openai_native_output_multiple(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

@@ -2705,7 +2685,6 @@ async def get_user_country() -> str:
    )


-@pytest.mark.vcr()
async def test_openai_prompted_output(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

@@ -2800,7 +2779,6 @@ async def get_user_country() -> str:
    )


-@pytest.mark.vcr()
async def test_openai_prompted_output_multiple(allow_model_requests: None, openai_api_key: str):
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

@@ -2959,7 +2937,6 @@ async def test_process_response_no_created_timestamp(allow_model_requests: None)
    assert response_message.timestamp == IsNow(tz=timezone.utc)


-@pytest.mark.anyio()
async def test_tool_choice_fallback(allow_model_requests: None) -> None:
    profile = OpenAIModelProfile(openai_supports_tool_choice_required=False).update(openai_model_profile('stub'))

@@ -2976,3 +2953,11 @@ async def test_tool_choice_fallback(allow_model_requests: None) -> None:
    )

    assert get_mock_chat_completion_kwargs(mock_client)[0]['tool_choice'] == 'auto'
+
+
+async def test_openai_model_settings_temperature_ignored_on_gpt_5(allow_model_requests: None, openai_api_key: str):
+    m = OpenAIModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key))
+    agent = Agent(m)
+
+    result = await agent.run('What is the capital of France?', model_settings=ModelSettings(temperature=0.0))
+    assert result.output == snapshot('Paris.')
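The new test exercises the user-facing behaviour: an Agent backed by gpt-5 can be run with a temperature in its model settings without the request failing. A standalone usage sketch of the same flow follows, assuming OPENAI_API_KEY is set in the environment; the model's output may of course vary.

# Standalone usage sketch (assumes OPENAI_API_KEY is set; output may vary).
import asyncio

from pydantic_ai import Agent
from pydantic_ai.settings import ModelSettings


async def main() -> None:
    agent = Agent('openai:gpt-5')
    # With this fix, the temperature setting is ignored for gpt-5 (detected as a
    # reasoning model) rather than being forwarded and rejected by the API.
    result = await agent.run(
        'What is the capital of France?',
        model_settings=ModelSettings(temperature=0.0),
    )
    print(result.output)  # e.g. 'Paris.'


if __name__ == '__main__':
    asyncio.run(main())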
