Skip to content

Add new OpenAI GPT-5 models #2503

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions pydantic_ai_slim/pydantic_ai/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,13 @@
'gpt-4o-mini-search-preview-2025-03-11',
'gpt-4o-search-preview',
'gpt-4o-search-preview-2025-03-11',
'gpt-5',
'gpt-5-2025-08-07',
'gpt-5-chat-latest',
'gpt-5-mini',
'gpt-5-mini-2025-08-07',
'gpt-5-nano',
'gpt-5-nano-2025-08-07',
'grok:grok-4',
'grok:grok-4-0709',
'grok:grok-3',
Expand Down Expand Up @@ -313,11 +320,18 @@
'openai:gpt-4o-mini-search-preview-2025-03-11',
'openai:gpt-4o-search-preview',
'openai:gpt-4o-search-preview-2025-03-11',
'openai:gpt-5',
'openai:gpt-5-2025-08-07',
'openai:gpt-5-chat-latest',
'openai:gpt-5-mini',
'openai:gpt-5-mini-2025-08-07',
'openai:gpt-5-nano',
'openai:gpt-5-nano-2025-08-07',
'openai:o1',
'openai:o1-2024-12-17',
'openai:o1-mini',
'openai:o1-mini-2024-09-12',
'openai:o1-preview',
'openai:o1-preview-2024-09-12',
'openai:o1-pro',
'openai:o1-pro-2025-03-19',
Expand Down
11 changes: 8 additions & 3 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,10 @@
from openai.types.chat.chat_completion_content_part_image_param import ImageURL
from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio
from openai.types.chat.chat_completion_content_part_param import File, FileFile
from openai.types.chat.chat_completion_message_function_tool_call import ChatCompletionMessageFunctionToolCall
from openai.types.chat.chat_completion_message_function_tool_call_param import (
ChatCompletionMessageFunctionToolCallParam,
)
from openai.types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from openai.types.chat.completion_create_params import (
WebSearchOptions,
Expand Down Expand Up @@ -416,6 +420,7 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons
items.extend(split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags))
if choice.message.tool_calls is not None:
for c in choice.message.tool_calls:
c = cast(ChatCompletionMessageFunctionToolCall, c)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We shouldn't be doing this. What is the type of c?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Kludex the type of choice.message.tool_calls is ChatCompletionMessageToolCall, which is

ChatCompletionMessageToolCall: TypeAlias = Annotated[
    Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall],
    PropertyInfo(discriminator="type"),
]

that's why I use cast.

part = ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)
part.tool_call_id = _guard_tool_call_id(part)
items.append(part)
Expand Down Expand Up @@ -474,7 +479,7 @@ async def _map_messages(self, messages: list[ModelMessage]) -> list[chat.ChatCom
openai_messages.append(item)
elif isinstance(message, ModelResponse):
texts: list[str] = []
tool_calls: list[chat.ChatCompletionMessageToolCallParam] = []
tool_calls: list[ChatCompletionMessageFunctionToolCallParam] = []
for item in message.parts:
if isinstance(item, TextPart):
texts.append(item.content)
Expand Down Expand Up @@ -505,8 +510,8 @@ async def _map_messages(self, messages: list[ModelMessage]) -> list[chat.ChatCom
return openai_messages

@staticmethod
def _map_tool_call(t: ToolCallPart) -> chat.ChatCompletionMessageToolCallParam:
return chat.ChatCompletionMessageToolCallParam(
def _map_tool_call(t: ToolCallPart) -> ChatCompletionMessageFunctionToolCallParam:
return ChatCompletionMessageFunctionToolCallParam(
id=_guard_tool_call_id(t=t),
type='function',
function={'name': t.tool_name, 'arguments': t.args_as_json_str()},
Expand Down
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ dependencies = [
# WARNING if you add optional groups, please update docs/install.md
logfire = ["logfire>=3.11.0"]
# Models
openai = ["openai>=1.92.0"]
openai = ["openai>=1.99.3"]
cohere = ["cohere>=5.16.0; platform_system != 'Emscripten'"]
vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
google = ["google-genai>=1.28.0"]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,18 @@ interactions:
uri: https://us.inference.heroku.com/available-models
response:
headers:
cache-control:
- no-cache, no-store, must-revalidate
content-length:
- '760'
content-security-policy:
- default-src 'none'; frame-ancestors 'none'
content-type:
- application/json
expires:
- '0'
pragma:
- no-cache
strict-transport-security:
- max-age=63072000
parsed_body:
Expand Down
9 changes: 5 additions & 4 deletions tests/models/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@
ChoiceDeltaToolCallFunction,
)
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from openai.types.chat.chat_completion_message_function_tool_call import ChatCompletionMessageFunctionToolCall
from openai.types.chat.chat_completion_message_tool_call import Function
from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
from openai.types.completion_usage import CompletionUsage, PromptTokensDetails
Expand Down Expand Up @@ -241,7 +242,7 @@ async def test_request_structured_response(allow_model_requests: None):
content=None,
role='assistant',
tool_calls=[
chat.ChatCompletionMessageToolCall(
ChatCompletionMessageFunctionToolCall(
id='123',
function=Function(arguments='{"response": [1, 2, 123]}', name='final_result'),
type='function',
Expand Down Expand Up @@ -292,7 +293,7 @@ async def test_request_tool_call(allow_model_requests: None):
content=None,
role='assistant',
tool_calls=[
chat.ChatCompletionMessageToolCall(
ChatCompletionMessageFunctionToolCall(
id='1',
function=Function(arguments='{"loc_name": "San Fransisco"}', name='get_location'),
type='function',
Expand All @@ -311,7 +312,7 @@ async def test_request_tool_call(allow_model_requests: None):
content=None,
role='assistant',
tool_calls=[
chat.ChatCompletionMessageToolCall(
ChatCompletionMessageFunctionToolCall(
id='2',
function=Function(arguments='{"loc_name": "London"}', name='get_location'),
type='function',
Expand Down Expand Up @@ -722,7 +723,7 @@ async def test_parallel_tool_calls(allow_model_requests: None, parallel_tool_cal
content=None,
role='assistant',
tool_calls=[
chat.ChatCompletionMessageToolCall(
ChatCompletionMessageFunctionToolCall(
id='123',
function=Function(arguments='{"response": [1, 2, 3]}', name='final_result'),
type='function',
Expand Down
10 changes: 5 additions & 5 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.