3 changes: 3 additions & 0 deletions pydantic_ai_slim/pydantic_ai/_parts_manager.py
@@ -312,6 +312,7 @@ def handle_tool_call_part(
tool_name: str,
args: str | dict[str, Any] | None,
tool_call_id: str | None = None,
id: str | None = None,
) -> ModelResponseStreamEvent:
"""Immediately create or fully-overwrite a ToolCallPart with the given information.

@@ -323,6 +324,7 @@ def handle_tool_call_part(
tool_name: The name of the tool being invoked.
args: The arguments for the tool call, either as a string, a dictionary, or None.
tool_call_id: An optional string identifier for this tool call.
id: An optional identifier for this tool call part.

Returns:
ModelResponseStreamEvent: A `PartStartEvent` indicating that a new tool call part
@@ -332,6 +334,7 @@
tool_name=tool_name,
args=args,
tool_call_id=tool_call_id or _generate_tool_call_id(),
id=id,
)
if vendor_part_id is None:
# vendor_part_id is None, so we unconditionally append a new ToolCallPart to the end of the list
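For reference, the new parameter flows through from streaming model implementations. A minimal sketch of the call, assuming the manager class defined in this module is `ModelResponsePartsManager` and using illustrative id values (the real call site is in the openai.py hunks further down):

from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()
# vendor_part_id keys streaming deltas to an existing part; tool_call_id is what the tool
# return is matched against; the new `id` is the provider's own identifier for the part
# (e.g. an OpenAI Responses function-call item id). All values here are illustrative.
event = manager.handle_tool_call_part(
    vendor_part_id='fc_123',
    tool_name='get_user_country',
    args='{}',
    tool_call_id='call_abc',
    id='fc_123',
)
# Per the docstring above, `event` is a PartStartEvent carrying the fully-formed ToolCallPart.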
7 changes: 7 additions & 0 deletions pydantic_ai_slim/pydantic_ai/messages.py
@@ -1052,6 +1052,13 @@ class BaseToolCallPart:
In case the tool call id is not provided by the model, Pydantic AI will generate a random one.
"""

_: KW_ONLY

id: str | None = None
"""An optional identifier of the tool call part, separate from the tool call ID.

This is used by some APIs like OpenAI Responses."""

def args_as_dict(self) -> dict[str, Any]:
"""Return the arguments as a Python dictionary.

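To illustrate the new field on the message type itself, here is a minimal sketch, assuming `ToolCallPart` takes `tool_name` and `args` positionally as in the `_process_response` change below, and reusing the ids from the recorded cassette at the end of this diff:

from pydantic_ai.messages import ToolCallPart

part = ToolCallPart(
    'final_result',
    '{"city": "Mexico City", "country": "Mexico"}',
    tool_call_id='call_LIXPi261Xx3dGYzlDsOoyHGk',
    id='fc_001fd29e2d5573f70068ece2ecc140819c97ca83bd4647a717',
)
assert part.tool_call_id != part.id  # the two identifiers are now carried separately

# `id` is keyword-only (note the `_: KW_ONLY` sentinel) and defaults to None,
# so existing constructions that omit it keep working:
legacy_part = ToolCallPart('final_result', '{}', tool_call_id='call_abc')
assert legacy_part.id is None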
20 changes: 12 additions & 8 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -1005,7 +1005,12 @@ def _process_response( # noqa: C901
items.append(TextPart(content.text, id=item.id))
elif isinstance(item, responses.ResponseFunctionToolCall):
items.append(
- ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
+ ToolCallPart(
+ item.name,
+ item.arguments,
+ tool_call_id=item.call_id,
+ id=item.id,
+ )
)
elif isinstance(item, responses.ResponseCodeInterpreterToolCall):
call_part, return_part, file_parts = _map_code_interpreter_tool_call(item, self.system)
@@ -1361,6 +1366,7 @@ async def _map_messages( # noqa: C901
elif isinstance(item, ToolCallPart):
call_id = _guard_tool_call_id(t=item)
call_id, id = _split_combined_tool_call_id(call_id)
id = id or item.id

param = responses.ResponseFunctionToolCallParam(
name=item.tool_name,
@@ -1724,7 +1730,8 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
vendor_part_id=chunk.item.id,
tool_name=chunk.item.name,
args=chunk.item.arguments,
- tool_call_id=_combine_tool_call_ids(chunk.item.call_id, chunk.item.id),
+ tool_call_id=chunk.item.call_id,
+ id=chunk.item.id,
)
elif isinstance(chunk.item, responses.ResponseReasoningItem):
pass
@@ -1963,18 +1970,15 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
return u


- def _combine_tool_call_ids(call_id: str, id: str | None) -> str:
+ def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
- # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
- # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
- return f'{call_id}|{id}' if id else call_id
+ # Before our `ToolCallPart` gained the `id` field alongside `tool_call_id` field, we combined the two fields into a single string stored on `tool_call_id`.


- def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
if '|' in combined_id:
call_id, id = combined_id.split('|', 1)
return call_id, id
else:
- return combined_id, None # pragma: no cover
+ return combined_id, None


def _map_code_interpreter_tool_call(
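The retained helper keeps message histories recorded before this change working: they stored the combined 'call_id|id' string on `tool_call_id`, whereas new histories keep the plain call id there and put the item id on `ToolCallPart.id` (the two sources are merged in `_map_messages` via `id = id or item.id`). A minimal sketch of its behaviour, importing the private helper and using illustrative ids:

from pydantic_ai.models.openai import _split_combined_tool_call_id

# Old-style combined value recorded before `ToolCallPart.id` existed:
assert _split_combined_tool_call_id('call_abc|fc_123') == ('call_abc', 'fc_123')
# New-style plain call id: nothing to split, the second element is None:
assert _split_combined_tool_call_id('call_abc') == ('call_abc', None)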
@@ -0,0 +1,148 @@
interactions:
- request:
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '834'
content-type:
- application/json
host:
- api.openai.com
method: POST
parsed_body:
include:
- reasoning.encrypted_content
input:
- content: What is the largest city in the user country?
role: user
- arguments: '{}'
call_id: call_ZWkVhdUjupo528U9dqgFeRkH
id: fc_68477f0bb8e4819cba6d781e174d77f8001fd29e2d5573f7
name: get_user_country
type: function_call
- call_id: call_ZWkVhdUjupo528U9dqgFeRkH
output: Mexico
type: function_call_output
- content: What is the largest city in the user country?
role: user
model: gpt-5
previous_response_id: null
stream: false
tool_choice: required
tools:
- description: The final response which ends this conversation
name: final_result
parameters:
additionalProperties: false
properties:
city:
type: string
country:
type: string
required:
- city
- country
type: object
strict: true
type: function
uri: https://api.openai.com/v1/responses
response:
headers:
alt-svc:
- h3=":443"; ma=86400
connection:
- keep-alive
content-length:
- '5572'
content-type:
- application/json
openai-organization:
- pydantic-28gund
openai-processing-ms:
- '6305'
openai-project:
- proj_dKobscVY9YJxeEaDJen54e3d
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
transfer-encoding:
- chunked
parsed_body:
background: false
billing:
payer: developer
created_at: 1760355047
error: null
id: resp_001fd29e2d5573f70068ece2e6dfbc819c96557f0de72802be
incomplete_details: null
instructions: null
max_output_tokens: null
max_tool_calls: null
metadata: {}
model: gpt-5-2025-08-07
object: response
output:
- encrypted_content: gAAAAABo7OLt_-yMcMz15n_JkwU0selGH2vqiwJDNU86YIjY_jQLXid4usIFjjCppiyOnJjtU_C6e7jUIKnfZRBt1DHVFMGpAVvTBZBVdJhXl0ypGjkAj3Wv_3ecAG9oU3DoUMKrbwEMqL0LaSfNSN1qgCTt-RL2sgeEDgFeiOpX40BWgS8tVMfR4_qBxJcp8KeYvw5niPgwcMF3UPIEjHlaVpglJH2SzZtTOdxeFDfYbnvdWTMvwYFIc0jKOREG_-hZE4AznhHdSLV2-I5nGlxuxqaI4GQCk-Fp8Cvcy15_NYYP62ii50VlR6HPp_gQZEetwgC5pThsiuuG7-n1hGOnsj8gZyjSKsMe2KpzlYzhT7ighmArDVEx8Utvp1FXikqGkEzt4RTqqPInp9kuvqQTSyd8JZ6BEetRl1EuZXT7zXrzLwFN7Vm_gqixmf6mLXZUw6vg6LqGkhSh5fo6C7akPTwwJXjVJ37Dzfejo6RiVKOT-_9sdYCHW2kZ9XfQAmRQfB97UpSZ8QrVfaKy_uRIHLexs8QrQvKuw-uHDQBAL3OEmSTzHzCQ-q7b0FHr514Z29l9etavHNVdpeleWGo6VEtLWGQyblIdIBtf946YnQvr6NYIR8uATn9Z91rr8FsFJTpJh_v5iGA2f8rfPRu27nmw-q8XnPVc_FYCZDk08r_YhdEJZn1INBi8wYSWmpib8VxNpkFO7FFRuK-F8rh3MTpYgIOqPQYbf3LCRvKukTwv1b3mjSKVpHQSm_s6s7djdD-rLuc22-3_MLd0ii4_oOT8w51TQIM61LtonGvxUqf4oKHSUFCVnrWWiT-0ttdpwpJ_iB5frnEeY2mWyU1u7sd38BI3dOzoM82IFaIm98g9fa99bmoA7Z7gI60tzyF8YbJmWF-PCwyKHJ7B1MbCBonO36NmeEM-SplrR54fGykxTmwvtbYGhd5f0cdYzD0zulRDj-AhOd96rrUB_fIgoQGTXey8L_w0whcnVTWdG6is-rx8373Sz8ZRoE5RiLWW1mfHzVXxwslphx4BedRVF0tL-1YO7sg5MXhHCf6hpw8dOht-21NMrb1F1DQadFE_fhySFl-TgOD5BlhAuupLMsqcCIa4lcXP_loyA4ERP6WSdz2Bybz7_1eOiflfVodRrNqvr_DnL0NEXD_JkYTeIn84ziarFV7U7ZnkMvRiA_p1fWdbHTsE_8lu1rsf8fcJ1e76_6ycPkOc4TrOZw8gVRb7gIbMMVrv72BT_sFhW7GkXrzCQpQaeybmRw-bjFhkMMjMDYGXkA_H0q2Zfyh3zCOoa40hl2cqRWp7n1XuafmtKG_F8e9hyWox0q7AhZr5HOOaHz8r3O3-dmNl1KP52bqA8S72rLDslAOQlDupmAQgAmkm5ApYeYcEBredN78jHQ1pviUEI2-3qr4ClXZFHPa54AJ_q4HQ-EcKXEcYQglG21mSUy_tFQF-m4X46Qu8yYWcBVW4E0CG3wbvYx0BCdbc5RhIDkJo1elxLK8XS64lpFkCWy62xLVeMuVuCj8q84-Kk7tZ7gtMtLV9PHQCdbl3s2pAzMfuNIBJog6-HPmwha2n9T0Md5qF7OqCtnYWOWUfIMmQVcdW-ECGsQy9uIUmpsOjdtH31hrX3MUEhIOUB5xErLwfp-_s22ciAY_ap3JlYAiTKGlMCxKxTzK7wWEG_nYhDXC1Afj2z-tgvYhtn9MyDf2v0aIpDM9BoTOLEO-ButzylJ06pJlrJhpdvklvwJxUiuhlwy0bHNilb4Zv4QwnUv3DCrIeKe1ne90vEXe6YlDwSMeWJcz1DZIQBvVcNlN8q2y8Rae3lMWzsvD0YXrcXp02ckYoLSOQZgNYviGYLsgRgPGiIkncjSDt7WWV6td3l-zTrP6MT_hKigmg5F5_F6tS1bKb0jlQBZd0NP-_L_TPqMGRjCYG8johd6VyMiagslDjxG39Dh2wyTI19ZW7h_AOuOpnfkt2armqiq6iGfevA3malqkNakb6mFAS04J9O0butWVAw4yiPCEcLuDNAzzi_qrqLee4gkjh0NplvfGCaE6qqYms61GJbJC4wge6vjyTakurbqWEV3YoR3y_dn-0pjQ7TOx9kkruDwg0nZIV5O6yYxaulmbuvo3fs5CZb9ptZPD0MzGZj7CZU2MDCa4a4gr0McOx2MricxSzIu6emuRUzZuC6C1JxPRC00M0TrZNMIe_WVa9fXDLV1ULEAIMwMXzNT9zV6yiYQCwhkp30Wqde3W0LlIRpSbDuJXcvT8OCbXkdPNIScccdT9LvUQQ--hU2P45kisOev3TYn7yv-pdxM3u1KFNwuFxedSArMBPg7GDz1BOxDQRzv0mfwbf_CcoFbuyj7Tf4zWO46HVdHeRNbvIE--bnaSYD-UFaKknp8ZsBQQhBU_2TEca3fKwmg81-g7Vdb28QUZEuPzgE4ekxZejkKpiKqlLC5nJYgvXrqk2H35D51mYdzPs0ST05Mc41x9MFm_YOLxSFyA0yGAKVINmD5wT6kvRflPkgoksd2ryIvo4KMw3oZQKodv5By0mSJ8iX2vhTGylxiM8wj-ICyNuOsaRFrcMSpX7tZbXcDyysApdmx217BSADoQiNZBLngF7ptxc2QGyo3CwuDjaljwmSgL9KeGthd1RJFd826M287IPpCjLM4WRquCL_E0pQryNqOMn-ZEOCAlBjE37290EhkjKbhiGBEnHUvSbhoH4nL47AmunP_Q5aqh5173VfyoyaybuS3fXjQ5WO0kyFjMdD-a7C6PVdwToCTP-TljoF2YnQKCiqUGs9gNHS9mYhQSXzY4uuGlTHLfKB4JKS5_MQHvwI9zCbTvVG854fPuo_2mzSh-y8TSzBWPokhYWI_q095Sh6tOqDIJNMGyjI2GDFRSyKpKhIFCLyU2JEo9B6l91jPlir0XI8ZOQfBd9J0I4JIqnyoj40_1bF1zUDGc014bdGfxazxwlGph_ysKAP39wV7X9DBFS3ZmeSIn-r3s-sci0HmwnJUb2r03m40rFuNTV1cJMAFP7ZY7PQQQ0TtlO_al0uedaOWylLauap_eoRqc6xGJ2rSz1e7cOevksUlAqzK5xknYKHlsW970xuDGHKOZnKPg8O9nb2PKrcjwEQF5RFPc3l8TtOUXPhhvTERZFGoEuGuSuSp1cJhzba06yPnL-wE3CstYUm3jvkaUme6kKqM4tWBCQDg-_2PYf24xXYlmkIklylskqId826Y3pVVUd7e0vQO0POPeVYU1qwtTp7Ln-MhYEWexxptdNkVQ-kWx63w6HXF6_kefSxaf0UcvL8tOV73u7w_udle9MC_TXgwJZpoW2tSi5HETjQ_i28FAP2iJmclWOm3gP08cMiXvgpTpjzh6meBdvKepnifl_ivPzRnyjz3mYCZH-UJ4LmOHIonv-8arnckhCwHoFIpaIX7eSZyY0JcbBETKImtUwrlTSlbD8l02KDtqw2FJURtEWI5dC1sTS8c2HcyjXyQDA9A25a0M1yIgZyaadODGQ1zoa9xXB
id: rs_001fd29e2d5573f70068ece2e816fc819c82755f049c987ea4
summary: []
type: reasoning
- arguments: '{"city":"Mexico City","country":"Mexico"}'
call_id: call_LIXPi261Xx3dGYzlDsOoyHGk
id: fc_001fd29e2d5573f70068ece2ecc140819c97ca83bd4647a717
name: final_result
status: completed
type: function_call
parallel_tool_calls: true
previous_response_id: null
prompt_cache_key: null
reasoning:
effort: medium
summary: null
safety_identifier: null
service_tier: default
status: completed
store: true
temperature: 1.0
text:
format:
type: text
verbosity: medium
tool_choice: required
tools:
- description: The final response which ends this conversation
name: final_result
parameters:
additionalProperties: false
properties:
city:
type: string
country:
type: string
required:
- city
- country
type: object
strict: true
type: function
top_logprobs: 0
top_p: 1.0
truncation: disabled
usage:
input_tokens: 103
input_tokens_details:
cached_tokens: 0
output_tokens: 409
output_tokens_details:
reasoning_tokens: 384
total_tokens: 512
user: null
status:
code: 200
message: OK
version: 1