Commit 1f8d794

Add new ToolCallPart.id field for OpenAI Responses (#3151)
1 parent e119392 commit 1f8d794

7 files changed: +348 -46 lines changed

pydantic_ai_slim/pydantic_ai/_parts_manager.py

Lines changed: 3 additions & 0 deletions
@@ -312,6 +312,7 @@ def handle_tool_call_part(
         tool_name: str,
         args: str | dict[str, Any] | None,
         tool_call_id: str | None = None,
+        id: str | None = None,
     ) -> ModelResponseStreamEvent:
         """Immediately create or fully-overwrite a ToolCallPart with the given information.

@@ -323,6 +324,7 @@ def handle_tool_call_part(
             tool_name: The name of the tool being invoked.
             args: The arguments for the tool call, either as a string, a dictionary, or None.
             tool_call_id: An optional string identifier for this tool call.
+            id: An optional identifier for this tool call part.

         Returns:
             ModelResponseStreamEvent: A `PartStartEvent` indicating that a new tool call part
@@ -332,6 +334,7 @@ def handle_tool_call_part(
             tool_name=tool_name,
             args=args,
             tool_call_id=tool_call_id or _generate_tool_call_id(),
+            id=id,
         )
         if vendor_part_id is None:
             # vendor_part_id is None, so we unconditionally append a new ToolCallPart to the end of the list
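
Below is a quick sketch of how a streaming model adapter can pass the new field through the parts manager. The keyword arguments mirror the call made in the streaming code of pydantic_ai/models/openai.py further down in this commit; the concrete id values are made up, and it is assumed that ModelResponsePartsManager can be constructed with no arguments.

from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()  # assumed no-arg construction
event = manager.handle_tool_call_part(
    vendor_part_id='fc_123',   # hypothetical vendor part id
    tool_name='get_user_country',
    args='{}',
    tool_call_id='call_123',   # hypothetical call id
    id='fc_123',               # the new part-level id added by this commit
)
print(type(event).__name__)    # a PartStartEvent, per the docstring above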

pydantic_ai_slim/pydantic_ai/messages.py

Lines changed: 7 additions & 0 deletions
@@ -1052,6 +1052,13 @@ class BaseToolCallPart:
     In case the tool call id is not provided by the model, Pydantic AI will generate a random one.
     """

+    _: KW_ONLY
+
+    id: str | None = None
+    """An optional identifier of the tool call part, separate from the tool call ID.
+
+    This is used by some APIs like OpenAI Responses."""
+
     def args_as_dict(self) -> dict[str, Any]:
         """Return the arguments as a Python dictionary.

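A minimal sketch of the new keyword-only field as seen from user code, using the identifiers recorded in the cassette below; it assumes a pydantic_ai install that includes this commit.

from pydantic_ai.messages import ToolCallPart

part = ToolCallPart(
    'get_user_country',
    '{}',
    tool_call_id='call_ZWkVhdUjupo528U9dqgFeRkH',  # OpenAI Responses call_id
    id='fc_68477f0bb8e4819cba6d781e174d77f8001fd29e2d5573f7',  # Responses item id, kept separately
)
assert part.id != part.tool_call_id

Because of the `_: KW_ONLY` sentinel, `id` can only be passed by keyword, so existing positional `ToolCallPart(...)` calls keep working unchanged.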

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 12 additions & 8 deletions
@@ -1008,7 +1008,12 @@ def _process_response( # noqa: C901
                         items.append(TextPart(content.text, id=item.id))
             elif isinstance(item, responses.ResponseFunctionToolCall):
                 items.append(
-                    ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
+                    ToolCallPart(
+                        item.name,
+                        item.arguments,
+                        tool_call_id=item.call_id,
+                        id=item.id,
+                    )
                 )
             elif isinstance(item, responses.ResponseCodeInterpreterToolCall):
                 call_part, return_part, file_parts = _map_code_interpreter_tool_call(item, self.system)
@@ -1364,6 +1369,7 @@ async def _map_messages( # noqa: C901
                     elif isinstance(item, ToolCallPart):
                         call_id = _guard_tool_call_id(t=item)
                         call_id, id = _split_combined_tool_call_id(call_id)
+                        id = id or item.id

                         param = responses.ResponseFunctionToolCallParam(
                             name=item.tool_name,
@@ -1727,7 +1733,8 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
                         vendor_part_id=chunk.item.id,
                         tool_name=chunk.item.name,
                         args=chunk.item.arguments,
-                        tool_call_id=_combine_tool_call_ids(chunk.item.call_id, chunk.item.id),
+                        tool_call_id=chunk.item.call_id,
+                        id=chunk.item.id,
                     )
                 elif isinstance(chunk.item, responses.ResponseReasoningItem):
                     pass
@@ -1966,18 +1973,15 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
     return u


-def _combine_tool_call_ids(call_id: str, id: str | None) -> str:
+def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
     # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
-    # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
-    return f'{call_id}|{id}' if id else call_id
+    # Before our `ToolCallPart` gained the `id` field alongside the `tool_call_id` field, we combined the two fields into a single string stored on `tool_call_id`.

-
-def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
     if '|' in combined_id:
         call_id, id = combined_id.split('|', 1)
         return call_id, id
     else:
-        return combined_id, None  # pragma: no cover
+        return combined_id, None


 def _map_code_interpreter_tool_call(
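
For reference, here is a standalone re-implementation sketch of the backward-compatibility split shown above (the real helper is the private `_split_combined_tool_call_id` in `pydantic_ai.models.openai`; the sample ids are hypothetical). Messages stored before this change carry a combined `'call_id|id'` string in `tool_call_id`, while new messages keep the Responses item id on `ToolCallPart.id`.

def split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
    # Legacy format: 'call_id|id' packed into a single tool_call_id string.
    if '|' in combined_id:
        call_id, id = combined_id.split('|', 1)
        return call_id, id
    # New format: tool_call_id holds only the call_id; the item id lives on ToolCallPart.id.
    return combined_id, None


assert split_combined_tool_call_id('call_abc|fc_xyz') == ('call_abc', 'fc_xyz')
assert split_combined_tool_call_id('call_abc') == ('call_abc', None)
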
Lines changed: 148 additions & 0 deletions
interactions:
- request:
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '834'
      content-type:
      - application/json
      host:
      - api.openai.com
    method: POST
    parsed_body:
      include:
      - reasoning.encrypted_content
      input:
      - content: What is the largest city in the user country?
        role: user
      - arguments: '{}'
        call_id: call_ZWkVhdUjupo528U9dqgFeRkH
        id: fc_68477f0bb8e4819cba6d781e174d77f8001fd29e2d5573f7
        name: get_user_country
        type: function_call
      - call_id: call_ZWkVhdUjupo528U9dqgFeRkH
        output: Mexico
        type: function_call_output
      - content: What is the largest city in the user country?
        role: user
      model: gpt-5
      previous_response_id: null
      stream: false
      tool_choice: required
      tools:
      - description: The final response which ends this conversation
        name: final_result
        parameters:
          additionalProperties: false
          properties:
            city:
              type: string
            country:
              type: string
          required:
          - city
          - country
          type: object
        strict: true
        type: function
    uri: https://api.openai.com/v1/responses
  response:
    headers:
      alt-svc:
      - h3=":443"; ma=86400
      connection:
      - keep-alive
      content-length:
      - '5572'
      content-type:
      - application/json
      openai-organization:
      - pydantic-28gund
      openai-processing-ms:
      - '6305'
      openai-project:
      - proj_dKobscVY9YJxeEaDJen54e3d
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      transfer-encoding:
      - chunked
    parsed_body:
      background: false
      billing:
        payer: developer
      created_at: 1760355047
      error: null
      id: resp_001fd29e2d5573f70068ece2e6dfbc819c96557f0de72802be
      incomplete_details: null
      instructions: null
      max_output_tokens: null
      max_tool_calls: null
      metadata: {}
      model: gpt-5-2025-08-07
      object: response
      output:
- encrypted_content: gAAAAABo7OLt_-yMcMz15n_JkwU0selGH2vqiwJDNU86YIjY_jQLXid4usIFjjCppiyOnJjtU_C6e7jUIKnfZRBt1DHVFMGpAVvTBZBVdJhXl0ypGjkAj3Wv_3ecAG9oU3DoUMKrbwEMqL0LaSfNSN1qgCTt-RL2sgeEDgFeiOpX40BWgS8tVMfR4_qBxJcp8KeYvw5niPgwcMF3UPIEjHlaVpglJH2SzZtTOdxeFDfYbnvdWTMvwYFIc0jKOREG_-hZE4AznhHdSLV2-I5nGlxuxqaI4GQCk-Fp8Cvcy15_NYYP62ii50VlR6HPp_gQZEetwgC5pThsiuuG7-n1hGOnsj8gZyjSKsMe2KpzlYzhT7ighmArDVEx8Utvp1FXikqGkEzt4RTqqPInp9kuvqQTSyd8JZ6BEetRl1EuZXT7zXrzLwFN7Vm_gqixmf6mLXZUw6vg6LqGkhSh5fo6C7akPTwwJXjVJ37Dzfejo6RiVKOT-_9sdYCHW2kZ9XfQAmRQfB97UpSZ8QrVfaKy_uRIHLexs8QrQvKuw-uHDQBAL3OEmSTzHzCQ-q7b0FHr514Z29l9etavHNVdpeleWGo6VEtLWGQyblIdIBtf946YnQvr6NYIR8uATn9Z91rr8FsFJTpJh_v5iGA2f8rfPRu27nmw-q8XnPVc_FYCZDk08r_YhdEJZn1INBi8wYSWmpib8VxNpkFO7FFRuK-F8rh3MTpYgIOqPQYbf3LCRvKukTwv1b3mjSKVpHQSm_s6s7djdD-rLuc22-3_MLd0ii4_oOT8w51TQIM61LtonGvxUqf4oKHSUFCVnrWWiT-0ttdpwpJ_iB5frnEeY2mWyU1u7sd38BI3dOzoM82IFaIm98g9fa99bmoA7Z7gI60tzyF8YbJmWF-PCwyKHJ7B1MbCBonO36NmeEM-SplrR54fGykxTmwvtbYGhd5f0cdYzD0zulRDj-AhOd96rrUB_fIgoQGTXey8L_w0whcnVTWdG6is-rx8373Sz8ZRoE5RiLWW1mfHzVXxwslphx4BedRVF0tL-1YO7sg5MXhHCf6hpw8dOht-21NMrb1F1DQadFE_fhySFl-TgOD5BlhAuupLMsqcCIa4lcXP_loyA4ERP6WSdz2Bybz7_1eOiflfVodRrNqvr_DnL0NEXD_JkYTeIn84ziarFV7U7ZnkMvRiA_p1fWdbHTsE_8lu1rsf8fcJ1e76_6ycPkOc4TrOZw8gVRb7gIbMMVrv72BT_sFhW7GkXrzCQpQaeybmRw-bjFhkMMjMDYGXkA_H0q2Zfyh3zCOoa40hl2cqRWp7n1XuafmtKG_F8e9hyWox0q7AhZr5HOOaHz8r3O3-dmNl1KP52bqA8S72rLDslAOQlDupmAQgAmkm5ApYeYcEBredN78jHQ1pviUEI2-3qr4ClXZFHPa54AJ_q4HQ-EcKXEcYQglG21mSUy_tFQF-m4X46Qu8yYWcBVW4E0CG3wbvYx0BCdbc5RhIDkJo1elxLK8XS64lpFkCWy62xLVeMuVuCj8q84-Kk7tZ7gtMtLV9PHQCdbl3s2pAzMfuNIBJog6-HPmwha2n9T0Md5qF7OqCtnYWOWUfIMmQVcdW-ECGsQy9uIUmpsOjdtH31hrX3MUEhIOUB5xErLwfp-_s22ciAY_ap3JlYAiTKGlMCxKxTzK7wWEG_nYhDXC1Afj2z-tgvYhtn9MyDf2v0aIpDM9BoTOLEO-ButzylJ06pJlrJhpdvklvwJxUiuhlwy0bHNilb4Zv4QwnUv3DCrIeKe1ne90vEXe6YlDwSMeWJcz1DZIQBvVcNlN8q2y8Rae3lMWzsvD0YXrcXp02ckYoLSOQZgNYviGYLsgRgPGiIkncjSDt7WWV6td3l-zTrP6MT_hKigmg5F5_F6tS1bKb0jlQBZd0NP-_L_TPqMGRjCYG8johd6VyMiagslDjxG39Dh2wyTI19ZW7h_AOuOpnfkt2armqiq6iGfevA3malqkNakb6mFAS04J9O0butWVAw4yiPCEcLuDNAzzi_qrqLee4gkjh0NplvfGCaE6qqYms61GJbJC4wge6vjyTakurbqWEV3YoR3y_dn-0pjQ7TOx9kkruDwg0nZIV5O6yYxaulmbuvo3fs5CZb9ptZPD0MzGZj7CZU2MDCa4a4gr0McOx2MricxSzIu6emuRUzZuC6C1JxPRC00M0TrZNMIe_WVa9fXDLV1ULEAIMwMXzNT9zV6yiYQCwhkp30Wqde3W0LlIRpSbDuJXcvT8OCbXkdPNIScccdT9LvUQQ--hU2P45kisOev3TYn7yv-pdxM3u1KFNwuFxedSArMBPg7GDz1BOxDQRzv0mfwbf_CcoFbuyj7Tf4zWO46HVdHeRNbvIE--bnaSYD-UFaKknp8ZsBQQhBU_2TEca3fKwmg81-g7Vdb28QUZEuPzgE4ekxZejkKpiKqlLC5nJYgvXrqk2H35D51mYdzPs0ST05Mc41x9MFm_YOLxSFyA0yGAKVINmD5wT6kvRflPkgoksd2ryIvo4KMw3oZQKodv5By0mSJ8iX2vhTGylxiM8wj-ICyNuOsaRFrcMSpX7tZbXcDyysApdmx217BSADoQiNZBLngF7ptxc2QGyo3CwuDjaljwmSgL9KeGthd1RJFd826M287IPpCjLM4WRquCL_E0pQryNqOMn-ZEOCAlBjE37290EhkjKbhiGBEnHUvSbhoH4nL47AmunP_Q5aqh5173VfyoyaybuS3fXjQ5WO0kyFjMdD-a7C6PVdwToCTP-TljoF2YnQKCiqUGs9gNHS9mYhQSXzY4uuGlTHLfKB4JKS5_MQHvwI9zCbTvVG854fPuo_2mzSh-y8TSzBWPokhYWI_q095Sh6tOqDIJNMGyjI2GDFRSyKpKhIFCLyU2JEo9B6l91jPlir0XI8ZOQfBd9J0I4JIqnyoj40_1bF1zUDGc014bdGfxazxwlGph_ysKAP39wV7X9DBFS3ZmeSIn-r3s-sci0HmwnJUb2r03m40rFuNTV1cJMAFP7ZY7PQQQ0TtlO_al0uedaOWylLauap_eoRqc6xGJ2rSz1e7cOevksUlAqzK5xknYKHlsW970xuDGHKOZnKPg8O9nb2PKrcjwEQF5RFPc3l8TtOUXPhhvTERZFGoEuGuSuSp1cJhzba06yPnL-wE3CstYUm3jvkaUme6kKqM4tWBCQDg-_2PYf24xXYlmkIklylskqId826Y3pVVUd7e0vQO0POPeVYU1qwtTp7Ln-MhYEWexxptdNkVQ-kWx63w6HXF6_kefSxaf0UcvL8tOV73u7w_udle9MC_TXgwJZpoW2tSi5HETjQ_i28FAP2iJmclWOm3gP08cMiXvgpTpjzh6meBdvKepnifl_ivPzRnyjz3mYCZH-UJ4LmOHIonv-8arnckhCwHoFIpaIX7eSZyY0JcbBETKImtUwrlTSlbD8l02KDtqw2FJURtEWI5dC1sTS8c2HcyjXyQDA9A25a0M1yIgZyaadODGQ1zoa9xXB
        id: rs_001fd29e2d5573f70068ece2e816fc819c82755f049c987ea4
        summary: []
        type: reasoning
      - arguments: '{"city":"Mexico City","country":"Mexico"}'
        call_id: call_LIXPi261Xx3dGYzlDsOoyHGk
        id: fc_001fd29e2d5573f70068ece2ecc140819c97ca83bd4647a717
        name: final_result
        status: completed
        type: function_call
      parallel_tool_calls: true
      previous_response_id: null
      prompt_cache_key: null
      reasoning:
        effort: medium
        summary: null
      safety_identifier: null
      service_tier: default
      status: completed
      store: true
      temperature: 1.0
      text:
        format:
          type: text
        verbosity: medium
      tool_choice: required
      tools:
      - description: The final response which ends this conversation
        name: final_result
        parameters:
          additionalProperties: false
          properties:
            city:
              type: string
            country:
              type: string
          required:
          - city
          - country
          type: object
        strict: true
        type: function
      top_logprobs: 0
      top_p: 1.0
      truncation: disabled
      usage:
        input_tokens: 103
        input_tokens_details:
          cached_tokens: 0
        output_tokens: 409
        output_tokens_details:
          reasoning_tokens: 384
        total_tokens: 512
      user: null
    status:
      code: 200
      message: OK
version: 1
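
The recorded request body above shows the effect on the wire: the `function_call` item sent back to the Responses API carries `call_id` and `id` as separate fields rather than a combined string. A hedged sketch of that item as a plain dict, copied from the cassette:

function_call_item = {
    'arguments': '{}',
    'call_id': 'call_ZWkVhdUjupo528U9dqgFeRkH',                  # ToolCallPart.tool_call_id
    'id': 'fc_68477f0bb8e4819cba6d781e174d77f8001fd29e2d5573f7',  # ToolCallPart.id (new field)
    'name': 'get_user_country',
    'type': 'function_call',
}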
