@@ -878,7 +878,9 @@ def _process_response(self, response: responses.Response) -> ModelResponse:
878
878
if isinstance (content , responses .ResponseOutputText ): # pragma: no branch
879
879
items .append (TextPart (content .text ))
880
880
elif isinstance (item , responses .ResponseFunctionToolCall ):
881
- items .append (ToolCallPart (item .name , item .arguments , tool_call_id = item .call_id ))
881
+ items .append (
882
+ ToolCallPart (item .name , item .arguments , tool_call_id = _combine_tool_call_ids (item .call_id , item .id ))
883
+ )
882
884
883
885
finish_reason : FinishReason | None = None
884
886
provider_details : dict [str , Any ] | None = None
@@ -1084,27 +1086,29 @@ async def _map_messages( # noqa: C901
1084
1086
elif isinstance (part , UserPromptPart ):
1085
1087
openai_messages .append (await self ._map_user_prompt (part ))
1086
1088
elif isinstance (part , ToolReturnPart ):
1087
- openai_messages . append (
1088
- FunctionCallOutput (
1089
- type = 'function_call_output' ,
1090
- call_id = _guard_tool_call_id ( t = part ) ,
1091
- output = part . model_response_str () ,
1092
- )
1089
+ call_id = _guard_tool_call_id ( t = part )
1090
+ call_id , _ = _split_combined_tool_call_id ( call_id )
1091
+ item = FunctionCallOutput (
1092
+ type = 'function_call_output' ,
1093
+ call_id = call_id ,
1094
+ output = part . model_response_str (),
1093
1095
)
1096
+ openai_messages .append (item )
1094
1097
elif isinstance (part , RetryPromptPart ):
1095
1098
# TODO(Marcelo): How do we test this conditional branch?
1096
1099
if part .tool_name is None : # pragma: no cover
1097
1100
openai_messages .append (
1098
1101
Message (role = 'user' , content = [{'type' : 'input_text' , 'text' : part .model_response ()}])
1099
1102
)
1100
1103
else :
1101
- openai_messages . append (
1102
- FunctionCallOutput (
1103
- type = 'function_call_output' ,
1104
- call_id = _guard_tool_call_id ( t = part ) ,
1105
- output = part . model_response () ,
1106
- )
1104
+ call_id = _guard_tool_call_id ( t = part )
1105
+ call_id , _ = _split_combined_tool_call_id ( call_id )
1106
+ item = FunctionCallOutput (
1107
+ type = 'function_call_output' ,
1108
+ call_id = call_id ,
1109
+ output = part . model_response (),
1107
1110
)
1111
+ openai_messages .append (item )
1108
1112
else :
1109
1113
assert_never (part )
1110
1114
elif isinstance (message , ModelResponse ):
@@ -1141,12 +1145,18 @@ async def _map_messages( # noqa: C901
1141
1145
1142
1146
@staticmethod
def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
    """Convert a `ToolCallPart` back into a Responses API function tool-call param.

    The part's `tool_call_id` may be a combined `'<call_id>|<id>'` string (see
    `_combine_tool_call_ids`): when reasoning is enabled the Responses API needs
    both identifiers returned, so the combined value is split back apart here and
    the item `id` is attached to the param only when one was present.
    """
    call_id = _guard_tool_call_id(t=t)
    call_id, id = _split_combined_tool_call_id(call_id)

    param = responses.ResponseFunctionToolCallParam(
        name=t.tool_name,
        arguments=t.args_as_json_str(),
        call_id=call_id,
        type='function_call',
    )
    if id:  # pragma: no branch
        # `id` is optional in the param type; only set it when we actually have one.
        param['id'] = id
    return param
1150
1160
1151
1161
def _map_json_schema (self , o : OutputObjectDefinition ) -> responses .ResponseFormatTextJSONSchemaConfigParam :
1152
1162
response_format_param : responses .ResponseFormatTextJSONSchemaConfigParam = {
@@ -1365,7 +1375,7 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
1365
1375
vendor_part_id = chunk .item .id ,
1366
1376
tool_name = chunk .item .name ,
1367
1377
args = chunk .item .arguments ,
1368
- tool_call_id = chunk .item .call_id ,
1378
+ tool_call_id = _combine_tool_call_ids ( chunk .item .call_id , chunk . item . id ) ,
1369
1379
)
1370
1380
elif isinstance (chunk .item , responses .ResponseReasoningItem ):
1371
1381
pass
@@ -1506,3 +1516,17 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
1506
1516
u .input_audio_tokens = response_usage .prompt_tokens_details .audio_tokens or 0
1507
1517
u .cache_read_tokens = response_usage .prompt_tokens_details .cached_tokens or 0
1508
1518
return u
1519
+
1520
+
1521
+ def _combine_tool_call_ids (call_id : str , id : str | None ) -> str :
1522
+ # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
1523
+ # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
1524
+ return f'{ call_id } |{ id } ' if id else call_id
1525
+
1526
+
1527
+ def _split_combined_tool_call_id (combined_id : str ) -> tuple [str , str | None ]:
1528
+ if '|' in combined_id :
1529
+ call_id , id = combined_id .split ('|' , 1 )
1530
+ return call_id , id
1531
+ else :
1532
+ return combined_id , None # pragma: no cover
0 commit comments