@@ -1012,29 +1012,30 @@ def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam
10121012 ),
10131013 }
10141014
def _get_response_id_and_trim(self, messages: list[ModelMessage]) -> tuple[list[ModelMessage], str | None]:
    """Trim the message history and extract a reusable provider response id.

    If the message history contains only responses produced by this model,
    the provider keeps the conversation state server-side, so the history
    can be limited to the most recent ModelRequest. The provider_response_id
    from the latest matching ModelResponse is then passed as
    previous_response_id to preserve context.

    Args:
        messages: The full message history.

    Returns:
        A ``(messages, response_id)`` tuple: either the trimmed history
        (just the latest ModelRequest) together with the id to reuse, or
        the original history with ``None`` when trimming is not safe.
    """
    response_id: str | None = None
    latest_model_request: ModelRequest | None = None
    for m in messages:
        # OpenAI may return a dated model_name that differs from
        # self.model_name (e.g., "gpt-5" vs "gpt-5-2025-08-07"),
        # hence the substring check rather than strict equality.
        if isinstance(m, ModelResponse) and m.model_name:
            if self.model_name in m.model_name:
                response_id = m.provider_response_id
            else:
                # A response from a different model invalidates the
                # response_id, so the history is kept intact.
                response_id = None
                break
        elif isinstance(m, ModelRequest):
            latest_model_request = m
        # NOTE(review): a ModelResponse with a falsy model_name is silently
        # skipped here (it neither sets nor invalidates response_id) —
        # confirm that such responses shouldn't keep the history intact.
    if response_id and latest_model_request:
        messages = [latest_model_request]
    return messages, response_id
10391040
10401041 async def _map_messages (
0 commit comments