@@ -990,7 +990,7 @@ async def _responses_create(

         previous_response_id = model_settings.get('openai_previous_response_id')
         if previous_response_id == 'auto':
-            messages, previous_response_id = self._get_response_id_and_trim(messages)
+            previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)

         instructions, openai_messages = await self._map_messages(messages, model_settings)
         reasoning = self._get_reasoning(model_settings)
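For context, a minimal sketch of how the 'auto' mode could be enabled from user code, assuming the usual Agent / OpenAIResponsesModel entry points; the model name and import paths are illustrative, and only the openai_previous_response_id key comes from this diff:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

# 'auto' asks the model class to find the newest stored response id in the
# message history and send only the messages that came after it (see the
# helper below); an explicit response id can still be passed instead.
settings = OpenAIResponsesModelSettings(openai_previous_response_id='auto')
agent = Agent(OpenAIResponsesModel('gpt-4o'), model_settings=settings)  # model name is illustrative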
@@ -1108,24 +1108,26 @@ def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam
             ),
         }

-    def _get_response_id_and_trim(self, messages: list[ModelMessage]) -> tuple[list[ModelMessage], str | None]:
+    def _get_previous_response_id_and_new_messages(
+        self, messages: list[ModelMessage]
+    ) -> tuple[str | None, list[ModelMessage]]:
         # In `auto` mode, the history is trimmed up to (but not including)
         # the latest ModelResponse with a valid `provider_response_id`.
         # This is then passed as `previous_response_id` in the next request
         # to maintain context along with the trimmed history.
-        response_id = None
+        previous_response_id = None
         trimmed_messages: list[ModelMessage] = []
         for m in reversed(messages):
             if isinstance(m, ModelResponse) and m.provider_name == self.system:
-                response_id = m.provider_response_id
+                previous_response_id = m.provider_response_id
                 break
             else:
                 trimmed_messages.append(m)

-        if response_id and trimmed_messages:
-            return list(reversed(trimmed_messages)), response_id
+        if previous_response_id and trimmed_messages:
+            return previous_response_id, list(reversed(trimmed_messages))
         else:
-            return None, messages
+            return None, messages

     async def _map_messages(  # noqa: C901
         self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
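To make the trimming rule in the comment concrete, here is a self-contained sketch of the same backwards walk using stand-in dataclasses rather than the library's ModelMessage / ModelResponse types; names like FakeResponse and split_history are illustrative only:

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FakeRequest:
    # Stand-in for a user message in the history.
    content: str


@dataclass
class FakeResponse:
    # Stand-in for a ModelResponse carrying a provider response id.
    provider_name: str
    provider_response_id: str | None


def split_history(messages: list, system: str = 'openai') -> tuple[str | None, list]:
    # Walk the history backwards; stop at the newest response from this
    # provider and return its id plus the messages that came after it.
    previous_response_id = None
    newer: list = []
    for m in reversed(messages):
        if isinstance(m, FakeResponse) and m.provider_name == system:
            previous_response_id = m.provider_response_id
            break
        newer.append(m)
    if previous_response_id and newer:
        return previous_response_id, list(reversed(newer))
    # No usable id (or nothing newer than it): fall back to sending everything.
    return None, messages


history = [
    FakeRequest('first question'),
    FakeResponse('openai', 'resp_abc'),
    FakeRequest('follow-up question'),
]
assert split_history(history) == ('resp_abc', [FakeRequest('follow-up question')])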