Skip to content

Commit 9e09c32

Browse files
committed
update function signature
1 parent 81809be commit 9e09c32

File tree

2 files changed

+13
-11
lines changed

2 files changed

+13
-11
lines changed

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -990,7 +990,7 @@ async def _responses_create(
990990

991991
previous_response_id = model_settings.get('openai_previous_response_id')
992992
if previous_response_id == 'auto':
993-
messages, previous_response_id = self._get_response_id_and_trim(messages)
993+
previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)
994994

995995
instructions, openai_messages = await self._map_messages(messages, model_settings)
996996
reasoning = self._get_reasoning(model_settings)
@@ -1108,24 +1108,26 @@ def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam
11081108
),
11091109
}
11101110

1111-
def _get_response_id_and_trim(self, messages: list[ModelMessage]) -> tuple[list[ModelMessage], str | None]:
1111+
def _get_previous_response_id_and_new_messages(
1112+
self, messages: list[ModelMessage]
1113+
) -> tuple[str | None, list[ModelMessage]]:
11121114
# In `auto` mode, the history is trimmed up to (but not including)
11131115
# the latest ModelResponse with a valid `provider_response_id`.
11141116
# This is then passed as `previous_response_id` in the next request
11151117
# to maintain context along with the trimmed history.
1116-
response_id = None
1118+
previous_response_id = None
11171119
trimmed_messages: list[ModelMessage] = []
11181120
for m in reversed(messages):
11191121
if isinstance(m, ModelResponse) and m.provider_name == self.system:
1120-
response_id = m.provider_response_id
1122+
previous_response_id = m.provider_response_id
11211123
break
11221124
else:
11231125
trimmed_messages.append(m)
11241126

1125-
if response_id and trimmed_messages:
1126-
return list(reversed(trimmed_messages)), response_id
1127+
if previous_response_id and trimmed_messages:
1128+
return previous_response_id, list(reversed(trimmed_messages))
11271129
else:
1128-
return messages, None
1130+
return None, messages
11291131

11301132
async def _map_messages( # noqa: C901
11311133
self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings

tests/models/test_openai_responses.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1262,9 +1262,9 @@ async def test_openai_previous_response_id_mixed_model_history(allow_model_reque
12621262
]
12631263

12641264
model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key))
1265-
history, previous_response_id = model._get_response_id_and_trim(history) # type: ignore
1265+
previous_response_id, messages = model._get_previous_response_id_and_new_messages(history) # type: ignore
12661266
assert not previous_response_id
1267-
assert history == snapshot(
1267+
assert messages == snapshot(
12681268
[
12691269
ModelRequest(parts=[UserPromptPart(content='The first secret key is sesame', timestamp=IsDatetime())]),
12701270
ModelResponse(
@@ -1323,9 +1323,9 @@ async def test_openai_previous_response_id_same_model_history(allow_model_reques
13231323
]
13241324

13251325
model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key))
1326-
history, previous_response_id = model._get_response_id_and_trim(history) # type: ignore
1326+
previous_response_id, messages = model._get_previous_response_id_and_new_messages(history) # type: ignore
13271327
assert previous_response_id == 'resp_68b9bda81f5c8197a5a51a20a9f4150a000497db2a4c777b'
1328-
assert history == snapshot(
1328+
assert messages == snapshot(
13291329
[
13301330
ModelRequest(parts=[UserPromptPart(content='what is the first secret key?', timestamp=IsDatetime())]),
13311331
]

0 commit comments

Comments (0)