Skip to content

Commit 823fc07

Browse files
committed
update logic
1 parent 4c15f2b commit 823fc07

File tree

2 files changed

+29
-41
lines changed

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 14 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -966,9 +966,11 @@ async def _responses_create(
966966
tool_choice = 'required'
967967
else:
968968
tool_choice = 'auto'
969+
969970
previous_response_id = model_settings.get('openai_previous_response_id')
970971
if previous_response_id == 'auto':
971972
messages, previous_response_id = self._get_response_id_and_trim(messages)
973+
972974
instructions, openai_messages = await self._map_messages(messages)
973975
reasoning = self._get_reasoning(model_settings)
974976

@@ -1081,26 +1083,21 @@ def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam
10811083
}
10821084

10831085
def _get_response_id_and_trim(self, messages: list[ModelMessage]) -> tuple[list[ModelMessage], str | None]:
1084-
# If the message history contains only openai responses,
1085-
# we can limit the history to the most recent ModelRequest.
1086-
# The provider_response_id from the latest ModelResponse is
1087-
# then passed as previous_response_id to preserve context.
1086+
# In `auto` mode, the history is trimmed up to (but not including)
1087+
# the latest ModelResponse with a valid `provider_response_id`.
1088+
# This is then passed as `previous_response_id` in the next request
1089+
# to maintain context along with the trimmed history.
10881090
response_id = None
1089-
latest_model_request: ModelRequest | None = None
1090-
for m in messages:
1091-
# Openai may return a dated model_name that differs from self.model_name
1092-
# (e.g., "gpt-5" vs "gpt-5-2025-08-07").
1093-
if isinstance(m, ModelResponse) and m.model_name and (self.model_name in m.model_name):
1091+
trimmed_messages: list[ModelMessage] = []
1092+
for m in reversed(messages):
1093+
if isinstance(m, ModelResponse) and m.provider_name == self.system:
10941094
response_id = m.provider_response_id
1095-
elif isinstance(m, ModelRequest):
1096-
latest_model_request = m
1097-
else:
1098-
# Mixed model responses invalidate response_id,
1099-
# so the history is kept intact.
1100-
response_id = None
11011095
break
1102-
if response_id and latest_model_request:
1103-
return [latest_model_request], response_id
1096+
else:
1097+
trimmed_messages.append(m)
1098+
1099+
if response_id and trimmed_messages:
1100+
return list(reversed(trimmed_messages)), response_id
11041101
else:
11051102
return messages, None
11061103

tests/models/test_openai_responses.py

Lines changed: 15 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1190,6 +1190,21 @@ async def test_openai_previous_response_id_auto_mode(allow_model_requests: None,
11901190
async def test_openai_previous_response_id_mixed_model_history(allow_model_requests: None, openai_api_key: str):
11911191
"""Test if invalid previous response id is ignored when history contains non-OpenAI responses"""
11921192
history = [
1193+
# ModelRequest(
1194+
# parts=[
1195+
# UserPromptPart(
1196+
# content='The first secret key is sesame',
1197+
# ),
1198+
# ],
1199+
# ),
1200+
# ModelResponse(
1201+
# parts=[
1202+
# TextPart(content='Open sesame! What would you like to unlock?'),
1203+
# ],
1204+
# model_name='gpt-5',
1205+
# provider_name='openai',
1206+
# provider_response_id='resp_68b9bd97025c8195b443af591ca2345c08cb6072affe6099',
1207+
# ),
11931208
ModelRequest(
11941209
parts=[
11951210
UserPromptPart(
@@ -1201,21 +1216,6 @@ async def test_openai_previous_response_id_mixed_model_history(allow_model_reque
12011216
parts=[
12021217
TextPart(content='Open sesame! What would you like to unlock?'),
12031218
],
1204-
model_name='gpt-5',
1205-
provider_name='openai',
1206-
provider_response_id='resp_68b9bd97025c8195b443af591ca2345c08cb6072affe6099',
1207-
),
1208-
ModelRequest(
1209-
parts=[
1210-
UserPromptPart(
1211-
content='The second secret key is olives',
1212-
),
1213-
],
1214-
),
1215-
ModelResponse(
1216-
parts=[
1217-
TextPart(content='Understood'),
1218-
],
12191219
model_name='claude-3-5-sonnet-latest',
12201220
provider_name='anthropic',
12211221
provider_response_id='msg_01XUQuedGz9gusk4xZm4gWJj',
@@ -1238,15 +1238,6 @@ async def test_openai_previous_response_id_mixed_model_history(allow_model_reque
12381238
ModelResponse(
12391239
parts=[TextPart(content='Open sesame! What would you like to unlock?')],
12401240
usage=RequestUsage(),
1241-
model_name='gpt-5',
1242-
timestamp=IsDatetime(),
1243-
provider_name='openai',
1244-
provider_response_id='resp_68b9bd97025c8195b443af591ca2345c08cb6072affe6099',
1245-
),
1246-
ModelRequest(parts=[UserPromptPart(content='The second secret key is olives', timestamp=IsDatetime())]),
1247-
ModelResponse(
1248-
parts=[TextPart(content='Understood')],
1249-
usage=RequestUsage(),
12501241
model_name='claude-3-5-sonnet-latest',
12511242
timestamp=IsDatetime(),
12521243
provider_name='anthropic',

0 commit comments

Comments
 (0)