Skip to content

Commit b00646d

Browse files
authored
Make OpenAIResponsesModel work with reasoning from other models and modified history (#2881)
1 parent 3530dcb commit b00646d

File tree

5 files changed

+906
-26
lines changed

5 files changed

+906
-26
lines changed

docs/thinking.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,16 @@ See the sections below for how to enable thinking for each provider.
1111
When using the [`OpenAIChatModel`][pydantic_ai.models.openai.OpenAIChatModel], text output inside `<think>` tags is converted to [`ThinkingPart`][pydantic_ai.messages.ThinkingPart] objects.
1212
You can customize the tags using the [`thinking_tags`][pydantic_ai.profiles.ModelProfile.thinking_tags] field on the [model profile](models/openai.md#model-profile).
1313

14+
### OpenAI Responses
15+
1416
The [`OpenAIResponsesModel`][pydantic_ai.models.openai.OpenAIResponsesModel] can generate native thinking parts.
1517
To enable this functionality, you need to set the `openai_reasoning_effort` and `openai_reasoning_summary` fields in the
1618
[`OpenAIResponsesModelSettings`][pydantic_ai.models.openai.OpenAIResponsesModelSettings].
1719

20+
By default, reasoning IDs from the message history are sent to the model, which can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
21+
if the message history you're sending does not match exactly what was received from the Responses API in a previous response, for example if you're using a [history processor](message-history.md#processing-message-history).
22+
To disable this, you can set the `openai_send_reasoning_ids` field on [`OpenAIResponsesModelSettings`][pydantic_ai.models.openai.OpenAIResponsesModelSettings] to `False`.
23+
1824
```python {title="openai_thinking_part.py"}
1925
from pydantic_ai import Agent
2026
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 52 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -190,10 +190,19 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
190190
This can be useful for debugging and understanding the model's reasoning process.
191191
One of `concise` or `detailed`.
192192
193-
Check the [OpenAI Computer use documentation](https://platform.openai.com/docs/guides/tools-computer-use#1-send-a-request-to-the-model)
193+
Check the [OpenAI Reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries)
194194
for more details.
195195
"""
196196

197+
openai_send_reasoning_ids: bool
198+
"""Whether to send reasoning IDs from the message history to the model. Enabled by default.
199+
200+
This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
201+
if the message history you're sending does not match exactly what was received from the Responses API in a previous response,
202+
for example if you're using a [history processor](../../message-history.md#processing-message-history).
203+
In that case, you'll want to disable this.
204+
"""
205+
197206
openai_truncation: Literal['disabled', 'auto']
198207
"""The truncation strategy to use for the model response.
199208
@@ -968,7 +977,7 @@ async def _responses_create(
968977
else:
969978
tool_choice = 'auto'
970979

971-
instructions, openai_messages = await self._map_messages(messages)
980+
instructions, openai_messages = await self._map_messages(messages, model_settings)
972981
reasoning = self._get_reasoning(model_settings)
973982

974983
text: responses.ResponseTextConfigParam | None = None
@@ -1084,7 +1093,7 @@ def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam
10841093
}
10851094

10861095
async def _map_messages( # noqa: C901
1087-
self, messages: list[ModelMessage]
1096+
self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
10881097
) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
10891098
"""Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
10901099
openai_messages: list[responses.ResponseInputItemParam] = []
@@ -1153,20 +1162,39 @@ async def _map_messages( # noqa: C901
11531162
# We don't currently track built-in tool calls from OpenAI
11541163
pass
11551164
elif isinstance(item, ThinkingPart):
1156-
if reasoning_item is None or reasoning_item['id'] != item.id:
1157-
reasoning_item = responses.ResponseReasoningItemParam(
1158-
id=item.id or _utils.generate_tool_call_id(),
1159-
summary=[],
1160-
encrypted_content=item.signature if item.provider_name == self.system else None,
1161-
type='reasoning',
1162-
)
1163-
openai_messages.append(reasoning_item)
1165+
if (
1166+
item.id
1167+
and item.provider_name == self.system
1168+
and OpenAIModelProfile.from_profile(
1169+
self.profile
1170+
).openai_supports_encrypted_reasoning_content
1171+
and model_settings.get('openai_send_reasoning_ids', True)
1172+
):
1173+
if (
1174+
reasoning_item is None
1175+
or reasoning_item['id'] != item.id
1176+
and (item.signature or item.content)
1177+
): # pragma: no branch
1178+
reasoning_item = responses.ResponseReasoningItemParam(
1179+
id=item.id,
1180+
summary=[],
1181+
encrypted_content=item.signature,
1182+
type='reasoning',
1183+
)
1184+
openai_messages.append(reasoning_item)
11641185

1165-
if item.content:
1166-
reasoning_item['summary'] = [
1167-
*reasoning_item['summary'],
1168-
Summary(text=item.content, type='summary_text'),
1169-
]
1186+
if item.content:
1187+
reasoning_item['summary'] = [
1188+
*reasoning_item['summary'],
1189+
Summary(text=item.content, type='summary_text'),
1190+
]
1191+
else:
1192+
start_tag, end_tag = self.profile.thinking_tags
1193+
openai_messages.append(
1194+
responses.EasyInputMessageParam(
1195+
role='assistant', content='\n'.join([start_tag, item.content, end_tag])
1196+
)
1197+
)
11701198
else:
11711199
assert_never(item)
11721200
else:
@@ -1422,15 +1450,14 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
14221450

14231451
elif isinstance(chunk, responses.ResponseOutputItemDoneEvent):
14241452
if isinstance(chunk.item, responses.ResponseReasoningItem):
1425-
# Add the signature to the part corresponding to the first summary item
1426-
signature = chunk.item.encrypted_content
1427-
yield self._parts_manager.handle_thinking_delta(
1428-
vendor_part_id=f'{chunk.item.id}-0',
1429-
id=chunk.item.id,
1430-
signature=signature,
1431-
provider_name=self.provider_name if signature else None,
1432-
)
1433-
pass
1453+
if signature := chunk.item.encrypted_content: # pragma: no branch
1454+
# Add the signature to the part corresponding to the first summary item
1455+
yield self._parts_manager.handle_thinking_delta(
1456+
vendor_part_id=f'{chunk.item.id}-0',
1457+
id=chunk.item.id,
1458+
signature=signature,
1459+
provider_name=self.provider_name,
1460+
)
14341461

14351462
elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
14361463
yield self._parts_manager.handle_thinking_delta(

0 commit comments

Comments
 (0)