Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/agents/models/openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,10 @@ async def _fetch_response(
include_set: set[str] = set(converted_tools.includes)
if model_settings.response_include is not None:
include_set.update(model_settings.response_include)

if model_settings.store is False and model_settings.response_include is None:
include_set.add("reasoning.encrypted_content")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Non-reasoning models such as gpt-4.1 do not need this include option (in fact, it raises an error for them), so adding it unconditionally does not work. Why not add this option to `include` manually instead?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You are right — I forgot to consider that GPT-4.1 would throw errors, and this solution would cause breaking changes, which is not good. However, I hope there is a better way to help developers realize that when `store=False` they need to include `reasoning.encrypted_content`; otherwise GitHub issues about it will keep showing up.


if model_settings.top_logprobs is not None:
include_set.add("message.output_text.logprobs")
include = cast(list[ResponseIncludable], list(include_set))
Expand Down
50 changes: 50 additions & 0 deletions tests/test_store_response_include.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import pytest
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

from agents import ModelSettings, ModelTracing, OpenAIResponsesModel


class DummyResponses:
    """Test double for the OpenAI `responses` API; captures `create()` kwargs."""

    async def create(self, **kwargs):
        # Record the outgoing request so the test can assert on what was sent.
        self.kwargs = kwargs

        # Zeroed-out usage payload matching the shape the SDK expects.
        usage_attrs = {
            "input_tokens": 0,
            "output_tokens": 0,
            "total_tokens": 0,
            "input_tokens_details": InputTokensDetails(cached_tokens=0),
            "output_tokens_details": OutputTokensDetails(reasoning_tokens=0),
        }

        class DummyResponse:
            id = "dummy"
            output = []
            usage = type("Usage", (), usage_attrs)()

        return DummyResponse()


class DummyClient:
    """Minimal stand-in for an AsyncOpenAI client exposing only `.responses`."""

    def __init__(self):
        # The model under test only ever touches `client.responses.create`.
        self.responses = DummyResponses()


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_store_false_includes_encrypted_reasoning():
    """With store=False, the request should ask for encrypted reasoning content."""
    dummy_client = DummyClient()
    model = OpenAIResponsesModel(model="gpt-5", openai_client=dummy_client)  # type: ignore
    await model.get_response(
        system_instructions=None,
        input="hi",
        model_settings=ModelSettings(store=False),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )
    # Inspect the kwargs captured by the dummy `responses.create` call.
    sent_include = set(dummy_client.responses.kwargs["include"])
    assert "reasoning.encrypted_content" in sent_include