Merged
Changes from 2 commits
6 changes: 5 additions & 1 deletion src/agents/model_settings.py
@@ -30,7 +30,7 @@ class ModelSettings:
tool_choice: Literal["auto", "required", "none"] | str | None = None
"""The tool choice to use when calling the model."""

- parallel_tool_calls: bool | None = False
+ parallel_tool_calls: bool | None = None
Member Author:
@rm-openai this seemed like a typo, since it meant that if you tried to override a different setting at the Runner.run level, it would also set this back to False

Member Author:
Actually, never mind: since `or` was used below, it's fine. But we should not use `or` there, because `0` is falsy in Python, so setting `temperature=0` at the Runner.run level would fail to override `temperature` set at the agent level.
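A minimal sketch of that pitfall (hypothetical values, not code from this diff):

```python
agent_temperature = 0.7      # set at the agent level
override_temperature = 0.0   # deliberately set to zero at the Runner.run level

# With `or`, 0.0 is falsy, so the agent-level value silently wins.
assert (override_temperature or agent_temperature) == 0.7

# With an explicit None check, the zero override survives.
resolved = override_temperature if override_temperature is not None else agent_temperature
assert resolved == 0.0
```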

"""Whether to use parallel tool calls when calling the model."""

truncation: Literal["auto", "disabled"] | None = None
@@ -39,6 +39,9 @@ class ModelSettings:
max_tokens: int | None = None
"""The maximum number of output tokens to generate."""

+ store: bool | None = None
+ """Whether to store the generated model response for later retrieval."""

def resolve(self, override: ModelSettings | None) -> ModelSettings:
"""Produce a new ModelSettings by overlaying any non-None values from the
override on top of this instance."""
@@ -53,4 +56,5 @@ def resolve(self, override: ModelSettings | None) -> ModelSettings:
parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
truncation=override.truncation or self.truncation,
max_tokens=override.max_tokens or self.max_tokens,
+ store=override.store if override.store is not None else self.store,
)
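For reference, a minimal usage sketch of the overlay semantics `resolve` implements for `store` (assuming the package imports as `agents`; the field values are hypothetical):

```python
from agents.model_settings import ModelSettings

base = ModelSettings(store=False, max_tokens=512)
override = ModelSettings(max_tokens=1024)  # store left as None

resolved = base.resolve(override)
assert resolved.store is False       # a None override does not clobber store
assert resolved.max_tokens == 1024   # non-None override values win
```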
4 changes: 4 additions & 0 deletions src/agents/models/openai_chatcompletions.py
@@ -518,6 +518,9 @@ async def _fetch_response(
f"Response format: {response_format}\n"
)

+ # Match the behavior of Responses where store is True when not given
+ store = model_settings.store if model_settings.store is not None else True

ret = await self._get_client().chat.completions.create(
model=self.model,
messages=converted_messages,
@@ -532,6 +535,7 @@
parallel_tool_calls=parallel_tool_calls,
stream=stream,
stream_options={"include_usage": True} if stream else NOT_GIVEN,
+ store=store,
extra_headers=_HEADERS,
)

1 change: 1 addition & 0 deletions src/agents/models/openai_responses.py
@@ -246,6 +246,7 @@ async def _fetch_response(
stream=stream,
extra_headers=_HEADERS,
text=response_format,
+ store=self._non_null_or_not_given(model_settings.store),
)

def _get_client(self) -> AsyncOpenAI:
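`_non_null_or_not_given` is not shown in this diff; a plausible sketch of what it does, assuming the `NOT_GIVEN` sentinel from the openai package:

```python
from typing import Any

from openai import NOT_GIVEN


def _non_null_or_not_given(value: Any) -> Any:
    # Forward the value when set; otherwise omit the field from the request,
    # letting the Responses API apply its server-side default (store=True).
    return value if value is not None else NOT_GIVEN
```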
2 changes: 2 additions & 0 deletions tests/test_openai_chatcompletions.py
@@ -226,6 +226,7 @@ def __init__(self, completions: DummyCompletions) -> None:
# Ensure expected args were passed through to OpenAI client.
kwargs = completions.kwargs
assert kwargs["stream"] is False
assert kwargs["store"] is True
assert kwargs["model"] == "gpt-4"
assert kwargs["messages"][0]["role"] == "system"
assert kwargs["messages"][0]["content"] == "sys"
@@ -279,6 +280,7 @@ def __init__(self, completions: DummyCompletions) -> None:
)
# Check OpenAI client was called for streaming
assert completions.kwargs["stream"] is True
assert completions.kwargs["store"] is True
assert completions.kwargs["stream_options"] == {"include_usage": True}
# Response is a proper openai Response
assert isinstance(response, Response)
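The two new asserts pin down the defaulting rule from `_fetch_response`; distilled into a standalone sketch (mirroring the logic above, not code from this PR):

```python
def resolve_store(store: bool | None) -> bool:
    # Chat Completions mirrors Responses: store defaults to True when unset.
    return store if store is not None else True


assert resolve_store(None) is True    # the case both new asserts exercise
assert resolve_store(False) is False  # an explicit opt-out is preserved
```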