Skip to content

Commit e3b4856

Browse files
authored
Fix openai#1407 Add reasoning.effort="minimal" and "verbosity" params to ModelSettings (openai#1439)
This pull request resolves openai#1407; the "minimal" reasoning effort param is already supported.
1 parent 534e2d5 commit e3b4856

File tree

5 files changed

+41
-0
lines changed

5 files changed

+41
-0
lines changed

examples/basic/simple_gpt_5.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
"""Minimal example: run a GPT-5 agent with the new `reasoning.effort="minimal"`
and `verbosity` model settings (Responses API by default)."""

import asyncio

from openai.types.shared import Reasoning

from agents import Agent, ModelSettings, Runner

# If you have a certain reason to use Chat Completions, you can configure the model this way,
# and then you can pass the chat_completions_model to the Agent constructor.
# from openai import AsyncOpenAI
# client = AsyncOpenAI()
# from agents import OpenAIChatCompletionsModel
# chat_completions_model = OpenAIChatCompletionsModel(model="gpt-5", openai_client=client)


async def main() -> None:
    """Build a GPT-5 agent and print its answer to a single prompt."""
    agent = Agent(
        # NOTE: fixed typo "Knowledgable" -> "Knowledgeable" in both strings below.
        name="Knowledgeable GPT-5 Assistant",
        instructions="You're a knowledgeable assistant. You always provide an interesting answer.",
        model="gpt-5",
        model_settings=ModelSettings(
            # GPT-5 accepts "minimal" in addition to "low", "medium", "high".
            reasoning=Reasoning(effort="minimal"),
            # Constrains how long/detailed the response is: "low", "medium", "high".
            verbosity="low",
        ),
    )
    result = await Runner.run(agent, "Tell me something about recursion in programming.")
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())

src/agents/model_settings.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,10 @@ class ModelSettings:
102102
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
103103
"""
104104

105+
verbosity: Literal["low", "medium", "high"] | None = None
106+
"""Constrains the verbosity of the model's response.
107+
"""
108+
105109
metadata: dict[str, str] | None = None
106110
"""Metadata to include with the model response call."""
107111

src/agents/models/openai_chatcompletions.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -287,6 +287,7 @@ async def _fetch_response(
287287
stream_options=self._non_null_or_not_given(stream_options),
288288
store=self._non_null_or_not_given(store),
289289
reasoning_effort=self._non_null_or_not_given(reasoning_effort),
290+
verbosity=self._non_null_or_not_given(model_settings.verbosity),
290291
top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
291292
extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
292293
extra_query=model_settings.extra_query,

src/agents/models/openai_responses.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -270,6 +270,11 @@ async def _fetch_response(
270270
extra_args = dict(model_settings.extra_args or {})
271271
if model_settings.top_logprobs is not None:
272272
extra_args["top_logprobs"] = model_settings.top_logprobs
273+
if model_settings.verbosity is not None:
274+
if response_format != NOT_GIVEN:
275+
response_format["verbosity"] = model_settings.verbosity # type: ignore [index]
276+
else:
277+
response_format = {"verbosity": model_settings.verbosity}
273278

274279
return await self._client.responses.create(
275280
previous_response_id=self._non_null_or_not_given(previous_response_id),

tests/model_settings/test_serialization.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ def test_all_fields_serialization() -> None:
5959
include_usage=False,
6060
response_include=["reasoning.encrypted_content"],
6161
top_logprobs=1,
62+
verbosity="low",
6263
extra_query={"foo": "bar"},
6364
extra_body={"foo": "bar"},
6465
extra_headers={"foo": "bar"},

0 commit comments

Comments
 (0)