diff --git a/src/agents/handoffs.py b/src/agents/handoffs.py
index 4d70f6058..2c52737ad 100644
--- a/src/agents/handoffs.py
+++ b/src/agents/handoffs.py
@@ -119,9 +119,9 @@ class Handoff(Generic[TContext, TAgent]):
     True, as it increases the likelihood of correct JSON input.
     """
 
-    is_enabled: bool | Callable[
-        [RunContextWrapper[Any], AgentBase[Any]], MaybeAwaitable[bool]
-    ] = True
+    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase[Any]], MaybeAwaitable[bool]] = (
+        True
+    )
     """Whether the handoff is enabled. Either a bool or a Callable that takes the run context and
     agent and returns whether the handoff is enabled. You can use this to dynamically enable/disable
     a handoff based on your context/state."""
diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index 5af83fe3e..6d2767dda 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -116,6 +116,13 @@ class ModelSettings:
     """Additional output data to include in the model response.
     [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
 
+    verbosity: Literal["low", "medium", "high"] | None = None
+    """Controls response verbosity for supported models.
+    In the Responses API this is sent as `text.verbosity`;
+    in Chat Completions it is the top-level `verbosity` param.
+    Values: "low", "medium", "high". Defaults to provider/model behavior if not set.
+    """
+
     extra_query: Query | None = None
     """Additional query fields to provide with the request.
     Defaults to None if not provided."""
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 3798e100d..adafd40f8 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -271,6 +271,19 @@ async def _fetch_response(
             self._get_client(), model_settings, stream=stream
         )
 
+        # Carry verbosity for Chat Completions via extra_body until the
+        # official client types include it as a top-level param.
+        from typing import Any, cast
+        base_extra_body = cast(dict[str, Any], model_settings.extra_body or {})
+        extra_body = {
+            **base_extra_body,
+            **(
+                {"verbosity": model_settings.verbosity}
+                if model_settings.verbosity is not None
+                else {}
+            ),
+        }
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
@@ -289,7 +302,7 @@ async def _fetch_response(
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
-            extra_body=model_settings.extra_body,
+            extra_body=extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
             **(model_settings.extra_args or {}),
         )
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index f6da60b08..b309eb090 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -247,6 +247,20 @@ async def _fetch_response(
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)
 
+        # Merge verbosity into the `text` param alongside any response format;
+        # the Responses API expects verbosity under the `text` object.
+        if model_settings.verbosity is not None:
+            if response_format is NOT_GIVEN:
+                text_param: ResponseTextConfigParam | Any = {"verbosity": model_settings.verbosity}
+            else:
+                # response_format is a dict; augment it without mutating the original
+                from typing import cast
+
+                rf = cast(ResponseTextConfigParam, response_format)
+                text_param = {**rf, "verbosity": model_settings.verbosity}
+        else:
+            text_param = response_format
+
         include: list[ResponseIncludable] = converted_tools.includes
         if model_settings.response_include is not None:
             include = list({*include, *model_settings.response_include})
@@ -282,7 +296,7 @@ async def _fetch_response(
             extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
-            text=response_format,
+            text=text_param,
             store=self._non_null_or_not_given(model_settings.store),
             reasoning=self._non_null_or_not_given(model_settings.reasoning),
             metadata=self._non_null_or_not_given(model_settings.metadata),
diff --git a/src/agents/run.py b/src/agents/run.py
index d0748e514..1cc9e76d6 100644
--- a/src/agents/run.py
+++ b/src/agents/run.py
@@ -1034,7 +1034,6 @@ async def _get_single_step_result_from_streamed_response(
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
     ) -> SingleStepResult:
-        original_input = streamed_result.input
         pre_step_items = streamed_result.new_items
         event_queue = streamed_result._event_queue
 
diff --git a/src/agents/tracing/processors.py b/src/agents/tracing/processors.py
index 32fd290ec..126c71498 100644
--- a/src/agents/tracing/processors.py
+++ b/src/agents/tracing/processors.py
@@ -70,8 +70,8 @@ def set_api_key(self, api_key: str):
                 client.
         """
         # Clear the cached property if it exists
-        if 'api_key' in self.__dict__:
-            del self.__dict__['api_key']
+        if "api_key" in self.__dict__:
+            del self.__dict__["api_key"]
 
         # Update the private attribute
         self._api_key = api_key
diff --git a/tests/test_agent_clone_shallow_copy.py b/tests/test_agent_clone_shallow_copy.py
index fdf9e0247..44b41bd3d 100644
--- a/tests/test_agent_clone_shallow_copy.py
+++ b/tests/test_agent_clone_shallow_copy.py
@@ -5,6 +5,7 @@
 def greet(name: str) -> str:
     return f"Hello, {name}!"
 
+
 def test_agent_clone_shallow_copy():
     """Test that clone creates shallow copy with tools.copy() workaround"""
     target_agent = Agent(name="Target")
@@ -16,9 +17,7 @@ def test_agent_clone_shallow_copy():
     )
 
     cloned = original.clone(
-        name="Cloned",
-        tools=original.tools.copy(),
-        handoffs=original.handoffs.copy()
+        name="Cloned", tools=original.tools.copy(), handoffs=original.handoffs.copy()
     )
 
     # Basic assertions
diff --git a/tests/test_stream_events.py b/tests/test_stream_events.py
index 11feb9fe0..0f85b63f8 100644
--- a/tests/test_stream_events.py
+++ b/tests/test_stream_events.py
@@ -14,6 +14,7 @@ async def foo() -> str:
     await asyncio.sleep(3)
     return "success!"
 
+
 @pytest.mark.asyncio
 async def test_stream_events_main():
     model = FakeModel()
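
Usage sketch: a minimal end-to-end example of the new `verbosity` setting, assuming the SDK's existing public `Agent` / `Runner` / `ModelSettings` API; only the `verbosity` field comes from this diff, and the agent name, instructions, and prompt below are illustrative.

    import asyncio

    from agents import Agent, ModelSettings, Runner


    async def main() -> None:
        agent = Agent(
            name="Assistant",
            instructions="Answer concisely.",
            # New in this diff: sent as `text.verbosity` on the Responses API,
            # and forwarded through `extra_body` for Chat Completions.
            model_settings=ModelSettings(verbosity="low"),
        )
        result = await Runner.run(agent, "Explain what a handoff is.")
        print(result.final_output)


    if __name__ == "__main__":
        asyncio.run(main())

Because `verbosity` defaults to `None`, existing callers are unaffected: both model classes omit the param entirely unless it is set, deferring to provider/model behavior.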