From 3653a409bbd32919a258bd74c337391bc4608d97 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 16:50:13 -0700
Subject: [PATCH 1/9] Add user_agent_override helper contextmanager

---
 src/agents/__init__.py                        |  12 ++
 src/agents/_config.py                         |  12 ++
 src/agents/extensions/models/litellm_model.py |  10 +-
 src/agents/models/_openai_shared.py           |  10 ++
 src/agents/models/openai_chatcompletions.py   |   9 +-
 src/agents/models/openai_responses.py         |   9 +-
 tests/test_config.py                          | 106 +++++++++++++++++-
 7 files changed, 163 insertions(+), 5 deletions(-)

diff --git a/src/agents/__init__.py b/src/agents/__init__.py
index 3a8260f29..624fdf826 100644
--- a/src/agents/__init__.py
+++ b/src/agents/__init__.py
@@ -1,3 +1,5 @@
+from collections.abc import Iterator
+from contextlib import contextmanager
 import logging
 import sys
 from typing import Literal
@@ -159,6 +161,15 @@ def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> Non
     _config.set_default_openai_api(api)
 
 
+@contextmanager
+def user_agent_override(user_agent: str | None) -> Iterator[None]:
+    """Set the user agent override for OpenAI LLM requests. This is useful if you want to set a custom
+    user agent for OpenAI LLM requests.
+    """
+    with _config.user_agent_override(user_agent):
+        yield
+
+
 def enable_verbose_stdout_logging():
     """Enables verbose logging to stdout. This is useful for debugging."""
     logger = logging.getLogger("openai.agents")
@@ -286,6 +297,7 @@ def enable_verbose_stdout_logging():
     "set_default_openai_key",
     "set_default_openai_client",
     "set_default_openai_api",
+    "user_agent_override",
     "set_tracing_export_api_key",
     "enable_verbose_stdout_logging",
     "gen_trace_id",
diff --git a/src/agents/_config.py b/src/agents/_config.py
index 304cfb83c..7a50943ac 100644
--- a/src/agents/_config.py
+++ b/src/agents/_config.py
@@ -1,3 +1,6 @@
+from collections.abc import Iterator
+from contextlib import contextmanager
+
 from openai import AsyncOpenAI
 from typing_extensions import Literal
 
@@ -24,3 +27,12 @@ def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> Non
         _openai_shared.set_use_responses_by_default(False)
     else:
         _openai_shared.set_use_responses_by_default(True)
+
+
+@contextmanager
+def user_agent_override(user_agent: str | None) -> Iterator[None]:
+    try:
+        _openai_shared.set_user_agent_override(user_agent)
+        yield
+    finally:
+        _openai_shared.set_user_agent_override(None)
diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index 8d39ad390..8f7aa6945 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -10,6 +10,8 @@
 
 from agents.exceptions import ModelBehaviorError
 
+from ...models import _openai_shared
+
 try:
     import litellm
 except ImportError as _e:
@@ -353,7 +355,7 @@ async def _fetch_response(
             stream_options=stream_options,
             reasoning_effort=reasoning_effort,
             top_logprobs=model_settings.top_logprobs,
-            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
+            extra_headers=self._merge_headers(model_settings),
             api_key=self.api_key,
             base_url=self.base_url,
             **extra_kwargs,
@@ -384,6 +386,12 @@ def _remove_not_given(self, value: Any) -> Any:
             return None
         return value
 
+    def _merge_headers(self, model_settings: ModelSettings):
+        merged = {**HEADERS, **(model_settings.extra_headers or {})}
+        if ua_override := _openai_shared.get_user_agent_override():
+            merged["User-Agent"] = ua_override
+        return merged
+
 
 class LitellmConverter:
     @classmethod
diff --git a/src/agents/models/_openai_shared.py b/src/agents/models/_openai_shared.py
index 2e1450187..e9f775547 100644
--- a/src/agents/models/_openai_shared.py
+++ b/src/agents/models/_openai_shared.py
@@ -5,6 +5,7 @@
 _default_openai_key: str | None = None
 _default_openai_client: AsyncOpenAI | None = None
 _use_responses_by_default: bool = True
+_user_agent_override: str | None = None
 
 
 def set_default_openai_key(key: str) -> None:
@@ -32,3 +33,12 @@ def set_use_responses_by_default(use_responses: bool) -> None:
 
 def get_use_responses_by_default() -> bool:
     return _use_responses_by_default
+
+
+def set_user_agent_override(user_agent: str | None) -> None:
+    global _user_agent_override
+    _user_agent_override = user_agent
+
+
+def get_user_agent_override() -> str | None:
+    return _user_agent_override
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index a50a1a8a5..2b9a0b750 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -24,6 +24,7 @@
 from ..tracing.spans import Span
 from ..usage import Usage
 from ..util._json import _to_dump_compatible
+from . import _openai_shared
 from .chatcmpl_converter import Converter
 from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
@@ -306,7 +307,7 @@ async def _fetch_response(
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
             verbosity=self._non_null_or_not_given(model_settings.verbosity),
             top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
-            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
+            extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
@@ -349,3 +350,9 @@ def _get_client(self) -> AsyncOpenAI:
         if self._client is None:
             self._client = AsyncOpenAI()
         return self._client
+
+    def _merge_headers(self, model_settings: ModelSettings):
+        merged = {**HEADERS, **(model_settings.extra_headers or {})}
+        if ua_override := _openai_shared.get_user_agent_override():
+            merged["User-Agent"] = ua_override
+        return merged
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 9ca2d324f..5918c5051 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -40,6 +40,7 @@
 from ..usage import Usage
 from ..util._json import _to_dump_compatible
 from ..version import __version__
+from . import _openai_shared
 from .interface import Model, ModelTracing
 
 if TYPE_CHECKING:
@@ -312,7 +313,7 @@ async def _fetch_response(
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,
-            extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
+            extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             text=response_format,
@@ -327,6 +328,12 @@ def _get_client(self) -> AsyncOpenAI:
             self._client = AsyncOpenAI()
         return self._client
 
+    def _merge_headers(self, model_settings: ModelSettings):
+        merged = {**_HEADERS, **(model_settings.extra_headers or {})}
+        if ua_override := _openai_shared.get_user_agent_override():
+            merged["User-Agent"] = ua_override
+        return merged
+
 
 @dataclass
 class ConvertedTools:
diff --git a/tests/test_config.py b/tests/test_config.py
index dba854db3..8fda0c926 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -2,11 +2,22 @@
 
 import openai
 import pytest
-
-from agents import set_default_openai_api, set_default_openai_client, set_default_openai_key
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from openai.types.responses import ResponseCompletedEvent
+
+from agents import (
+    ModelSettings,
+    ModelTracing,
+    set_default_openai_api,
+    set_default_openai_client,
+    set_default_openai_key,
+)
+from agents._config import user_agent_override
 from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
 from agents.models.openai_provider import OpenAIProvider
 from agents.models.openai_responses import OpenAIResponsesModel
+from tests.fake_model import get_response_obj
 
 
 def test_cc_no_default_key_errors(monkeypatch):
@@ -62,3 +73,94 @@ def test_set_default_openai_api():
     assert isinstance(OpenAIProvider().get_model("gpt-4"), OpenAIResponsesModel), (
         "Should be responses model"
     )
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_user_agent_override():
+    called_kwargs = {}
+
+    class DummyStream:
+        def __aiter__(self):
+            async def gen():
+                yield ResponseCompletedEvent(
+                    type="response.completed",
+                    response=get_response_obj([]),
+                    sequence_number=0,
+                )
+
+            return gen()
+
+    class DummyResponses:
+        async def create(self, **kwargs):
+            nonlocal called_kwargs
+            called_kwargs = kwargs
+            return DummyStream()
+
+    class DummyClient:
+        def __init__(self):
+            self.responses = DummyResponses()
+
+    model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyClient())  # type: ignore
+
+    with user_agent_override("test_user_agent"):
+        stream = model.stream_response(
+            system_instructions=None,
+            input="hi",
+            model_settings=ModelSettings(),
+            tools=[],
+            output_schema=None,
+            handoffs=[],
+            tracing=ModelTracing.DISABLED,
+        )
+
+        async for _ in stream:
+            pass
+
+    assert "extra_headers" in called_kwargs
+    assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_user_agent_override_chat_completions():
+    called_kwargs = {}
+
+    class DummyCompletions:
+        async def create(self, **kwargs):
+            nonlocal called_kwargs
+            called_kwargs = kwargs
+
+            msg = ChatCompletionMessage(role="assistant", content="Hello")
+            choice = Choice(index=0, finish_reason="stop", message=msg)
+            return ChatCompletion(
+                id="resp-id",
+                created=0,
+                model="fake",
+                object="chat.completion",
+                choices=[choice],
+                usage=None,
+            )
+
+    class DummyClient:
+        def __init__(self):
+            self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
+            self.base_url = "https://api.openai.com"
+
+    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient())  # type: ignore
+
+    with user_agent_override("test_user_agent"):
+        await model.get_response(
+            system_instructions=None,
+            input="hi",
+            model_settings=ModelSettings(),
+            tools=[],
+            output_schema=None,
+            handoffs=[],
+            tracing=ModelTracing.DISABLED,
+            previous_response_id=None,
+            conversation_id=None,
+        )
+
+    assert "extra_headers" in called_kwargs
+    assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
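Note: the helper introduced in this patch is an ordinary context manager, so an override only applies to requests issued inside the with-block and is cleared on exit. A minimal usage sketch (the agent wiring below is illustrative, not part of the patch):

    import asyncio

    from agents import Agent, Runner, user_agent_override

    async def main():
        agent = Agent(name="Assistant", instructions="Be concise.")
        # Requests issued inside the block carry the custom User-Agent;
        # the finally-clause in _config resets the override on exit.
        with user_agent_override("my-wrapper/1.2.3"):
            result = await Runner.run(agent, "hello")
        print(result.final_output)

    asyncio.run(main())

At this point in the series the override lives in a module-level global, so concurrent tasks share a single value; the ContextVar introduced later in the series scopes it per task.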
From 329d9b43814ef555e4a374dfe57b8db657a18444 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 18:29:51 -0700
Subject: [PATCH 2/9] add tests

---
 tests/test_config.py | 78 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 77 insertions(+), 1 deletion(-)

diff --git a/tests/test_config.py b/tests/test_config.py
index 8fda0c926..6aec7e6b1 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,4 +1,5 @@
 import os
+from typing import Any
 
 import openai
 import pytest
@@ -77,7 +78,7 @@ def test_set_default_openai_api():
 
 @pytest.mark.allow_call_model_methods
 @pytest.mark.asyncio
-async def test_user_agent_override():
+async def test_user_agent_override_responses():
     called_kwargs = {}
 
     class DummyStream:
@@ -164,3 +165,78 @@ def __init__(self):
 
     assert "extra_headers" in called_kwargs
     assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_user_agent_override_litellm(monkeypatch):
+    import importlib
+    import sys
+    import types as pytypes
+
+    called_kwargs = {}
+
+    # Create a fake litellm module so we don't need the real dependency
+    litellm_fake: Any = pytypes.ModuleType("litellm")
+
+    class DummyMessage:
+        role = "assistant"
+        content = "Hello"
+        tool_calls = None
+
+        def get(self, _key, _default=None):
+            return None
+
+        def model_dump(self):
+            return {"role": self.role, "content": self.content}
+
+    class Choices:  # noqa: N801 - mimic litellm naming
+        def __init__(self):
+            self.message = DummyMessage()
+
+    class DummyModelResponse:
+        def __init__(self):
+            # Minimal shape expected by get_response()
+            self.choices = [Choices()]
+
+    async def acompletion(**kwargs):
+        nonlocal called_kwargs
+        called_kwargs = kwargs
+        return DummyModelResponse()
+
+    utils_ns = pytypes.SimpleNamespace()
+    utils_ns.Choices = Choices
+    utils_ns.ModelResponse = DummyModelResponse
+
+    litellm_types = pytypes.SimpleNamespace(
+        utils=utils_ns,
+        llms=pytypes.SimpleNamespace(openai=pytypes.SimpleNamespace(ChatCompletionAnnotation=dict)),
+    )
+    litellm_fake.acompletion = acompletion
+    litellm_fake.types = litellm_types
+
+    monkeypatch.setitem(sys.modules, "litellm", litellm_fake)
+
+    # Import after injecting fake module and patch the module's symbol directly
+    litellm_mod = importlib.import_module("agents.extensions.models.litellm_model")
+    monkeypatch.setattr(litellm_mod, "litellm", litellm_fake, raising=True)
+    LitellmModel = litellm_mod.LitellmModel
+
+    model = LitellmModel(model="gpt-4")
+
+    with user_agent_override("test_user_agent"):
+        await model.get_response(
+            system_instructions=None,
+            input="hi",
+            model_settings=ModelSettings(),
+            tools=[],
+            output_schema=None,
+            handoffs=[],
+            tracing=ModelTracing.DISABLED,
+            previous_response_id=None,
+            conversation_id=None,
+            prompt=None,
+        )
+
+    assert "extra_headers" in called_kwargs
+    assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
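Note: the litellm test stubs the dependency by planting a module object in sys.modules before anything imports it; monkeypatch restores the original entry afterwards. The same pattern in isolation (the module and attribute names here are made up for illustration):

    import sys
    import types

    def test_uses_fake_dependency(monkeypatch):
        fake = types.ModuleType("some_heavy_dep")
        fake.answer = lambda: 42  # stand-in for the real API surface

        # Any `import some_heavy_dep` after this line resolves to the fake;
        # monkeypatch undoes the sys.modules entry when the test finishes.
        monkeypatch.setitem(sys.modules, "some_heavy_dep", fake)

        import some_heavy_dep

        assert some_heavy_dep.answer() == 42

Because agents.extensions.models.litellm_model may already be imported with the real symbol bound, the test above also patches the module attribute directly via monkeypatch.setattr.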
== "test_user_agent" From d0b63b4f4e22ce721d8ef5392781997eb961f3f4 Mon Sep 17 00:00:00 2001 From: Jiwon Kim Date: Wed, 17 Sep 2025 18:37:46 -0700 Subject: [PATCH 3/9] update docstring --- src/agents/__init__.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/agents/__init__.py b/src/agents/__init__.py index 624fdf826..23a76f7e5 100644 --- a/src/agents/__init__.py +++ b/src/agents/__init__.py @@ -1,7 +1,7 @@ -from collections.abc import Iterator -from contextlib import contextmanager import logging import sys +from collections.abc import Iterator +from contextlib import contextmanager from typing import Literal from openai import AsyncOpenAI @@ -163,8 +163,13 @@ def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> Non @contextmanager def user_agent_override(user_agent: str | None) -> Iterator[None]: - """Set the user agent override for OpenAI LLM requests. This is useful if you want to set a custom - user agent for OpenAI LLM requests. + """ + Temporarily override the User-Agent header for outbound OpenAI LLM requests. + + This is **not** part of the public API and may change or be removed at any time + without notice. Intended only for OpenAI-maintained packages and tests. + + External integrators should use `model_settings.extra_headers` instead. """ with _config.user_agent_override(user_agent): yield From c8b933cb062c2e9f5b9b419c140e632982e25611 Mon Sep 17 00:00:00 2001 From: Jiwon Kim Date: Wed, 17 Sep 2025 19:48:30 -0700 Subject: [PATCH 4/9] Added tests for no ua override cases too --- tests/test_config.py | 51 ++++++++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/tests/test_config.py b/tests/test_config.py index 6aec7e6b1..00e67e157 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,12 +1,14 @@ +from contextlib import nullcontext import os from typing import Any import openai -import pytest from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_message import ChatCompletionMessage from openai.types.responses import ResponseCompletedEvent +import pytest +from agents import __version__ from agents import ( ModelSettings, ModelTracing, @@ -78,8 +80,10 @@ def test_set_default_openai_api(): @pytest.mark.allow_call_model_methods @pytest.mark.asyncio -async def test_user_agent_override_responses(): +@pytest.mark.parametrize("override_ua", [None, "test_user_agent"]) +async def test_user_agent_header_responses(override_ua): called_kwargs = {} + expected_ua = override_ua or f"Agents/Python {__version__}" class DummyStream: def __aiter__(self): @@ -98,13 +102,14 @@ async def create(self, **kwargs): called_kwargs = kwargs return DummyStream() - class DummyClient: + class DummyResponsesClient: def __init__(self): self.responses = DummyResponses() - model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyClient()) # type: ignore + model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyResponsesClient()) # type: ignore - with user_agent_override("test_user_agent"): + cm = user_agent_override(override_ua) if override_ua else nullcontext() + with cm: stream = model.stream_response( system_instructions=None, input="hi", @@ -114,24 +119,24 @@ def __init__(self): handoffs=[], tracing=ModelTracing.DISABLED, ) - async for _ in stream: pass assert "extra_headers" in called_kwargs - assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent" + assert called_kwargs["extra_headers"]["User-Agent"] == 
From c8b933cb062c2e9f5b9b419c140e632982e25611 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 19:48:30 -0700
Subject: [PATCH 4/9] Added tests for no ua override cases too

---
 tests/test_config.py | 51 ++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 21 deletions(-)

diff --git a/tests/test_config.py b/tests/test_config.py
index 6aec7e6b1..00e67e157 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,12 +1,14 @@
+from contextlib import nullcontext
 import os
 from typing import Any
 
 import openai
-import pytest
 from openai.types.chat.chat_completion import ChatCompletion, Choice
 from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from openai.types.responses import ResponseCompletedEvent
+import pytest
 
+from agents import __version__
 from agents import (
     ModelSettings,
     ModelTracing,
@@ -78,8 +80,10 @@ def test_set_default_openai_api():
 
 @pytest.mark.allow_call_model_methods
 @pytest.mark.asyncio
-async def test_user_agent_override_responses():
+@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
+async def test_user_agent_header_responses(override_ua):
     called_kwargs = {}
+    expected_ua = override_ua or f"Agents/Python {__version__}"
 
     class DummyStream:
         def __aiter__(self):
@@ -98,13 +102,14 @@ async def create(self, **kwargs):
             called_kwargs = kwargs
             return DummyStream()
 
-    class DummyClient:
+    class DummyResponsesClient:
         def __init__(self):
             self.responses = DummyResponses()
 
-    model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyClient())  # type: ignore
+    model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyResponsesClient())  # type: ignore
 
-    with user_agent_override("test_user_agent"):
+    cm = user_agent_override(override_ua) if override_ua else nullcontext()
+    with cm:
         stream = model.stream_response(
             system_instructions=None,
             input="hi",
@@ -114,24 +119,24 @@ def __init__(self):
             handoffs=[],
             tracing=ModelTracing.DISABLED,
         )
-
         async for _ in stream:
             pass
 
     assert "extra_headers" in called_kwargs
-    assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
+    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
 
 
 @pytest.mark.allow_call_model_methods
 @pytest.mark.asyncio
-async def test_user_agent_override_chat_completions():
+@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
+async def test_user_agent_header_chat_completions(override_ua):
     called_kwargs = {}
+    expected_ua = override_ua or f"Agents/Python {__version__}"
 
     class DummyCompletions:
         async def create(self, **kwargs):
             nonlocal called_kwargs
             called_kwargs = kwargs
-
             msg = ChatCompletionMessage(role="assistant", content="Hello")
             choice = Choice(index=0, finish_reason="stop", message=msg)
             return ChatCompletion(
@@ -143,14 +148,15 @@ async def create(self, **kwargs):
                 usage=None,
             )
 
-    class DummyClient:
+    class DummyChatClient:
         def __init__(self):
             self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
             self.base_url = "https://api.openai.com"
 
-    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient())  # type: ignore
+    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyChatClient())  # type: ignore
 
-    with user_agent_override("test_user_agent"):
+    cm = user_agent_override(override_ua) if override_ua else nullcontext()
+    with cm:
         await model.get_response(
@@ -164,19 +170,20 @@ def __init__(self):
         )
 
     assert "extra_headers" in called_kwargs
-    assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
+    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
 
 
 @pytest.mark.allow_call_model_methods
 @pytest.mark.asyncio
-async def test_user_agent_override_litellm(monkeypatch):
+@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
+async def test_user_agent_header_litellm(override_ua, monkeypatch):
+    called_kwargs = {}
+    expected_ua = override_ua or f"Agents/Python {__version__}"
+
     import importlib
     import sys
     import types as pytypes
 
-    called_kwargs = {}
-
-    # Create a fake litellm module so we don't need the real dependency
     litellm_fake: Any = pytypes.ModuleType("litellm")
 
     class DummyMessage:
@@ -196,7 +203,6 @@ def __init__(self):
 
     class DummyModelResponse:
         def __init__(self):
-            # Minimal shape expected by get_response()
             self.choices = [Choices()]
 
     async def acompletion(**kwargs):
@@ -217,14 +223,14 @@ async def acompletion(**kwargs):
 
     monkeypatch.setitem(sys.modules, "litellm", litellm_fake)
 
-    # Import after injecting fake module and patch the module's symbol directly
     litellm_mod = importlib.import_module("agents.extensions.models.litellm_model")
     monkeypatch.setattr(litellm_mod, "litellm", litellm_fake, raising=True)
     LitellmModel = litellm_mod.LitellmModel
 
     model = LitellmModel(model="gpt-4")
 
-    with user_agent_override("test_user_agent"):
+    cm = user_agent_override(override_ua) if override_ua else nullcontext()
+    with cm:
         await model.get_response(
             system_instructions=None,
             input="hi",
@@ -239,4 +245,7 @@ async def acompletion(**kwargs):
     )
 
     assert "extra_headers" in called_kwargs
-    assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
+    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
+
+
+# (Replaced by test_user_agent_header_parametrized)
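Note: the parametrized tests rely on contextlib.nullcontext as a no-op stand-in so the override and no-override cases share one code path. The idiom in isolation (a runnable toy, not SDK code):

    from contextlib import contextmanager, nullcontext

    _state = {"ua": None}

    @contextmanager
    def override_ua(value):
        _state["ua"] = value
        try:
            yield
        finally:
            _state["ua"] = None

    def current_ua(override):
        # A real context manager when an override is given, otherwise a
        # do-nothing placeholder -- the with-block body is identical either way.
        cm = override_ua(override) if override else nullcontext()
        with cm:
            return _state["ua"] or "default-agent"

    assert current_ua("custom/1.0") == "custom/1.0"
    assert current_ua(None) == "default-agent"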
From 544b5a3b8730becf31c4a89837b4db1ab58992a9 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 19:52:56 -0700
Subject: [PATCH 5/9] type fixes

---
 src/agents/__init__.py | 2 ++
 src/agents/_config.py  | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/src/agents/__init__.py b/src/agents/__init__.py
index 23a76f7e5..25a886dd2 100644
--- a/src/agents/__init__.py
+++ b/src/agents/__init__.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import logging
 import sys
 from collections.abc import Iterator
diff --git a/src/agents/_config.py b/src/agents/_config.py
index 7a50943ac..b2f2859e7 100644
--- a/src/agents/_config.py
+++ b/src/agents/_config.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from collections.abc import Iterator
 from contextlib import contextmanager
 

From f0511820e5b11769d034da2a7d84993ffd519a69 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 19:54:29 -0700
Subject: [PATCH 6/9] make format

---
 examples/basic/tools.py | 1 +
 tests/test_config.py    | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/examples/basic/tools.py b/examples/basic/tools.py
index 1c4496603..2052d9427 100644
--- a/examples/basic/tools.py
+++ b/examples/basic/tools.py
@@ -18,6 +18,7 @@ def get_weather(city: Annotated[str, "The city to get the weather for"]) -> Weat
     print("[debug] get_weather called")
     return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.")
 
+
 agent = Agent(
     name="Hello world",
     instructions="You are a helpful agent.",
diff --git a/tests/test_config.py b/tests/test_config.py
index 00e67e157..b50a07fe0 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,17 +1,17 @@
-from contextlib import nullcontext
 import os
+from contextlib import nullcontext
 from typing import Any
 
 import openai
+import pytest
 from openai.types.chat.chat_completion import ChatCompletion, Choice
 from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from openai.types.responses import ResponseCompletedEvent
-import pytest
 
-from agents import __version__
 from agents import (
     ModelSettings,
     ModelTracing,
+    __version__,
     set_default_openai_api,
     set_default_openai_client,
     set_default_openai_key,
From 449c461781470d885bb01896ef746bda25609614 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 20:17:49 -0700
Subject: [PATCH 7/9] Use ContextVar colocated with model instead of adding top level export

---
 src/agents/__init__.py                        |  19 --
 src/agents/_config.py                         |  14 --
 src/agents/extensions/models/litellm_model.py |   9 +-
 src/agents/models/_openai_shared.py           |   4 -
 src/agents/models/chatcmpl_helpers.py         |   6 +
 src/agents/models/openai_chatcompletions.py   |   8 +-
 src/agents/models/openai_responses.py         |  12 +-
 tests/models/test_litellm_user_agent.py       |  89 ++++++++
 tests/test_config.py                          | 191 +-----------------
 tests/test_openai_chatcompletions.py          |  57 +++++-
 tests/test_openai_responses.py                |  65 ++++++
 11 files changed, 235 insertions(+), 239 deletions(-)
 create mode 100644 tests/models/test_litellm_user_agent.py
 create mode 100644 tests/test_openai_responses.py

diff --git a/src/agents/__init__.py b/src/agents/__init__.py
index 25a886dd2..3a8260f29 100644
--- a/src/agents/__init__.py
+++ b/src/agents/__init__.py
@@ -1,9 +1,5 @@
-from __future__ import annotations
-
 import logging
 import sys
-from collections.abc import Iterator
-from contextlib import contextmanager
 from typing import Literal
 
 from openai import AsyncOpenAI
@@ -163,20 +159,6 @@ def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> Non
     _config.set_default_openai_api(api)
 
 
-@contextmanager
-def user_agent_override(user_agent: str | None) -> Iterator[None]:
-    """
-    Temporarily override the User-Agent header for outbound OpenAI LLM requests.
-
-    This is **not** part of the public API and may change or be removed at any time
-    without notice. Intended only for OpenAI-maintained packages and tests.
-
-    External integrators should use `model_settings.extra_headers` instead.
-    """
-    with _config.user_agent_override(user_agent):
-        yield
-
-
 def enable_verbose_stdout_logging():
     """Enables verbose logging to stdout. This is useful for debugging."""
     logger = logging.getLogger("openai.agents")
@@ -304,7 +286,6 @@ def enable_verbose_stdout_logging():
     "set_default_openai_key",
     "set_default_openai_client",
     "set_default_openai_api",
-    "user_agent_override",
     "set_tracing_export_api_key",
     "enable_verbose_stdout_logging",
     "gen_trace_id",
diff --git a/src/agents/_config.py b/src/agents/_config.py
index b2f2859e7..304cfb83c 100644
--- a/src/agents/_config.py
+++ b/src/agents/_config.py
@@ -1,8 +1,3 @@
-from __future__ import annotations
-
-from collections.abc import Iterator
-from contextlib import contextmanager
-
 from openai import AsyncOpenAI
 from typing_extensions import Literal
 
@@ -29,12 +24,3 @@ def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> Non
         _openai_shared.set_use_responses_by_default(False)
     else:
         _openai_shared.set_use_responses_by_default(True)
-
-
-@contextmanager
-def user_agent_override(user_agent: str | None) -> Iterator[None]:
-    try:
-        _openai_shared.set_user_agent_override(user_agent)
-        yield
-    finally:
-        _openai_shared.set_user_agent_override(None)
diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index 8f7aa6945..3743d82f2 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -10,8 +10,6 @@
 
 from agents.exceptions import ModelBehaviorError
 
-from ...models import _openai_shared
-
 try:
     import litellm
 except ImportError as _e:
@@ -41,7 +39,7 @@
 from ...logger import logger
 from ...model_settings import ModelSettings
 from ...models.chatcmpl_converter import Converter
-from ...models.chatcmpl_helpers import HEADERS
+from ...models.chatcmpl_helpers import HEADERS, USER_AGENT_OVERRIDE
 from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
 from ...models.fake_id import FAKE_RESPONSES_ID
 from ...models.interface import Model, ModelTracing
@@ -388,8 +386,9 @@ def _remove_not_given(self, value: Any) -> Any:
 
     def _merge_headers(self, model_settings: ModelSettings):
         merged = {**HEADERS, **(model_settings.extra_headers or {})}
-        if ua_override := _openai_shared.get_user_agent_override():
-            merged["User-Agent"] = ua_override
+        ua_ctx = USER_AGENT_OVERRIDE.get()
+        if ua_ctx is not None:
+            merged["User-Agent"] = ua_ctx
         return merged
diff --git a/src/agents/models/_openai_shared.py b/src/agents/models/_openai_shared.py
index e9f775547..96ed20ed4 100644
--- a/src/agents/models/_openai_shared.py
+++ b/src/agents/models/_openai_shared.py
@@ -38,7 +38,3 @@ def get_use_responses_by_default() -> bool:
 def set_user_agent_override(user_agent: str | None) -> None:
     global _user_agent_override
     _user_agent_override = user_agent
-
-
-def get_user_agent_override() -> str | None:
-    return _user_agent_override
diff --git a/src/agents/models/chatcmpl_helpers.py b/src/agents/models/chatcmpl_helpers.py
index 0cee21ecc..51f2cc258 100644
--- a/src/agents/models/chatcmpl_helpers.py
+++ b/src/agents/models/chatcmpl_helpers.py
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+from contextvars import ContextVar
+
 from openai import AsyncOpenAI
 
 from ..model_settings import ModelSettings
@@ -8,6 +10,10 @@
 _USER_AGENT = f"Agents/Python {__version__}"
 HEADERS = {"User-Agent": _USER_AGENT}
 
+USER_AGENT_OVERRIDE: ContextVar[str | None] = ContextVar(
+    "openai_chatcompletions_user_agent_override", default=None
+)
+
 
 class ChatCmplHelpers:
     @classmethod
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 2b9a0b750..ea355b325 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -24,9 +24,8 @@
 from ..tracing.spans import Span
 from ..usage import Usage
 from ..util._json import _to_dump_compatible
-from . import _openai_shared
 from .chatcmpl_converter import Converter
-from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
+from .chatcmpl_helpers import HEADERS, USER_AGENT_OVERRIDE, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
 from .fake_id import FAKE_RESPONSES_ID
 from .interface import Model, ModelTracing
@@ -353,6 +352,7 @@ def _get_client(self) -> AsyncOpenAI:
 
     def _merge_headers(self, model_settings: ModelSettings):
         merged = {**HEADERS, **(model_settings.extra_headers or {})}
-        if ua_override := _openai_shared.get_user_agent_override():
-            merged["User-Agent"] = ua_override
+        ua_ctx = USER_AGENT_OVERRIDE.get()
+        if ua_ctx is not None:
+            merged["User-Agent"] = ua_ctx
         return merged
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 5918c5051..5886b4833 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -2,6 +2,7 @@
 
 import json
 from collections.abc import AsyncIterator
+from contextvars import ContextVar
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
@@ -40,7 +41,6 @@
 from ..usage import Usage
 from ..util._json import _to_dump_compatible
 from ..version import __version__
-from . import _openai_shared
 from .interface import Model, ModelTracing
 
 if TYPE_CHECKING:
@@ -50,6 +50,11 @@
 _USER_AGENT = f"Agents/Python {__version__}"
 _HEADERS = {"User-Agent": _USER_AGENT}
 
+# Override for the User-Agent header used by the Responses API.
+_USER_AGENT_OVERRIDE: ContextVar[str | None] = ContextVar(
+    "openai_responses_user_agent_override", default=None
+)
+
 
 class OpenAIResponsesModel(Model):
     """
@@ -330,8 +335,9 @@ def _get_client(self) -> AsyncOpenAI:
 
     def _merge_headers(self, model_settings: ModelSettings):
         merged = {**_HEADERS, **(model_settings.extra_headers or {})}
-        if ua_override := _openai_shared.get_user_agent_override():
-            merged["User-Agent"] = ua_override
+        ua_ctx = _USER_AGENT_OVERRIDE.get()
+        if ua_ctx is not None:
+            merged["User-Agent"] = ua_ctx
         return merged
diff --git a/tests/models/test_litellm_user_agent.py b/tests/models/test_litellm_user_agent.py
new file mode 100644
index 000000000..03f0f6b84
--- /dev/null
+++ b/tests/models/test_litellm_user_agent.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+from agents import ModelSettings, ModelTracing, __version__
+from agents.models.chatcmpl_helpers import USER_AGENT_OVERRIDE
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
+async def test_user_agent_header_litellm(override_ua: str | None, monkeypatch):
+    called_kwargs: dict[str, Any] = {}
+    expected_ua = override_ua or f"Agents/Python {__version__}"
+
+    import importlib
+    import sys
+    import types as pytypes
+
+    litellm_fake: Any = pytypes.ModuleType("litellm")
+
+    class DummyMessage:
+        role = "assistant"
+        content = "Hello"
+        tool_calls: list[Any] | None = None
+
+        def get(self, _key, _default=None):
+            return None
+
+        def model_dump(self):
+            return {"role": self.role, "content": self.content}
+
+    class Choices:  # noqa: N801 - mimic litellm naming
+        def __init__(self):
+            self.message = DummyMessage()
+
+    class DummyModelResponse:
+        def __init__(self):
+            self.choices = [Choices()]
+
+    async def acompletion(**kwargs):
+        nonlocal called_kwargs
+        called_kwargs = kwargs
+        return DummyModelResponse()
+
+    utils_ns = pytypes.SimpleNamespace()
+    utils_ns.Choices = Choices
+    utils_ns.ModelResponse = DummyModelResponse
+
+    litellm_types = pytypes.SimpleNamespace(
+        utils=utils_ns,
+        llms=pytypes.SimpleNamespace(openai=pytypes.SimpleNamespace(ChatCompletionAnnotation=dict)),
+    )
+    litellm_fake.acompletion = acompletion
+    litellm_fake.types = litellm_types
+
+    monkeypatch.setitem(sys.modules, "litellm", litellm_fake)
+
+    litellm_mod = importlib.import_module("agents.extensions.models.litellm_model")
+    monkeypatch.setattr(litellm_mod, "litellm", litellm_fake, raising=True)
+    LitellmModel = litellm_mod.LitellmModel
+
+    model = LitellmModel(model="gpt-4")
+
+    if override_ua is not None:
+        token = USER_AGENT_OVERRIDE.set(override_ua)
+    else:
+        token = None
+    try:
+        await model.get_response(
+            system_instructions=None,
+            input="hi",
+            model_settings=ModelSettings(),
+            tools=[],
+            output_schema=None,
+            handoffs=[],
+            tracing=ModelTracing.DISABLED,
+            previous_response_id=None,
+            conversation_id=None,
+            prompt=None,
+        )
+    finally:
+        if token is not None:
+            USER_AGENT_OVERRIDE.reset(token)
+
+    assert "extra_headers" in called_kwargs
+    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
diff --git a/tests/test_config.py b/tests/test_config.py
index b50a07fe0..dba854db3 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,26 +1,12 @@
 import os
-from contextlib import nullcontext
-from typing import Any
 
 import openai
 import pytest
-from openai.types.chat.chat_completion import ChatCompletion, Choice
-from openai.types.chat.chat_completion_message import ChatCompletionMessage
-from openai.types.responses import ResponseCompletedEvent
-
-from agents import (
-    ModelSettings,
-    ModelTracing,
-    __version__,
-    set_default_openai_api,
-    set_default_openai_client,
-    set_default_openai_key,
-)
-from agents._config import user_agent_override
+
+from agents import set_default_openai_api, set_default_openai_client, set_default_openai_key
 from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
 from agents.models.openai_provider import OpenAIProvider
 from agents.models.openai_responses import OpenAIResponsesModel
-from tests.fake_model import get_response_obj
 
 
 def test_cc_no_default_key_errors(monkeypatch):
@@ -76,176 +62,3 @@ def test_set_default_openai_api():
     assert isinstance(OpenAIProvider().get_model("gpt-4"), OpenAIResponsesModel), (
         "Should be responses model"
     )
-
-
-@pytest.mark.allow_call_model_methods
-@pytest.mark.asyncio
-@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
-async def test_user_agent_header_responses(override_ua):
-    called_kwargs = {}
-    expected_ua = override_ua or f"Agents/Python {__version__}"
-
-    class DummyStream:
-        def __aiter__(self):
-            async def gen():
-                yield ResponseCompletedEvent(
-                    type="response.completed",
-                    response=get_response_obj([]),
-                    sequence_number=0,
-                )
-
-            return gen()
-
-    class DummyResponses:
-        async def create(self, **kwargs):
-            nonlocal called_kwargs
-            called_kwargs = kwargs
-            return DummyStream()
-
-    class DummyResponsesClient:
-        def __init__(self):
-            self.responses = DummyResponses()
-
-    model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyResponsesClient())  # type: ignore
-
-    cm = user_agent_override(override_ua) if override_ua else nullcontext()
-    with cm:
-        stream = model.stream_response(
-            system_instructions=None,
-            input="hi",
-            model_settings=ModelSettings(),
-            tools=[],
-            output_schema=None,
-            handoffs=[],
-            tracing=ModelTracing.DISABLED,
-        )
-        async for _ in stream:
-            pass
-
-    assert "extra_headers" in called_kwargs
-    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
-
-
-@pytest.mark.allow_call_model_methods
-@pytest.mark.asyncio
-@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
-async def test_user_agent_header_chat_completions(override_ua):
-    called_kwargs = {}
-    expected_ua = override_ua or f"Agents/Python {__version__}"
-
-    class DummyCompletions:
-        async def create(self, **kwargs):
-            nonlocal called_kwargs
-            called_kwargs = kwargs
-            msg = ChatCompletionMessage(role="assistant", content="Hello")
-            choice = Choice(index=0, finish_reason="stop", message=msg)
-            return ChatCompletion(
-                id="resp-id",
-                created=0,
-                model="fake",
-                object="chat.completion",
-                choices=[choice],
-                usage=None,
-            )
-
-    class DummyChatClient:
-        def __init__(self):
-            self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
-            self.base_url = "https://api.openai.com"
-
-    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyChatClient())  # type: ignore
-
-    cm = user_agent_override(override_ua) if override_ua else nullcontext()
-    with cm:
-        await model.get_response(
-            system_instructions=None,
-            input="hi",
-            model_settings=ModelSettings(),
-            tools=[],
-            output_schema=None,
-            handoffs=[],
-            tracing=ModelTracing.DISABLED,
-            previous_response_id=None,
-            conversation_id=None,
-        )
-
-    assert "extra_headers" in called_kwargs
-    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
-
-
-@pytest.mark.allow_call_model_methods
-@pytest.mark.asyncio
-@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
-async def test_user_agent_header_litellm(override_ua, monkeypatch):
-    called_kwargs = {}
-    expected_ua = override_ua or f"Agents/Python {__version__}"
-
-    import importlib
-    import sys
-    import types as pytypes
-
-    litellm_fake: Any = pytypes.ModuleType("litellm")
-
-    class DummyMessage:
-        role = "assistant"
-        content = "Hello"
-        tool_calls = None
-
-        def get(self, _key, _default=None):
-            return None
-
-        def model_dump(self):
-            return {"role": self.role, "content": self.content}
-
-    class Choices:  # noqa: N801 - mimic litellm naming
-        def __init__(self):
-            self.message = DummyMessage()
-
-    class DummyModelResponse:
-        def __init__(self):
-            self.choices = [Choices()]
-
-    async def acompletion(**kwargs):
-        nonlocal called_kwargs
-        called_kwargs = kwargs
-        return DummyModelResponse()
-
-    utils_ns = pytypes.SimpleNamespace()
-    utils_ns.Choices = Choices
-    utils_ns.ModelResponse = DummyModelResponse
-
-    litellm_types = pytypes.SimpleNamespace(
-        utils=utils_ns,
-        llms=pytypes.SimpleNamespace(openai=pytypes.SimpleNamespace(ChatCompletionAnnotation=dict)),
-    )
-    litellm_fake.acompletion = acompletion
-    litellm_fake.types = litellm_types
-
-    monkeypatch.setitem(sys.modules, "litellm", litellm_fake)
-
-    litellm_mod = importlib.import_module("agents.extensions.models.litellm_model")
-    monkeypatch.setattr(litellm_mod, "litellm", litellm_fake, raising=True)
-    LitellmModel = litellm_mod.LitellmModel
-
-    model = LitellmModel(model="gpt-4")
-
-    cm = user_agent_override(override_ua) if override_ua else nullcontext()
-    with cm:
-        await model.get_response(
-            system_instructions=None,
-            input="hi",
-            model_settings=ModelSettings(),
-            tools=[],
-            output_schema=None,
-            handoffs=[],
-            tracing=ModelTracing.DISABLED,
-            previous_response_id=None,
-            conversation_id=None,
-            prompt=None,
-        )
-
-    assert "extra_headers" in called_kwargs
-    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
-
-
-# (Replaced by test_user_agent_header_parametrized)
diff --git a/tests/test_openai_chatcompletions.py b/tests/test_openai_chatcompletions.py
index d52d89b47..df44021a2 100644
--- a/tests/test_openai_chatcompletions.py
+++ b/tests/test_openai_chatcompletions.py
@@ -31,9 +31,10 @@
     ModelTracing,
     OpenAIChatCompletionsModel,
     OpenAIProvider,
+    __version__,
     generation_span,
 )
-from agents.models.chatcmpl_helpers import ChatCmplHelpers
+from agents.models.chatcmpl_helpers import USER_AGENT_OVERRIDE, ChatCmplHelpers
 from agents.models.fake_id import FAKE_RESPONSES_ID
 
 
@@ -370,6 +371,60 @@ def test_store_param():
         "Should respect explicitly set store=True"
     )
 
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
+async def test_user_agent_header_chat_completions(override_ua):
+    called_kwargs: dict[str, Any] = {}
+    expected_ua = override_ua or f"Agents/Python {__version__}"
+
+    class DummyCompletions:
+        async def create(self, **kwargs):
+            nonlocal called_kwargs
+            called_kwargs = kwargs
+            msg = ChatCompletionMessage(role="assistant", content="Hello")
+            choice = Choice(index=0, finish_reason="stop", message=msg)
+            return ChatCompletion(
+                id="resp-id",
+                created=0,
+                model="fake",
+                object="chat.completion",
+                choices=[choice],
+                usage=None,
+            )
+
+    class DummyChatClient:
+        def __init__(self):
+            self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
+            self.base_url = "https://api.openai.com"
+
+    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyChatClient())  # type: ignore
+
+    if override_ua is not None:
+        token = USER_AGENT_OVERRIDE.set(override_ua)
+    else:
+        token = None
+
+    try:
+        await model.get_response(
+            system_instructions=None,
+            input="hi",
+            model_settings=ModelSettings(),
+            tools=[],
+            output_schema=None,
+            handoffs=[],
+            tracing=ModelTracing.DISABLED,
+            previous_response_id=None,
+            conversation_id=None,
+        )
+    finally:
+        if token is not None:
+            USER_AGENT_OVERRIDE.reset(token)
+
+    assert "extra_headers" in called_kwargs
+    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
+
     client = AsyncOpenAI(base_url="http://www.notopenai.com")
     model_settings = ModelSettings()
     assert ChatCmplHelpers.get_store_param(client, model_settings) is None, (
diff --git a/tests/test_openai_responses.py b/tests/test_openai_responses.py
new file mode 100644
index 000000000..81e16c03e
--- /dev/null
+++ b/tests/test_openai_responses.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+from openai.types.responses import ResponseCompletedEvent
+
+from agents import ModelSettings, ModelTracing, __version__
+from agents.models.openai_responses import _USER_AGENT_OVERRIDE as RESP_UA, OpenAIResponsesModel
+from tests.fake_model import get_response_obj
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
+async def test_user_agent_header_responses(override_ua: str | None):
+    called_kwargs: dict[str, Any] = {}
+    expected_ua = override_ua or f"Agents/Python {__version__}"
+
+    class DummyStream:
+        def __aiter__(self):
+            async def gen():
+                yield ResponseCompletedEvent(
+                    type="response.completed",
+                    response=get_response_obj([]),
+                    sequence_number=0,
+                )
+
+            return gen()
+
+    class DummyResponses:
+        async def create(self, **kwargs):
+            nonlocal called_kwargs
+            called_kwargs = kwargs
+            return DummyStream()
+
+    class DummyResponsesClient:
+        def __init__(self):
+            self.responses = DummyResponses()
+
+    model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyResponsesClient())  # type: ignore
+
+    if override_ua is not None:
+        token = RESP_UA.set(override_ua)
+    else:
+        token = None
+
+    try:
+        stream = model.stream_response(
+            system_instructions=None,
+            input="hi",
+            model_settings=ModelSettings(),
+            tools=[],
+            output_schema=None,
+            handoffs=[],
+            tracing=ModelTracing.DISABLED,
+        )
+        async for _ in stream:
+            pass
+    finally:
+        if token is not None:
+            RESP_UA.reset(token)
+
+    assert "extra_headers" in called_kwargs
+    assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
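Note: with the ContextVar approach, set() returns a token and reset(token) restores the previous value, and each asyncio task gets its own copy of the variable, so concurrent runs cannot leak overrides into each other. A standalone sketch of the discipline the new tests follow (the names and the default string are illustrative):

    import asyncio
    from contextvars import ContextVar

    UA_OVERRIDE: ContextVar[str | None] = ContextVar("ua_override", default=None)

    def effective_user_agent() -> str:
        return UA_OVERRIDE.get() or "Agents/Python x.y.z"  # placeholder default

    async def request_as(name: str | None) -> str:
        if name is None:
            return effective_user_agent()
        token = UA_OVERRIDE.set(name)
        try:
            return effective_user_agent()
        finally:
            UA_OVERRIDE.reset(token)

    async def main():
        # Each task sees only its own override.
        results = await asyncio.gather(request_as("wrapper-a/1.0"), request_as(None))
        print(results)  # ['wrapper-a/1.0', 'Agents/Python x.y.z']

    asyncio.run(main())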
From a490e6275724ef4d72cabf3c42ece545640df9f3 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 20:20:52 -0700
Subject: [PATCH 8/9] delete unused code

---
 src/agents/models/_openai_shared.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/agents/models/_openai_shared.py b/src/agents/models/_openai_shared.py
index 96ed20ed4..2e1450187 100644
--- a/src/agents/models/_openai_shared.py
+++ b/src/agents/models/_openai_shared.py
@@ -5,7 +5,6 @@
 _default_openai_key: str | None = None
 _default_openai_client: AsyncOpenAI | None = None
 _use_responses_by_default: bool = True
-_user_agent_override: str | None = None
 
 
 def set_default_openai_key(key: str) -> None:
@@ -32,8 +32,3 @@ def set_use_responses_by_default(use_responses: bool) -> None:
 
 def get_use_responses_by_default() -> bool:
     return _use_responses_by_default
-
-
-def set_user_agent_override(user_agent: str | None) -> None:
-    global _user_agent_override
-    _user_agent_override = user_agent

From 122ec73ea2fa9591a075c26dcc3a6e7ba9d90796 Mon Sep 17 00:00:00 2001
From: Jiwon Kim
Date: Wed, 17 Sep 2025 20:24:28 -0700
Subject: [PATCH 9/9] undo auto newline added by make format

---
 examples/basic/tools.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/basic/tools.py b/examples/basic/tools.py
index 2052d9427..1c4496603 100644
--- a/examples/basic/tools.py
+++ b/examples/basic/tools.py
@@ -18,7 +18,6 @@ def get_weather(city: Annotated[str, "The city to get the weather for"]) -> Weat
     print("[debug] get_weather called")
     return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.")
 
-
 agent = Agent(
     name="Hello world",
     instructions="You are a helpful agent.",