Skip to content

Commit c8b933c

Browse files
committed
Added tests for no ua override cases too
1 parent d0b63b4 commit c8b933c

File tree

1 file changed

+30
-21
lines changed

1 file changed

+30
-21
lines changed

tests/test_config.py

Lines changed: 30 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
1+
from contextlib import nullcontext
12
import os
23
from typing import Any
34

45
import openai
5-
import pytest
66
from openai.types.chat.chat_completion import ChatCompletion, Choice
77
from openai.types.chat.chat_completion_message import ChatCompletionMessage
88
from openai.types.responses import ResponseCompletedEvent
9+
import pytest
910

11+
from agents import __version__
1012
from agents import (
1113
ModelSettings,
1214
ModelTracing,
@@ -78,8 +80,10 @@ def test_set_default_openai_api():
7880

7981
@pytest.mark.allow_call_model_methods
8082
@pytest.mark.asyncio
81-
async def test_user_agent_override_responses():
83+
@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
84+
async def test_user_agent_header_responses(override_ua):
8285
called_kwargs = {}
86+
expected_ua = override_ua or f"Agents/Python {__version__}"
8387

8488
class DummyStream:
8589
def __aiter__(self):
@@ -98,13 +102,14 @@ async def create(self, **kwargs):
98102
called_kwargs = kwargs
99103
return DummyStream()
100104

101-
class DummyClient:
105+
class DummyResponsesClient:
102106
def __init__(self):
103107
self.responses = DummyResponses()
104108

105-
model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyClient()) # type: ignore
109+
model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyResponsesClient()) # type: ignore
106110

107-
with user_agent_override("test_user_agent"):
111+
cm = user_agent_override(override_ua) if override_ua else nullcontext()
112+
with cm:
108113
stream = model.stream_response(
109114
system_instructions=None,
110115
input="hi",
@@ -114,24 +119,24 @@ def __init__(self):
114119
handoffs=[],
115120
tracing=ModelTracing.DISABLED,
116121
)
117-
118122
async for _ in stream:
119123
pass
120124

121125
assert "extra_headers" in called_kwargs
122-
assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
126+
assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
123127

124128

125129
@pytest.mark.allow_call_model_methods
126130
@pytest.mark.asyncio
127-
async def test_user_agent_override_chat_completions():
131+
@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
132+
async def test_user_agent_header_chat_completions(override_ua):
128133
called_kwargs = {}
134+
expected_ua = override_ua or f"Agents/Python {__version__}"
129135

130136
class DummyCompletions:
131137
async def create(self, **kwargs):
132138
nonlocal called_kwargs
133139
called_kwargs = kwargs
134-
135140
msg = ChatCompletionMessage(role="assistant", content="Hello")
136141
choice = Choice(index=0, finish_reason="stop", message=msg)
137142
return ChatCompletion(
@@ -143,14 +148,15 @@ async def create(self, **kwargs):
143148
usage=None,
144149
)
145150

146-
class DummyClient:
151+
class DummyChatClient:
147152
def __init__(self):
148153
self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
149154
self.base_url = "https://api.openai.com"
150155

151-
model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient()) # type: ignore
156+
model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyChatClient()) # type: ignore
152157

153-
with user_agent_override("test_user_agent"):
158+
cm = user_agent_override(override_ua) if override_ua else nullcontext()
159+
with cm:
154160
await model.get_response(
155161
system_instructions=None,
156162
input="hi",
@@ -164,19 +170,20 @@ def __init__(self):
164170
)
165171

166172
assert "extra_headers" in called_kwargs
167-
assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
173+
assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
168174

169175

170176
@pytest.mark.allow_call_model_methods
171177
@pytest.mark.asyncio
172-
async def test_user_agent_override_litellm(monkeypatch):
178+
@pytest.mark.parametrize("override_ua", [None, "test_user_agent"])
179+
async def test_user_agent_header_litellm(override_ua, monkeypatch):
180+
called_kwargs = {}
181+
expected_ua = override_ua or f"Agents/Python {__version__}"
182+
173183
import importlib
174184
import sys
175185
import types as pytypes
176186

177-
called_kwargs = {}
178-
179-
# Create a fake litellm module so we don't need the real dependency
180187
litellm_fake: Any = pytypes.ModuleType("litellm")
181188

182189
class DummyMessage:
@@ -196,7 +203,6 @@ def __init__(self):
196203

197204
class DummyModelResponse:
198205
def __init__(self):
199-
# Minimal shape expected by get_response()
200206
self.choices = [Choices()]
201207

202208
async def acompletion(**kwargs):
@@ -217,14 +223,14 @@ async def acompletion(**kwargs):
217223

218224
monkeypatch.setitem(sys.modules, "litellm", litellm_fake)
219225

220-
# Import after injecting fake module and patch the module's symbol directly
221226
litellm_mod = importlib.import_module("agents.extensions.models.litellm_model")
222227
monkeypatch.setattr(litellm_mod, "litellm", litellm_fake, raising=True)
223228
LitellmModel = litellm_mod.LitellmModel
224229

225230
model = LitellmModel(model="gpt-4")
226231

227-
with user_agent_override("test_user_agent"):
232+
cm = user_agent_override(override_ua) if override_ua else nullcontext()
233+
with cm:
228234
await model.get_response(
229235
system_instructions=None,
230236
input="hi",
@@ -239,4 +245,7 @@ async def acompletion(**kwargs):
239245
)
240246

241247
assert "extra_headers" in called_kwargs
242-
assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
248+
assert called_kwargs["extra_headers"]["User-Agent"] == expected_ua
249+
250+
251+
# (Replaced by the parametrized test_user_agent_header_* tests above)

0 commit comments

Comments
 (0)