|
1 | 1 | import os
|
| 2 | +from typing import Any |
2 | 3 |
|
3 | 4 | import openai
|
4 | 5 | import pytest
|
@@ -77,7 +78,7 @@ def test_set_default_openai_api():
|
77 | 78 |
|
78 | 79 | @pytest.mark.allow_call_model_methods
|
79 | 80 | @pytest.mark.asyncio
|
80 |
| -async def test_user_agent_override(): |
| 81 | +async def test_user_agent_override_responses(): |
81 | 82 | called_kwargs = {}
|
82 | 83 |
|
83 | 84 | class DummyStream:
|
@@ -164,3 +165,78 @@ def __init__(self):
|
164 | 165 |
|
165 | 166 | assert "extra_headers" in called_kwargs
|
166 | 167 | assert called_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
|
| 168 | + |
| 169 | + |
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_user_agent_override_litellm(monkeypatch):
    """Verify that a User-Agent override is forwarded to litellm.acompletion.

    A stand-in ``litellm`` module is injected into ``sys.modules`` so the real
    dependency is not required; the kwargs passed to ``acompletion`` are
    captured and inspected for the overridden header.
    """
    import importlib
    import sys
    import types

    captured_kwargs = {}

    class FakeMessage:
        # Minimal message shape consumed by get_response()
        role = "assistant"
        content = "Hello"
        tool_calls = None

        def get(self, _key, _default=None):
            return None

        def model_dump(self):
            return {"role": self.role, "content": self.content}

    class Choices:  # noqa: N801 - mimic litellm naming
        def __init__(self):
            self.message = FakeMessage()

    class FakeModelResponse:
        def __init__(self):
            # Minimal shape expected by get_response()
            self.choices = [Choices()]

    async def fake_acompletion(**kwargs):
        # Record everything the model implementation passed through.
        captured_kwargs.update(kwargs)
        return FakeModelResponse()

    # Build a fake litellm module so the real dependency is unnecessary.
    fake_litellm: Any = types.ModuleType("litellm")
    fake_litellm.acompletion = fake_acompletion
    fake_litellm.types = types.SimpleNamespace(
        utils=types.SimpleNamespace(
            Choices=Choices,
            ModelResponse=FakeModelResponse,
        ),
        llms=types.SimpleNamespace(
            openai=types.SimpleNamespace(ChatCompletionAnnotation=dict)
        ),
    )

    monkeypatch.setitem(sys.modules, "litellm", fake_litellm)

    # Import after injecting the fake module and patch the module's symbol directly.
    litellm_model_module = importlib.import_module(
        "agents.extensions.models.litellm_model"
    )
    monkeypatch.setattr(litellm_model_module, "litellm", fake_litellm, raising=True)

    model = litellm_model_module.LitellmModel(model="gpt-4")

    with user_agent_override("test_user_agent"):
        await model.get_response(
            system_instructions=None,
            input="hi",
            model_settings=ModelSettings(),
            tools=[],
            output_schema=None,
            handoffs=[],
            tracing=ModelTracing.DISABLED,
            previous_response_id=None,
            conversation_id=None,
            prompt=None,
        )

    assert "extra_headers" in captured_kwargs
    assert captured_kwargs["extra_headers"]["User-Agent"] == "test_user_agent"
0 commit comments