Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/agents/extensions/models/litellm_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,6 +269,8 @@ async def _fetch_response(
extra_kwargs["extra_query"] = model_settings.extra_query
if model_settings.metadata:
extra_kwargs["metadata"] = model_settings.metadata
if model_settings.extra_body and isinstance(model_settings.extra_body, dict):
extra_kwargs.update(model_settings.extra_body)

ret = await litellm.acompletion(
model=self.model,
Expand Down
45 changes: 45 additions & 0 deletions tests/models/test_litellm_extra_body.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import litellm
import pytest
from litellm.types.utils import Choices, Message, ModelResponse, Usage

from agents.extensions.models.litellm_model import LitellmModel
from agents.model_settings import ModelSettings
from agents.models.interface import ModelTracing


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_extra_body_is_forwarded(monkeypatch):
    """
    Verify that `extra_body` entries are merged into the kwargs passed
    to `litellm.acompletion`.

    User-supplied parameters (e.g. cached_content) must travel together
    with the arguments the model builds itself.
    """
    recorded_kwargs: dict[str, object] = {}

    # Stub out litellm.acompletion: record the kwargs it receives and
    # return a minimal, well-formed ModelResponse.
    async def stub_acompletion(model, messages=None, **kwargs):
        recorded_kwargs.update(kwargs)
        reply = Message(role="assistant", content="ok")
        return ModelResponse(
            choices=[Choices(index=0, message=reply)],
            usage=Usage(0, 0, 0),
        )

    monkeypatch.setattr(litellm, "acompletion", stub_acompletion)

    extra = {"cached_content": "some_cache", "foo": 123}
    model = LitellmModel(model="test-model")

    await model.get_response(
        system_instructions=None,
        input=[],
        model_settings=ModelSettings(temperature=0.1, extra_body=extra),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # Every extra_body entry must appear verbatim in the forwarded kwargs.
    for key, value in extra.items():
        assert recorded_kwargs.get(key) == value