Skip to content

Commit e98aadd

Browse files
committed
Realtime providers
1 parent 9ca8643 commit e98aadd

File tree

10 files changed

+2104
-25
lines changed

10 files changed

+2104
-25
lines changed

examples/guardrails_wrapper/guardrails.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ async def _check_guardrails(self, text: str) -> GuardrailCheckResult:
213213

214214
try:
215215
response_text = ""
216-
stream = self._guardrail_llm.chat(messages, tools=None)
216+
stream = await self._guardrail_llm.chat(messages, tools=None)
217217
async with stream:
218218
async for chunk in stream:
219219
if chunk.text:

line/llm_agent/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@
1616
# LLM Agent
1717
from line.llm_agent.llm_agent import LlmAgent
1818

19+
# Provider facade
20+
from line.llm_agent.provider import LlmProvider
21+
1922
# Tool type decorators
2023
from line.llm_agent.tools.decorators import handoff_tool, loopback_tool, passthrough_tool
2124

@@ -38,6 +41,8 @@
3841
"History",
3942
# LLM Agent
4043
"LlmAgent",
44+
# Provider
45+
"LlmProvider",
4146
# Configuration
4247
"LlmConfig",
4348
"FALLBACK_SYSTEM_PROMPT",

line/llm_agent/llm_agent.py

Lines changed: 13 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
Union,
2323
)
2424

25-
from litellm import get_supported_openai_params
2625
from loguru import logger
2726

2827
from line.agent import AgentCallable, TurnEnv
@@ -43,7 +42,7 @@
4342
)
4443
from line.llm_agent.config import LlmConfig, _merge_configs, _normalize_config
4544
from line.llm_agent.history import _HISTORY_EVENT_TYPES, History
46-
from line.llm_agent.provider import LLMProvider, Message, ToolCall
45+
from line.llm_agent.provider import LlmProvider, Message, ToolCall
4746
from line.llm_agent.tools.decorators import loopback_tool
4847
from line.llm_agent.tools.system import EndCallTool, WebSearchTool
4948
from line.llm_agent.tools.utils import FunctionTool, ToolEnv, ToolType, construct_function_tool
@@ -75,19 +74,9 @@ def __init__(
7574
):
7675
if not api_key:
7776
raise ValueError("Missing API key in LLmAgent initialization")
78-
supported_params = get_supported_openai_params(model=model)
79-
if supported_params is None:
80-
raise ValueError(
81-
f"Model {model} is not supported. See https://models.litellm.ai/ for supported models."
82-
)
8377

8478
# Resolve the base config to insert default values for any _UNSET sentinels.
8579
effective_config = _normalize_config(config or LlmConfig())
86-
if effective_config.reasoning_effort is not None and "reasoning_effort" not in supported_params:
87-
raise ValueError(
88-
f"Model {model} does not support reasoning_effort. "
89-
"Remove reasoning_effort from your LlmConfig or use a model that supports it."
90-
)
9180

9281
self._model = model
9382
self._api_key = api_key
@@ -96,10 +85,10 @@ def __init__(
9685

9786
self._tools: List[ToolSpec] = list(tools or [])
9887

99-
self._llm = LLMProvider(
100-
model=self._model,
101-
api_key=self._api_key,
102-
config=self._config,
88+
self._llm = LlmProvider(
89+
model=model,
90+
api_key=api_key,
91+
config=effective_config,
10392
)
10493

10594
self._introduction_sent = False
@@ -186,11 +175,18 @@ async def process(
186175

187176
# Handle CallStarted
188177
if isinstance(event, CallStarted):
178+
warmup_task = asyncio.create_task(
179+
self._llm.warmup(config=effective_config)
180+
)
189181
if effective_config.introduction and not self._introduction_sent:
190182
output = AgentSendText(text=effective_config.introduction)
191183
self.history._append_local(output)
192184
self._introduction_sent = True
193185
yield output
186+
try:
187+
await warmup_task
188+
except Exception as e:
189+
logger.warning(f"Provider warmup failed: {e}")
194190
return
195191

196192
# Handle CallEnded
@@ -347,7 +343,7 @@ async def _generate_response(
347343
first_token_logged = False
348344
first_agent_text_logged = False
349345

350-
stream = self._llm.chat(
346+
stream = await self._llm.chat(
351347
messages,
352348
tools if tools else None,
353349
config=config,

line/llm_agent/provider.py

Lines changed: 57 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ class Message:
5252
name: Optional[str] = None
5353

5454

55-
class LLMProvider:
55+
class _HttpProvider:
5656
"""
5757
LLM provider using LiteLLM for unified multi-provider access.
5858
@@ -85,7 +85,7 @@ def __init__(
8585
except Exception:
8686
pass
8787

88-
def chat(
88+
async def chat(
8989
self,
9090
messages: List[Message],
9191
tools: Optional[List[FunctionTool]] = None,
@@ -101,7 +101,7 @@ def chat(
101101
are used for sampling/model parameters and system prompt instead
102102
of the config passed at init time.
103103
"""
104-
cfg = config or self._config
104+
cfg = _normalize_config(config) if config else self._config
105105
llm_messages = self._build_messages(messages, cfg)
106106

107107
llm_kwargs: Dict[str, Any] = {
@@ -150,7 +150,7 @@ def _build_messages(
150150
self, messages: List[Message], config: Optional[LlmConfig] = None
151151
) -> List[Dict[str, Any]]:
152152
"""Convert Message objects to LiteLLM format."""
153-
cfg = config or self._config
153+
cfg = _normalize_config(config) if config else self._config
154154
result = []
155155

156156
if cfg.system_prompt:
@@ -189,6 +189,10 @@ def _build_messages(
189189
result.append(llm_msg)
190190
return result
191191

192+
async def warmup(self, config=None):
    """Warm-up hook required by the provider interface.

    The HTTP backend is stateless (each chat call is an independent
    request), so there is nothing to pre-establish here.
    """
    return None
195+
192196
async def aclose(self) -> None:
193197
"""Close the provider (no-op for LiteLLM)."""
194198
pass
@@ -271,3 +275,52 @@ async def __aiter__(self) -> AsyncIterator[StreamChunk]:
271275
tool_calls=list(tool_calls.values()) if tool_calls else [],
272276
is_final=finish_reason is not None,
273277
)
278+
279+
280+
# ---------------------------------------------------------------------------
281+
# Unified facade
282+
# ---------------------------------------------------------------------------
283+
284+
285+
def _is_realtime_model(model: str) -> bool:
286+
"""Check if a model name indicates an OpenAI Realtime model."""
287+
return "realtime" in model.lower()
288+
289+
290+
def _is_websocket_model(model: str) -> bool:
291+
"""Check if a model should use the WebSocket (Responses API) backend."""
292+
lower = model.lower()
293+
return lower.startswith("gpt-5.2") or lower.startswith("gpt5.2")
294+
295+
296+
class LlmProvider:
    """Unified LLM provider facade.

    Inspects the model name once at construction time, picks the matching
    backend (HTTP/LiteLLM, OpenAI Realtime WS, or Responses WS), and
    forwards every call to that backend unchanged.
    """

    def __init__(self, model, api_key=None, config=None):
        # Backends for the WS paths are imported lazily so that the common
        # HTTP path never pays for (or requires) the optional modules.
        if _is_realtime_model(model):
            from line.llm_agent.realtime_provider import RealtimeProvider

            backend_cls = RealtimeProvider
        elif _is_websocket_model(model):
            from line.llm_agent.websocket_provider import WebSocketProvider

            backend_cls = WebSocketProvider
        else:
            backend_cls = _HttpProvider
        self._backend = backend_cls(model=model, api_key=api_key, config=config)

    async def chat(self, messages, tools=None, config=None, **kwargs):
        """Forward a chat request to the selected backend and return its stream."""
        return await self._backend.chat(messages, tools, config=config, **kwargs)

    async def warmup(self, config=None):
        """Forward the warm-up call to the selected backend."""
        await self._backend.warmup(config)

    async def aclose(self):
        """Forward shutdown to the selected backend."""
        await self._backend.aclose()
323+
324+
325+
# Backward-compat alias — existing scripts and examples import LLMProvider directly.
LLMProvider = _HttpProvider

0 commit comments

Comments
 (0)