@@ -52,7 +52,7 @@ class Message:
     name: Optional[str] = None


-class LLMProvider:
+class _HttpProvider:
     """
     LLM provider using LiteLLM for unified multi-provider access.

@@ -85,7 +85,7 @@ def __init__(
         except Exception:
             pass

-    def chat(
+    async def chat(
         self,
         messages: List[Message],
         tools: Optional[List[FunctionTool]] = None,
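
The hunk above turns `chat` into a coroutine, so every call site is affected. A minimal before/after sketch, assuming a provider instance and a prepared `messages` list (names are illustrative):

```python
# Before: synchronous call
response = provider.chat(messages)

# After: chat is a coroutine and must be awaited from async code
response = await provider.chat(messages)
```
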
@@ -101,7 +101,7 @@ def chat(
         are used for sampling/model parameters and system prompt instead
         of the config passed at init time.
         """
-        cfg = config or self._config
+        cfg = _normalize_config(config) if config else self._config
         llm_messages = self._build_messages(messages, cfg)

         llm_kwargs: Dict[str, Any] = {
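
`_normalize_config` is called here but defined elsewhere in the module. A minimal sketch of what such a helper plausibly does, assuming `LlmConfig` is a dataclass-style config object and that plain dicts are also accepted per call (the dict branch is an assumption, not something this diff confirms):

```python
def _normalize_config(config):
    """Coerce a caller-supplied config into an LlmConfig. Illustrative sketch only."""
    if isinstance(config, LlmConfig):
        return config
    if isinstance(config, dict):  # assumption: per-call dict overrides are allowed
        return LlmConfig(**config)
    raise TypeError(f"unsupported config type: {type(config).__name__}")
```
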
@@ -150,7 +150,7 @@ def _build_messages(
         self, messages: List[Message], config: Optional[LlmConfig] = None
     ) -> List[Dict[str, Any]]:
         """Convert Message objects to LiteLLM format."""
-        cfg = config or self._config
+        cfg = _normalize_config(config) if config else self._config
         result = []

         if cfg.system_prompt:
@@ -189,6 +189,10 @@ def _build_messages(
             result.append(llm_msg)
         return result

+    async def warmup(self, config=None):
+        """No-op for the stateless HTTP provider."""
+        pass
+
     async def aclose(self) -> None:
         """Close the provider (no-op for LiteLLM)."""
         pass
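
`warmup` is a no-op here, but giving the HTTP backend the same surface lets callers warm any backend unconditionally. A sketch of the intended call pattern, assuming the WebSocket-based backends use `warmup` to open their connection ahead of the first turn (their implementations are not shown in this diff):

```python
async def run_turn(provider: LlmProvider, messages: list) -> None:
    await provider.warmup()  # no-op on _HttpProvider; assumed to pre-open sockets on WS backends
    response = await provider.chat(messages)
    # ... handle the response ...
    await provider.aclose()
```
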
@@ -271,3 +275,52 @@ async def __aiter__(self) -> AsyncIterator[StreamChunk]:
             tool_calls=list(tool_calls.values()) if tool_calls else [],
             is_final=finish_reason is not None,
         )
+
+
+# ---------------------------------------------------------------------------
+# Unified facade
+# ---------------------------------------------------------------------------
+
+
+def _is_realtime_model(model: str) -> bool:
+    """Check if a model name indicates an OpenAI Realtime model."""
+    return "realtime" in model.lower()
+
+
+def _is_websocket_model(model: str) -> bool:
+    """Check if a model should use the WebSocket (Responses API) backend."""
+    lower = model.lower()
+    return lower.startswith("gpt-5.2") or lower.startswith("gpt5.2")
+
+
+class LlmProvider:
+    """Unified LLM provider facade.
+
+    Selects the appropriate backend (HTTP/LiteLLM, Realtime WS, or Responses WS)
+    based on the model name, and delegates all calls to it.
+    """
+
+    def __init__(self, model, api_key=None, config=None):
+        if _is_realtime_model(model):
+            from line.llm_agent.realtime_provider import RealtimeProvider
+
+            self._backend = RealtimeProvider(model=model, api_key=api_key, config=config)
+        elif _is_websocket_model(model):
+            from line.llm_agent.websocket_provider import WebSocketProvider
+
+            self._backend = WebSocketProvider(model=model, api_key=api_key, config=config)
+        else:
+            self._backend = _HttpProvider(model=model, api_key=api_key, config=config)
+
+    async def chat(self, messages, tools=None, config=None, **kwargs):
+        return await self._backend.chat(messages, tools, config=config, **kwargs)
+
+    async def warmup(self, config=None):
+        await self._backend.warmup(config)
+
+    async def aclose(self):
+        await self._backend.aclose()
+
+
+# Backward-compat alias: existing scripts and examples import LLMProvider directly.
+LLMProvider = _HttpProvider
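
For reference, this is how the facade routes a few example model names (only the `"realtime"` substring and the `gpt-5.2`/`gpt5.2` prefixes matter; the model names themselves are illustrative):

```python
LlmProvider(model="gpt-4o-realtime-preview")  # "realtime" in name -> RealtimeProvider
LlmProvider(model="gpt-5.2-mini")             # gpt-5.2 prefix     -> WebSocketProvider
LlmProvider(model="claude-sonnet-4")          # anything else      -> _HttpProvider (LiteLLM)
```
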