"""LLM service with provider abstraction and LangFuse observability."""
import os
from typing import Any
from langchain_anthropic import ChatAnthropic
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from src.api.config import Settings, get_settings
# OpenRouter API base URL
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"


class LLMService:
    """Service for creating and managing LLM instances with observability."""

    # Model mappings for direct OpenAI API
    OPENAI_MODELS = {
        "gpt-5.2": "gpt-5.2",
        "gpt-5": "gpt-5",
        "gpt-5-mini": "gpt-5-mini",
        "gpt-4.1": "gpt-4.1",
        "gpt-4.1-mini": "gpt-4.1-mini",
        "gpt-4o": "gpt-4o",
        "gpt-4o-mini": "gpt-4o-mini",
        "o4-mini": "o4-mini",
        "o3": "o3",
        "o3-mini": "o3-mini",
    }

    # Model mappings for direct Anthropic API
    ANTHROPIC_MODELS = {
        "claude-sonnet-4.6": "claude-sonnet-4-6-20250610",
        "claude-opus-4.6": "claude-opus-4-6-20250610",
        "claude-sonnet-4.5": "claude-sonnet-4-5-20250514",
        "claude-opus-4.5": "claude-opus-4-5-20250514",
        "claude-haiku-4.5": "claude-haiku-4-5-20251001",
        "claude-sonnet-4": "claude-sonnet-4-20250514",
        "claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
        "claude-3.5-haiku": "claude-3-5-haiku-20241022",
    }

    @property
    def default_model(self) -> str:
        """Get the default model from settings."""
        return self.settings.default_model

    @property
    def test_model(self) -> str:
        """Get the test model from settings."""
        return self.settings.test_model

    def get_test_model(
        self,
        api_key: str | None = None,
        temperature: float | None = None,
        streaming: bool = True,
    ) -> BaseChatModel:
        """Get a model instance configured for testing.

        Convenience method that uses settings.test_model and settings.test_model_provider.
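
        Example (illustrative; the resolved model depends on your settings):
            model = llm_service.get_test_model(temperature=0.0)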
"""
return self.get_model(
model_name=self.settings.test_model,
api_key=api_key,
temperature=temperature,
streaming=streaming,
provider=self.settings.test_model_provider,
)

    def __init__(self, settings: Settings | None = None) -> None:
        """Initialize the LLM service.

        Args:
            settings: Optional settings instance. If not provided, uses defaults.
        """
        self.settings = settings or get_settings()

    def get_langfuse_handler(
        self,
        trace_id: str | None = None,
    ) -> Any | None:
        """Create a LangFuse callback handler for tracing.

        LangFuse uses environment variables for authentication:
        - LANGFUSE_PUBLIC_KEY
        - LANGFUSE_SECRET_KEY
        - LANGFUSE_HOST

        This method sets these from settings before creating the handler.

        Note: The langfuse import is lazy to avoid Python 3.14 compatibility
        issues with Pydantic v1 (used by langfuse internally).

        Args:
            trace_id: Optional custom trace ID for the root LangChain run.

        Returns:
            CallbackHandler instance if langfuse is configured and importable,
            None otherwise.
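
        Example (illustrative; handler is None when LangFuse is unconfigured):
            handler = llm_service.get_langfuse_handler(trace_id="run-123")
            callbacks = [handler] if handler else []
            result = model.invoke("Hello", config={"callbacks": callbacks})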
"""
if not self.settings.langfuse_public_key or not self.settings.langfuse_secret_key:
return None
# Lazy import to avoid Pydantic v1 compatibility issues on Python 3.14
# See: https://github.com/OpenScience-Collective/osa/issues/108
try:
from langfuse.langchain import CallbackHandler as LangfuseHandler
        except Exception as e:  # ImportError or Pydantic v1 incompatibility
            import warnings

            warnings.warn(
                f"Langfuse import failed: {e}. Observability disabled. "
                "Install with: uv pip install 'open-science-assistant[observability]'",
                stacklevel=2,
            )
            return None

        # Set environment variables for the LangFuse client
        os.environ["LANGFUSE_PUBLIC_KEY"] = self.settings.langfuse_public_key
        os.environ["LANGFUSE_SECRET_KEY"] = self.settings.langfuse_secret_key
        os.environ["LANGFUSE_HOST"] = self.settings.langfuse_host

        # Create handler with optional trace context
        if trace_id:
            return LangfuseHandler(trace_context={"trace_id": trace_id})
        return LangfuseHandler()

    def get_model(
        self,
        model_name: str | None = None,
        api_key: str | None = None,
        temperature: float | None = None,
        streaming: bool = True,
        provider: str | None = None,
    ) -> BaseChatModel:
        """Get a chat model instance.

        Args:
            model_name: Model name. Supports:
                - OpenRouter format: 'creator/model' (e.g., 'openai/gpt-oss-120b', 'qwen/qwen3-235b')
                - Direct OpenAI: 'gpt-4o', 'gpt-4o-mini', etc.
                - Direct Anthropic: 'claude-3.5-sonnet', etc.
                If not provided, uses settings.default_model.
            api_key: Optional API key override (for BYOK).
            temperature: Model temperature. If not provided, uses settings.llm_temperature.
            streaming: Whether to enable streaming.
            provider: Optional provider for routing (e.g., 'Cerebras' for fast inference).
                If not provided, uses settings.default_model_provider.

        Returns:
            A configured chat model instance.

        Raises:
            ValueError: If the model is not recognized or the API key is missing.
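
        Example (illustrative; assumes the relevant API keys are configured):
            service = LLMService()
            fast = service.get_model("openai/gpt-oss-120b", provider="Cerebras")
            claude = service.get_model("claude-3.5-sonnet", temperature=0.0)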
"""
model_name = model_name or self.settings.default_model
temperature = temperature if temperature is not None else self.settings.llm_temperature
# OpenRouter models contain "/" (e.g., "openai/gpt-oss-120b")
if "/" in model_name:
# Use provided provider or fall back to settings
effective_provider = (
provider if provider is not None else self.settings.default_model_provider
)
return self._get_openrouter_model(
model_name, api_key, temperature, streaming, effective_provider
)
# Direct OpenAI API
elif model_name in self.OPENAI_MODELS:
return self._get_openai_model(model_name, api_key, temperature, streaming)
# Direct Anthropic API
elif model_name in self.ANTHROPIC_MODELS:
return self._get_anthropic_model(model_name, api_key, temperature, streaming)
        else:
            raise ValueError(
                f"Unknown model: {model_name}. "
                f"Use OpenRouter format 'creator/model' or one of: "
                f"{list(self.OPENAI_MODELS.keys()) + list(self.ANTHROPIC_MODELS.keys())}"
            )

    def _get_openai_model(
        self,
        model_name: str,
        api_key: str | None,
        temperature: float,
        streaming: bool,
    ) -> ChatOpenAI:
        """Create an OpenAI chat model."""
        key = api_key or self.settings.openai_api_key
        if not key:
            raise ValueError("OpenAI API key required but not configured")

        return ChatOpenAI(
            model=self.OPENAI_MODELS[model_name],
            api_key=key,
            temperature=temperature,
            streaming=streaming,
        )

    def _get_anthropic_model(
        self,
        model_name: str,
        api_key: str | None,
        temperature: float,
        streaming: bool,
    ) -> ChatAnthropic:
        """Create an Anthropic chat model."""
        key = api_key or self.settings.anthropic_api_key
        if not key:
            raise ValueError("Anthropic API key required but not configured")

        return ChatAnthropic(
            model=self.ANTHROPIC_MODELS[model_name],
            api_key=key,
            temperature=temperature,
            streaming=streaming,
        )

    def _get_openrouter_model(
        self,
        model_name: str,
        api_key: str | None,
        temperature: float,
        streaming: bool,
        provider: str | None = None,
    ) -> ChatOpenAI:
        """Create a model via the OpenRouter API.

        OpenRouter provides access to many models through an OpenAI-compatible API.
        Model names are in the format 'creator/model' (e.g., 'openai/gpt-oss-120b').
        Provider specifies where the model runs (e.g., 'Cerebras' for fast inference).

        Args:
            model_name: OpenRouter model ID (creator/model format)
            api_key: OpenRouter API key
            temperature: Model temperature
            streaming: Enable streaming responses
            provider: Provider for routing (e.g., 'Cerebras' for fast inference)
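
        Example (illustrative):
            model = self._get_openrouter_model(
                "qwen/qwen3-235b", None, 0.7, True, provider="Cerebras"
            )
            # Sends extra_body={"provider": {"order": ["Cerebras"]}} to OpenRouter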
"""
key = api_key or self.settings.openrouter_api_key
if not key:
raise ValueError("OpenRouter API key required but not configured")
# Build extra body with provider preferences if specified
extra_body: dict | None = None
if provider:
extra_body = {"provider": {"order": [provider]}}
return ChatOpenAI(
model=model_name,
api_key=key,
base_url=OPENROUTER_BASE_URL,
temperature=temperature,
streaming=streaming,
default_headers={
"HTTP-Referer": "https://osc.earth/osa",
"X-Title": "Open Science Assistant",
},
extra_body=extra_body,
)

    def get_config_with_tracing(
        self,
        trace_id: str | None = None,
    ) -> dict[str, Any]:
        """Get a config dict with LangFuse tracing callbacks.

        Use this with LangGraph invoke/ainvoke:
            config = llm_service.get_config_with_tracing(trace_id="abc")
            result = graph.invoke(state, config=config)

        Args:
            trace_id: Optional custom trace ID for the root LangChain run.
        """
        config: dict[str, Any] = {}
        handler = self.get_langfuse_handler(trace_id)
        if handler:
            config["callbacks"] = [handler]
        return config


# Singleton instance for convenience
_llm_service: LLMService | None = None


def get_llm_service(settings: Settings | None = None) -> LLMService:
    """Get the LLM service singleton."""
    global _llm_service
    if _llm_service is None:
        _llm_service = LLMService(settings)
    return _llm_service
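

# Example usage (illustrative sketch; assumes API keys are present in Settings):
#
#   service = get_llm_service()
#   model = service.get_model()  # uses settings.default_model
#   config = service.get_config_with_tracing(trace_id="session-1")
#   reply = model.invoke("Hello!", config=config)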