|
| 1 | +from typing import Dict, Optional, Tuple |
| 2 | + |
| 3 | +from litellm import cost_calculator |
| 4 | + |
| 5 | + |
1 | 6 | supported_llm_models = { |
2 | 7 | "anthropic": [ |
3 | 8 | "anthropic/claude-sonnet-4-5", |
|
206 | 211 |
|
# Provider names in declaration order; iterating a dict yields its keys.
providers_list = list(supported_llm_models)
208 | 213 |
|
| 214 | + |
def _get_model_costs(model: str) -> Optional[Tuple[float, float]]:
    """
    Look up the input/output pricing for a model, per 1M tokens.

    Delegates to litellm's cost_calculator (same as tracing/inline.py) so
    pricing stays consistent across the codebase. Asking for the cost of
    exactly 1,000,000 prompt and completion tokens yields the per-1M rates
    directly.

    Args:
        model: The model name (e.g., "gpt-4o" or "anthropic/claude-3-opus-20240229")

    Returns:
        Tuple of (input_cost, output_cost) per 1M tokens, or None if litellm
        has no (non-zero) pricing data for the model.
    """
    try:
        result = cost_calculator.cost_per_token(
            model=model,
            prompt_tokens=1_000_000,
            completion_tokens=1_000_000,
        )
        if result:
            prompt_cost, completion_cost = result
            # (0, 0) means litellm knows the model but has no real pricing.
            if prompt_cost > 0 or completion_cost > 0:
                return (prompt_cost, completion_cost)
    except Exception:
        # Best-effort: unknown/unpriced models are expected and not an error.
        pass
    return None
| 240 | + |
| 241 | + |
def _build_model_metadata() -> Dict[str, Dict[str, Dict[str, float]]]:
    """
    Assemble per-provider cost metadata for every supported model.

    Models for which no pricing data can be resolved are omitted; every
    provider key is still present (possibly mapping to an empty dict).

    Returns:
        Nested dict: {provider: {model: {"input": cost, "output": cost}}}
    """
    result: Dict[str, Dict[str, Dict[str, float]]] = {}

    for provider, model_names in supported_llm_models.items():
        priced: Dict[str, Dict[str, float]] = {}
        for name in model_names:
            pair = _get_model_costs(name)
            if pair is not None:
                input_cost, output_cost = pair
                priced[name] = {"input": input_cost, "output": output_cost}
        result[provider] = priced

    return result
| 262 | + |
| 263 | + |
# Computed once at import time: provider -> model -> {"input": ..., "output": ...}
# costs per 1M tokens; models without litellm pricing data are omitted.
model_metadata = _build_model_metadata()
| 265 | + |
209 | 266 | model_to_provider_mapping = { |
210 | 267 | model: provider |
211 | 268 | for provider, models in supported_llm_models.items() |
|
0 commit comments