|
1 | 1 | #!/usr/bin/env python3 |
2 | 2 | """Script to display all available LLMs configured in the environment using a rich table.""" |
3 | 3 |
|
| 4 | +import os |
| 5 | +from typing import Any |
| 6 | + |
4 | 7 | from rich.console import Console |
5 | 8 | from rich.table import Table |
6 | 9 | from rich.text import Text |
|
12 | 15 | ) |
13 | 16 |
|
14 | 17 |
|
| 18 | +def _check_api_key_format(api_key: str | None) -> bool: |
| 19 | + """Check if an API key exists and has a reasonable format.""" |
| 20 | + if not api_key: |
| 21 | + return False |
| 22 | + # Basic validation: API keys should be non-empty and reasonably long |
| 23 | + return len(api_key) > 4 |
| 24 | + |
| 25 | + |
def _check_openai_auth(llm_obj: Any) -> str:
    """Check OpenAI authentication status."""
    # Authenticated only when OPENAI_API_KEY is set and plausibly formed.
    if _check_api_key_format(os.getenv("OPENAI_API_KEY")):
        return "[bold green]✓ AUTHENTICATED[/bold green]"
    return "[bold red]✗ NO AUTH[/bold red]"
| 32 | + |
| 33 | + |
def _check_google_auth(llm_obj: Any) -> str:
    """Check Google/Gemini authentication status."""
    # Gemini auth is driven entirely by the GEMINI_API_KEY variable.
    if _check_api_key_format(os.getenv("GEMINI_API_KEY")):
        return "[bold green]✓ AUTHENTICATED[/bold green]"
    return "[bold red]✗ NO AUTH[/bold red]"
| 40 | + |
| 41 | + |
def _check_litellm_auth(llm_obj: Any) -> str:
    """Check LiteLLM authentication status."""
    model = os.getenv("LITELLM_MODEL")
    if not model:
        return "[bold red]✗ NO AUTH[/bold red]"

    # The provider prefix is everything before the first "/" (e.g.
    # "openai/gpt-4o"); a bare model name is treated as the provider itself.
    provider, _, _ = model.partition("/")
    provider = provider or model

    # Accept either a provider-specific key or the generic LiteLLM one.
    candidate_keys = (f"{provider.upper()}_API_KEY", "LITELLM_API_KEY")
    if any(_check_api_key_format(os.getenv(name)) for name in candidate_keys):
        return "[bold green]✓ AUTHENTICATED[/bold green]"

    # A model is configured but no key was found: partially configured.
    return "[bold yellow]⚠ PARTIAL[/bold yellow]"
| 63 | + |
| 64 | + |
def _check_litellm_proxy_auth(llm_obj: Any) -> str:
    """Check LiteLLM Proxy authentication status."""
    # No usable key means no auth at all.
    if not _check_api_key_format(os.getenv("LITELLM_PROXY_API_KEY")):
        return "[bold red]✗ NO AUTH[/bold red]"
    # A key without the proxy endpoint is only a partial configuration.
    if os.getenv("LITELLM_PROXY_URL"):
        return "[bold green]✓ AUTHENTICATED[/bold green]"
    return "[bold yellow]⚠ PARTIAL[/bold yellow]"
| 76 | + |
| 77 | + |
def _check_watsonx_auth(llm_obj: Any) -> str:
    """Check WatsonX authentication status.

    WatsonX needs three environment variables (API key, service URL and
    project id); the key must additionally pass the basic format check.

    Returns:
        A rich-markup status string: authenticated or no auth.
    """
    # Presence check for the non-key variables.
    if not all(os.getenv(var) for var in ("WATSONX_URL", "WATSONX_PROJECTID")):
        return "[bold red]✗ NO AUTH[/bold red]"

    # Single lookup for the key, folding the original presence check and
    # the format check into one branch (the old code fetched the key twice
    # and had two identical NO AUTH returns).
    if not _check_api_key_format(os.getenv("WATSONX_APIKEY")):
        return "[bold red]✗ NO AUTH[/bold red]"

    return "[bold green]✓ AUTHENTICATED[/bold green]"
| 91 | + |
| 92 | + |
def _get_auth_status(llm_name: str, llm_obj: Any) -> str:
    """Get authentication status for an LLM based on its type.

    Dispatches to the provider-specific checker. Branch order matters
    because some names contain others ("litellm_proxy" vs "litellm",
    "openai_compatible" vs "openai").

    Args:
        llm_name: Configured name of the LLM entry.
        llm_obj: The LLM object (passed through to the checkers).

    Returns:
        A rich-markup status string (authenticated / no auth / partial /
        unknown).
    """
    # Lower-case once instead of in every branch. (The old code also
    # computed _get_provider_name() here and never used the result.)
    name = llm_name.lower()

    if "gemini" in name:
        return _check_google_auth(llm_obj)
    if "openai" in name and "compatible" not in name:
        return _check_openai_auth(llm_obj)
    if "litellm_proxy" in name:
        return _check_litellm_proxy_auth(llm_obj)
    if "litellm" in name:
        return _check_litellm_auth(llm_obj)
    if "watsonx" in name:
        return _check_watsonx_auth(llm_obj)
    if "vllm" in name:
        # vLLM only needs a reachable endpoint URL, not an API key.
        if os.getenv("VLLM_URL"):
            return "[bold green]✓ AUTHENTICATED[/bold green]"
        return "[bold red]✗ NO AUTH[/bold red]"
    if "ollama" in name:
        # Ollama is local; configuration is complete once a model is chosen.
        if os.getenv("OLLAMA_MODEL_ID"):
            return "[bold green]✓ AUTHENTICATED[/bold green]"
        return "[bold red]✗ NO AUTH[/bold red]"
    if "openai_compatible" in name:
        # OpenAI-compatible endpoints need both a key and a base URL.
        api_key = os.getenv("OPENAI_COMPATIBLE_API_KEY")
        base_url = os.getenv("OPENAI_COMPATIBLE_BASE_URL")
        if _check_api_key_format(api_key) and base_url:
            return "[bold green]✓ AUTHENTICATED[/bold green]"
        return "[bold red]✗ NO AUTH[/bold red]"

    return "[bold yellow]⚠ UNKNOWN[/bold yellow]"
| 129 | + |
| 130 | + |
15 | 131 | def _get_provider_name(llm_name: str, llm_obj) -> str: |
16 | 132 | """Extract provider name from LLM name and object.""" |
17 | 133 | # Map based on LLM name patterns |
@@ -112,10 +228,20 @@ def main() -> None: |
112 | 228 | provider = _get_provider_name(name, llm) |
113 | 229 | model = _get_model_info(llm) |
114 | 230 | env_vars = llms_env_vars.get(name, []) |
115 | | - env_vars_str = ", ".join(env_vars) if env_vars else "N/A" |
| 231 | + # Filter to only show URL, model, and API key related variables |
| 232 | + filtered_vars = [ |
| 233 | + var |
| 234 | + for var in env_vars |
| 235 | + if any( |
| 236 | + keyword in var.lower() for keyword in ["url", "model", "api_key", "key"] |
| 237 | + ) |
| 238 | + ] |
| 239 | + env_vars_str = ", ".join(filtered_vars) if filtered_vars else "N/A" |
116 | 240 | is_active = name == active_llm_name |
117 | 241 |
|
118 | | - status = "[bold green]● ACTIVE[/bold green]" if is_active else "" |
| 242 | + auth_status = _get_auth_status(name, llm) |
| 243 | + active_indicator = " ● ACTIVE" if is_active else "" |
| 244 | + status = f"{auth_status}{active_indicator}" |
119 | 245 | table.add_row(names_str, provider, model, env_vars_str, status) |
120 | 246 |
|
121 | 247 | # Add active LLM footer row spanning all columns |
|
0 commit comments