#!/usr/bin/env python3
"""
Example: Using CLI token with LiteLLM SDK

This example shows how to use the CLI authentication token
in your Python scripts after running `litellm-proxy login`.
"""

import litellm

# Point this at your LiteLLM proxy
LITELLM_BASE_URL = "http://localhost:4000/"


def main():
    """Using CLI token with LiteLLM SDK"""
    print("🚀 Using CLI Token with LiteLLM SDK")
    print("=" * 40)
    # litellm._turn_on_debug()  # uncomment for verbose SDK logging

    # Get the CLI token saved by `litellm-proxy login`
    api_key = litellm.get_litellm_gateway_api_key()

    if not api_key:
        print("❌ No CLI token found. Please run 'litellm-proxy login' first.")
        return

    print("✅ Found CLI token.")

    # Ask the proxy which models this token can access
    available_models = litellm.get_valid_models(
        check_provider_endpoint=True,
        custom_llm_provider="litellm_proxy",
        api_key=api_key,
        api_base=LITELLM_BASE_URL,
    )

    print("✅ Available models:")
    if available_models:
        for i, model in enumerate(available_models, 1):
            print(f"  {i:2d}. {model}")
    else:
        print("   No models available")

    # Send a chat completion through the proxy using the CLI token
    try:
        response = litellm.completion(
            model="litellm_proxy/gemini/gemini-2.5-flash",
            messages=[{"role": "user", "content": "Hello from CLI token!"}],
            api_key=api_key,
            base_url=LITELLM_BASE_URL,
        )
        print(f"✅ LLM Response: {response.model_dump_json(indent=4)}")
    except Exception as e:
        print(f"❌ Error: {e}")

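
# Hedged sketch: the CLI stores its token at ~/.litellm/token.json (see the
# tips printed below). The file's exact JSON layout is not documented here,
# so the "key" field name is an assumption for illustration only; prefer
# litellm.get_litellm_gateway_api_key() in real code.
def load_cli_token_from_disk():
    import json
    from pathlib import Path

    token_file = Path.home() / ".litellm" / "token.json"
    if not token_file.exists():
        return None
    try:
        data = json.loads(token_file.read_text())
        return data.get("key")  # assumed field name, not a documented contract
    except (OSError, json.JSONDecodeError):
        return None
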
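
# Hedged sketch: the same request with streaming enabled. litellm.completion()
# accepts stream=True and yields chunks whose deltas carry incremental text.
# The model name is just the one used above and may not exist on your proxy.
def stream_example(api_key):
    response = litellm.completion(
        model="litellm_proxy/gemini/gemini-2.5-flash",
        messages=[{"role": "user", "content": "Stream hello from CLI token!"}],
        api_key=api_key,
        base_url=LITELLM_BASE_URL,
        stream=True,
    )
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)
    print()
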
if __name__ == "__main__":
    main()

    print("\n💡 Tips:")
    print("1. Run 'litellm-proxy login' to authenticate first")
    print("2. Set LITELLM_BASE_URL to your actual proxy URL")
    print("3. The token is stored locally at ~/.litellm/token.json")