
Commit b9ffa98
[Feat] Proxy CLI: Create a python method to login using litellm proxy (#14782)
* fix: cli auth with SSO okta
* fix: add LITTELM_CLI_SERVICE_ACCOUNT_NAME
* fix: get_litellm_cli_user_api_key_auth
* use existing_key CLI
* fix: use existing key
* test auth commands
* test_cli_sso_callback_regenerate_vs_create_flow
* feat: add CLI Token Utilities
* fix: get_stored_api_key
* move file
* fix: get_valid_models
* fix config.yaml
* TestCLITokenUtils
* TestGetValidModelsWithCLI
* fix: tie user id to keys created through CLI
* fix: add teams interface to CLI
* add /keys/update to the list client commands
* fix /sso/cli/poll to return the user_id
* fix: working TeamsManagementClient
* fix CLI Login command
* fixes for auth
* Potential fix for code scanning alert no. 3400: Clear-text logging of sensitive information
  Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
* ruff fix

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
1 parent 3e9540f commit b9ffa98

File tree

19 files changed: +1055, -67 lines
Lines changed: 62 additions & 0 deletions
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""
Example: Using CLI token with LiteLLM SDK

This example shows how to use the CLI authentication token
in your Python scripts after running `litellm-proxy login`.
"""

from textwrap import indent
import litellm
LITELLM_BASE_URL = "http://localhost:4000/"


def main():
    """Using CLI token with LiteLLM SDK"""
    print("🚀 Using CLI Token with LiteLLM SDK")
    print("=" * 40)
    # litellm._turn_on_debug()

    # Get the CLI token
    api_key = litellm.get_litellm_gateway_api_key()

    if not api_key:
        print("❌ No CLI token found. Please run 'litellm-proxy login' first.")
        return

    print("✅ Found CLI token.")

    available_models = litellm.get_valid_models(
        check_provider_endpoint=True,
        custom_llm_provider="litellm_proxy",
        api_key=api_key,
        api_base=LITELLM_BASE_URL
    )

    print("✅ Available models:")
    if available_models:
        for i, model in enumerate(available_models, 1):
            print(f"  {i:2d}. {model}")
    else:
        print("  No models available")

    # Use with LiteLLM
    try:
        response = litellm.completion(
            model="litellm_proxy/gemini/gemini-2.5-flash",
            messages=[{"role": "user", "content": "Hello from CLI token!"}],
            api_key=api_key,
            base_url=LITELLM_BASE_URL
        )
        print(f"✅ LLM Response: {response.model_dump_json(indent=4)}")
    except Exception as e:
        print(f"❌ Error: {e}")


if __name__ == "__main__":
    main()

    print("\n💡 Tips:")
    print("1. Run 'litellm-proxy login' to authenticate first")
    print("2. Replace 'https://your-proxy.com' with your actual proxy URL")
    print("3. The token is stored locally at ~/.litellm/token.json")

enterprise/litellm_enterprise/integrations/prometheus.py

Lines changed: 0 additions & 1 deletion
@@ -2328,7 +2328,6 @@ def get_custom_labels_from_tags(tags: List[str]) -> Dict[str, str]:
         "tag_Service_web_app_v1": "false",
     }
     """
-    import re
 
     from litellm.router_utils.pattern_match_deployments import PatternMatchRouter
     from litellm.types.integrations.prometheus import _sanitize_prometheus_label_name

enterprise/litellm_enterprise/proxy/management_endpoints/internal_user_endpoints.py

Lines changed: 0 additions & 1 deletion
@@ -2,7 +2,6 @@
 Enterprise internal user management endpoints
 """
 
-import os
 
 from fastapi import APIRouter, Depends, HTTPException
enterprise/litellm_enterprise/proxy/vector_stores/endpoints.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 import copy
 from typing import List, Optional
 
-from fastapi import APIRouter, Depends, HTTPException, Request, Response
+from fastapi import APIRouter, Depends, HTTPException
 
 import litellm
 from litellm._logging import verbose_proxy_logger

litellm/__init__.py

Lines changed: 3 additions & 0 deletions
@@ -1343,5 +1343,8 @@ def add_known_models():
 )
 global_disable_no_log_param: bool = False
 
+### CLI UTILITIES ###
+from litellm.litellm_core_utils.cli_token_utils import get_litellm_gateway_api_key
+
 ### PASSTHROUGH ###
 from .passthrough import allm_passthrough_route, llm_passthrough_route
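With this re-export, the helper is available at the SDK's top level. A minimal sketch of the resulting call pattern (the localhost URL and model name are taken from the example script above, not fixed defaults):

import litellm

# Reads the token written to ~/.litellm/token.json by `litellm-proxy login`.
api_key = litellm.get_litellm_gateway_api_key()
if api_key:
    response = litellm.completion(
        model="litellm_proxy/gemini/gemini-2.5-flash",
        messages=[{"role": "user", "content": "Hello"}],
        api_key=api_key,
        base_url="http://localhost:4000/",
    )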
litellm/litellm_core_utils/cli_token_utils.py

Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
"""
CLI Token Utilities

SDK-level utilities for reading CLI authentication tokens.
This module has no dependencies on proxy code and can be safely imported at the SDK level.
"""

import json
import os
from pathlib import Path
from typing import Optional


def get_cli_token_file_path() -> str:
    """Get the path to the CLI token file"""
    home_dir = Path.home()
    config_dir = home_dir / ".litellm"
    return str(config_dir / "token.json")


def load_cli_token() -> Optional[dict]:
    """Load CLI token data from file"""
    token_file = get_cli_token_file_path()
    if not os.path.exists(token_file):
        return None

    try:
        with open(token_file, 'r') as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError):
        return None


def get_litellm_gateway_api_key() -> Optional[str]:
    """
    Get the stored CLI API key for use with LiteLLM SDK.

    This function reads the token file created by `litellm-proxy login`
    and returns the API key for use in Python scripts.

    Returns:
        str: The API key if found, None otherwise

    Example:
        >>> import litellm
        >>> api_key = litellm.get_litellm_gateway_api_key()
        >>> if api_key:
        ...     response = litellm.completion(
        ...         model="gpt-3.5-turbo",
        ...         messages=[{"role": "user", "content": "Hello"}],
        ...         api_key=api_key,
        ...         base_url="https://your-proxy.com/v1"
        ...     )
    """
    token_data = load_cli_token()
    if token_data and 'key' in token_data:
        return token_data['key']
    return None
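For reference, load_cli_token only requires that ~/.litellm/token.json parse as JSON, and get_litellm_gateway_api_key then reads the top-level "key" field; any other fields `litellm-proxy login` writes are ignored by this module. A small sketch of that contract (the key value is a placeholder, and the write is left commented out so it cannot clobber a real token):

import json
from pathlib import Path

from litellm.litellm_core_utils.cli_token_utils import (
    get_cli_token_file_path,
    get_litellm_gateway_api_key,
)

# Where the CLI token lives: ~/.litellm/token.json
token_path = Path(get_cli_token_file_path())

# Minimal shape this module accepts; "key" is the only field read.
payload = json.dumps({"key": "sk-placeholder"})

# Uncomment to try locally (this overwrites any real saved token):
# token_path.parent.mkdir(parents=True, exist_ok=True)
# token_path.write_text(payload)
# assert get_litellm_gateway_api_key() == "sk-placeholder"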
