Skip to content

Commit c9197b4

Browse files
committed
1 parent 139a741 commit c9197b4

File tree

2 files changed

+209
-0
lines changed

2 files changed

+209
-0
lines changed

python/README.md

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,26 @@ These Python scripts can be run directly from their URLs using `uv run`.
44

55
Their source code is [available on GitHub](https://github.com/simonw/tools/tree/main/python).
66

7+
## list_llm_model_ids.py
8+
9+
List available model IDs from OpenAI, Anthropic, and Gemini.
10+
11+
You must have [LLM](https://llm.datasette.io/) installed and have configured the API keys for one or more of `anthropic`, `openai`, or `gemini` using `llm keys set`.
12+
13+
```bash
14+
uv run https://tools.simonwillison.net/python/list_llm_model_ids.py
15+
```
16+
Output starts:
17+
```
18+
===================
19+
openai (136 models)
20+
===================
21+
babbage-002
22+
chatgpt-4o-latest
23+
chatgpt-image-latest
24+
...
25+
```
26+
727
## git_read_only_http.py
828

929
Serve a local Git repository over HTTP in read-only mode.

python/list_llm_model_ids.py

Lines changed: 189 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,189 @@
1+
#!/usr/bin/env -S uv run
2+
# /// script
3+
# requires-python = ">=3.10"
4+
# dependencies = ["httpx"]
5+
# ///
6+
"""
7+
List model IDs from OpenAI, Anthropic, and Gemini using httpx.
8+
9+
API keys are obtained via:
10+
llm keys get openai
11+
llm keys get anthropic
12+
llm keys get gemini
13+
14+
Providers whose key command fails are skipped.
15+
"""
16+
17+
from __future__ import annotations
18+
19+
import json
20+
import subprocess
21+
import sys
22+
from dataclasses import dataclass
23+
from typing import Iterable, Optional
24+
25+
import httpx
26+
27+
28+
@dataclass(frozen=True)
29+
class ProviderResult:
30+
provider: str
31+
models: list[str]
32+
33+
34+
def _get_key_via_llm(provider: str) -> Optional[str]:
35+
"""
36+
Run: llm keys get <provider>
37+
Return the key string (stripped) or None if command fails.
38+
"""
39+
try:
40+
proc = subprocess.run(
41+
["llm", "keys", "get", provider],
42+
check=True,
43+
capture_output=True,
44+
text=True,
45+
)
46+
except (subprocess.CalledProcessError, FileNotFoundError):
47+
return None
48+
49+
key = (proc.stdout or "").strip()
50+
return key or None
51+
52+
53+
def _print_section(title: str) -> None:
54+
print("\n" + "=" * len(title))
55+
print(title)
56+
print("=" * len(title))
57+
58+
59+
def list_openai_models(client: httpx.Client, api_key: str) -> ProviderResult:
    """Fetch model IDs from the OpenAI API.

    Args:
        client: Shared HTTP client used for the request.
        api_key: OpenAI API key, sent as a Bearer token.

    Returns:
        ProviderResult with sorted, de-duplicated model IDs.

    Raises:
        httpx.HTTPError: on network/HTTP failure (caller handles this).
    """
    url = "https://api.openai.com/v1/models"
    headers = {"Authorization": f"Bearer {api_key}"}

    r = client.get(url, headers=headers)
    r.raise_for_status()
    data = r.json()

    # Single request; this endpoint is not paginated here. Each item carries
    # its ID under "id"; anything non-string is skipped defensively.
    ids = (m.get("id") for m in data.get("data", []) or [])
    models = sorted({mid for mid in ids if isinstance(mid, str)})
    return ProviderResult("openai", models)
75+
76+
77+
def list_anthropic_models(client: httpx.Client, api_key: str) -> ProviderResult:
    """Fetch model IDs from the Anthropic API, following cursor pagination.

    Args:
        client: Shared HTTP client used for the requests.
        api_key: Anthropic API key, sent via the ``x-api-key`` header.

    Returns:
        ProviderResult with sorted, de-duplicated model IDs.

    Raises:
        httpx.HTTPError: on network/HTTP failure (caller handles this).
    """
    url = "https://api.anthropic.com/v1/models"
    # Anthropic requires the anthropic-version header on API requests.
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
    }

    models: list[str] = []
    after_id: Optional[str] = None

    while True:
        params = {"after_id": after_id} if after_id else {}

        r = client.get(url, headers=headers, params=params)
        r.raise_for_status()
        payload = r.json()

        ids = (m.get("id") for m in payload.get("data", []) or [])
        models.extend(mid for mid in ids if isinstance(mid, str))

        # Continue only while the API reports another page and hands back
        # a usable string cursor.
        has_more = bool(payload.get("has_more"))
        after_id = payload.get("last_id") if has_more else None
        if not has_more or not isinstance(after_id, str) or not after_id:
            break

    return ProviderResult("anthropic", sorted(set(models)))
109+
110+
111+
def list_gemini_models(client: httpx.Client, api_key: str) -> ProviderResult:
    """Fetch model names from the Gemini (Generative Language) API.

    Follows ``nextPageToken`` pagination. The key is sent via the documented
    ``x-goog-api-key`` header rather than a ``key=`` query parameter, so the
    secret never appears in URLs, proxies, or access logs.

    Args:
        client: Shared HTTP client used for the requests.
        api_key: Gemini (Google AI Studio) API key.

    Returns:
        ProviderResult with sorted, de-duplicated model names, e.g.
        ``models/gemini-2.0-flash``.

    Raises:
        httpx.HTTPError: on network/HTTP failure (caller handles this).
    """
    url = "https://generativelanguage.googleapis.com/v1beta/models"
    headers = {"x-goog-api-key": api_key}

    models: list[str] = []
    page_token: Optional[str] = None

    while True:
        params = {"pageToken": page_token} if page_token else {}

        r = client.get(url, headers=headers, params=params)
        r.raise_for_status()
        payload = r.json()

        # Gemini uses "name" like "models/gemini-2.0-flash".
        names = (m.get("name") for m in payload.get("models", []) or [])
        models.extend(name for name in names if isinstance(name, str))

        page_token = payload.get("nextPageToken")
        if not isinstance(page_token, str) or not page_token:
            break

    return ProviderResult("gemini", sorted(set(models)))
138+
139+
140+
def main(argv: list[str]) -> int:
    """List model IDs for every provider whose key can be obtained.

    Args:
        argv: Command-line arguments (without the program name). The only
            recognised flag is ``--json``, which switches to
            machine-readable JSON output.

    Returns:
        0 on success (even if some providers failed — per-provider errors go
        to stderr), 2 when no API keys were found at all.
    """
    # Optional: --json to emit machine-readable output.
    as_json = "--json" in argv

    # Try to get keys. Skip providers that error.
    keys = {
        "openai": _get_key_via_llm("openai"),
        "anthropic": _get_key_via_llm("anthropic"),
        "gemini": _get_key_via_llm("gemini"),
    }

    available = {k: v for k, v in keys.items() if v}
    if not available:
        print("No API keys found via `llm keys get {openai,anthropic,gemini}`. Nothing to do.", file=sys.stderr)
        return 2

    # Dispatch table keeps the fetch loop flat (vs. an if/elif chain).
    fetchers = {
        "openai": list_openai_models,
        "anthropic": list_anthropic_models,
        "gemini": list_gemini_models,
    }

    results: list[ProviderResult] = []

    with httpx.Client(timeout=httpx.Timeout(30.0)) as client:
        for provider, key in available.items():
            try:
                results.append(fetchers[provider](client, key))
            except httpx.HTTPError as e:
                # Don't kill the whole run if one provider fails.
                print(f"[{provider}] HTTP error: {e}", file=sys.stderr)
            except ValueError as e:
                # json.JSONDecodeError subclasses ValueError, so this single
                # clause covers malformed JSON bodies too.
                print(f"[{provider}] JSON parse error: {e}", file=sys.stderr)

    if as_json:
        out = {r.provider: r.models for r in results}
        print(json.dumps(out, indent=2, sort_keys=True))
        return 0

    # Pretty output
    for r in results:
        _print_section(f"{r.provider} ({len(r.models)} models)")
        for mid in r.models:
            print(mid)

    return 0
185+
186+
187+
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main(sys.argv[1:]))
189+

0 commit comments

Comments
 (0)