# OpenCrabs Usage Pricing Table — usage_pricing.toml.example
#
# This file is copied to ~/.opencrabs/usage_pricing.toml on first run.
# Edit that file live — changes take effect immediately on next /usage open.
# No restart needed.
#
# Rules:
# - `prefix` is matched as a case-insensitive substring of the model name
# - First match within each provider wins — put specific prefixes before general ones
# - Costs are per 1 million tokens (USD)
# - Add any provider/model — if it's not here, cost shows as $0.00
# Anthropic Claude models. Ordered most-specific prefix first so the
# substring first-match rule (see header) resolves e.g. "claude-3-5-sonnet"
# before the generic "claude-3-sonnet". Rates are USD per 1M tokens.
# NOTE(review): rates were set at authoring time — verify against the
# vendor's current pricing page before relying on cost totals.
[providers.anthropic]
entries = [
{ prefix = "claude-opus-4", input_per_m = 5.0, output_per_m = 25.0 },
{ prefix = "claude-3-opus", input_per_m = 15.0, output_per_m = 75.0 },
{ prefix = "claude-sonnet-4", input_per_m = 3.0, output_per_m = 15.0 },
{ prefix = "claude-3-7-sonnet", input_per_m = 3.0, output_per_m = 15.0 },
{ prefix = "claude-3-5-sonnet", input_per_m = 3.0, output_per_m = 15.0 },
{ prefix = "claude-3-sonnet", input_per_m = 3.0, output_per_m = 15.0 },
{ prefix = "claude-haiku-4", input_per_m = 1.0, output_per_m = 5.0 },
{ prefix = "claude-3-5-haiku", input_per_m = 0.80, output_per_m = 4.0 },
{ prefix = "claude-3-haiku", input_per_m = 0.25, output_per_m = 1.25 },
]
# OpenAI models. "gpt-4o-mini" must precede "gpt-4o", and "o3-mini"/"o1-mini"
# must precede "o3"/"o1" — first substring match wins (see header rules).
# Rates are USD per 1M tokens.
[providers.openai]
entries = [
{ prefix = "gpt-4o-mini", input_per_m = 0.15, output_per_m = 0.60 },
{ prefix = "gpt-4o", input_per_m = 2.50, output_per_m = 10.0 },
{ prefix = "gpt-4-turbo", input_per_m = 10.0, output_per_m = 30.0 },
{ prefix = "gpt-4-32k", input_per_m = 60.0, output_per_m = 120.0 },
{ prefix = "gpt-4", input_per_m = 30.0, output_per_m = 60.0 },
{ prefix = "gpt-3.5-turbo-16k", input_per_m = 3.0, output_per_m = 4.0 },
{ prefix = "gpt-3.5-turbo", input_per_m = 0.50, output_per_m = 1.50 },
{ prefix = "o3-mini", input_per_m = 1.10, output_per_m = 4.40 },
{ prefix = "o3", input_per_m = 10.0, output_per_m = 40.0 },
{ prefix = "o1-mini", input_per_m = 1.10, output_per_m = 4.40 },
{ prefix = "o1", input_per_m = 15.0, output_per_m = 60.0 },
]
# MiniMax models. The bare "minimax" entry is the catch-all fallback and
# must stay last — any specific prefix added later goes above it.
# Rates are USD per 1M tokens.
[providers.minimax]
entries = [
{ prefix = "minimax-m2.5-high", input_per_m = 0.60, output_per_m = 2.40 },
{ prefix = "minimax-m2.5", input_per_m = 0.30, output_per_m = 1.20 },
{ prefix = "minimax-m2.1", input_per_m = 0.30, output_per_m = 1.20 },
{ prefix = "minimax-text-01", input_per_m = 0.20, output_per_m = 1.10 },
{ prefix = "minimax", input_per_m = 0.30, output_per_m = 1.20 },
]
# Google Gemini models. No generic "gemini" fallback is defined, so any
# unlisted Gemini model shows as $0.00 (per the header rules).
# Rates are USD per 1M tokens.
[providers.google]
entries = [
{ prefix = "gemini-2.0-flash", input_per_m = 0.10, output_per_m = 0.40 },
{ prefix = "gemini-1.5-pro", input_per_m = 1.25, output_per_m = 5.0 },
{ prefix = "gemini-1.5-flash", input_per_m = 0.075, output_per_m = 0.30 },
]
# DeepSeek models. Bare "deepseek" is the catch-all fallback (same rate as
# deepseek-v3) and must stay last. Rates are USD per 1M tokens.
[providers.deepseek]
entries = [
{ prefix = "deepseek-r1", input_per_m = 0.55, output_per_m = 2.19 },
{ prefix = "deepseek-v3", input_per_m = 0.27, output_per_m = 1.10 },
{ prefix = "deepseek", input_per_m = 0.27, output_per_m = 1.10 },
]
# Meta Llama models. No generic "llama" fallback — unlisted sizes show as
# $0.00. Rates are USD per 1M tokens; Llama is typically served by third
# parties, so these presumably reflect a hosting provider's rates —
# NOTE(review): confirm which host these were taken from.
[providers.meta]
entries = [
{ prefix = "llama-3.3-70b", input_per_m = 0.59, output_per_m = 0.79 },
{ prefix = "llama-3.1-405b", input_per_m = 2.70, output_per_m = 2.70 },
{ prefix = "llama-3.1-70b", input_per_m = 0.52, output_per_m = 0.75 },
{ prefix = "llama-3.1-8b", input_per_m = 0.07, output_per_m = 0.07 },
]
# xAI Grok models. Bare "grok" is the catch-all fallback and must stay last.
# Rates are USD per 1M tokens.
# NOTE(review): the generic "grok" fallback ($5/$15) is priced higher than
# the specific "grok-3" entry ($3/$15) — confirm this is intentional.
[providers.xai]
entries = [
{ prefix = "grok-3-mini", input_per_m = 0.30, output_per_m = 0.50 },
{ prefix = "grok-3", input_per_m = 3.0, output_per_m = 15.0 },
{ prefix = "grok-2", input_per_m = 2.0, output_per_m = 10.0 },
{ prefix = "grok", input_per_m = 5.0, output_per_m = 15.0 },
]
# Moonshot Kimi models. "kimi-k2-turbo" must precede "kimi-k2", and bare
# "kimi" is the catch-all fallback (same rate as kimi-k2) — keep it last.
# Rates are USD per 1M tokens.
[providers.moonshot]
entries = [
{ prefix = "kimi-k2.5", input_per_m = 0.60, output_per_m = 3.0 },
{ prefix = "kimi-k2-turbo", input_per_m = 1.15, output_per_m = 8.0 },
{ prefix = "kimi-k2", input_per_m = 0.60, output_per_m = 2.50 },
{ prefix = "kimi", input_per_m = 0.60, output_per_m = 2.50 },
]
# Mistral models. Bare "mistral" is the catch-all fallback and must stay
# last. Rates are USD per 1M tokens.
# NOTE(review): the fallback's input and output rates are both 0.25 —
# unusual for output to equal input; confirm against the pricing page.
[providers.mistral]
entries = [
{ prefix = "mistral-large", input_per_m = 2.0, output_per_m = 6.0 },
{ prefix = "mistral-small", input_per_m = 0.10, output_per_m = 0.30 },
{ prefix = "codestral", input_per_m = 0.20, output_per_m = 0.60 },
{ prefix = "mistral", input_per_m = 0.25, output_per_m = 0.25 },
]