Skip to content

Commit 107293a

Browse files
authored
Merge pull request #33 from mnfst/update/add-base-urls
Add model IDs to all providers in data.json
2 parents 53bb659 + 3158a11 commit 107293a

File tree

1 file changed

+52
-52
lines changed

1 file changed

+52
-52
lines changed

data.json

Lines changed: 52 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
"name": "Cohere",
66
"category": "provider_api",
77
"country": "CA",
8-
"flag": "🇨🇦",
8+
"flag": "\ud83c\udde8\ud83c\udde6",
99
"url": "https://dashboard.cohere.com/api-keys",
1010
"baseUrl": "https://api.cohere.com/v2",
1111
"description": "Free \"Trial\" API key, no credit card. 1,000 API calls/month. Non-commercial use only.",
@@ -36,7 +36,7 @@
3636
"rateLimit": "20 RPM"
3737
},
3838
{
39-
"id": "command-r7b",
39+
"id": "command-r7b-12-2024",
4040
"name": "Command R7B",
4141
"context": "128K",
4242
"maxOutput": "4K",
@@ -46,16 +46,16 @@
4646
{
4747
"id": "embed-v4.0",
4848
"name": "Embed 4",
49-
"context": "—",
50-
"maxOutput": "—",
49+
"context": "\u2014",
50+
"maxOutput": "\u2014",
5151
"modality": "Embeddings (Text + Image)",
5252
"rateLimit": "2,000 inputs/min"
5353
},
5454
{
5555
"id": "rerank-v3.5",
5656
"name": "Rerank 3.5",
57-
"context": "—",
58-
"maxOutput": "—",
57+
"context": "\u2014",
58+
"maxOutput": "\u2014",
5959
"modality": "Reranking",
6060
"rateLimit": "10 RPM"
6161
}
@@ -65,7 +65,7 @@
6565
"name": "Google Gemini",
6666
"category": "provider_api",
6767
"country": "US",
68-
"flag": "🇺🇸",
68+
"flag": "\ud83c\uddfa\ud83c\uddf8",
6969
"url": "https://aistudio.google.com/app/apikey",
7070
"baseUrl": "https://generativelanguage.googleapis.com/v1beta",
7171
"description": "Free tier unavailable in EU/UK/Switzerland. Free-tier prompts may be used by Google to improve products.",
@@ -93,7 +93,7 @@
9393
"name": "Mistral AI",
9494
"category": "provider_api",
9595
"country": "FR",
96-
"flag": "🇫🇷",
96+
"flag": "\ud83c\uddeb\ud83c\uddf7",
9797
"url": "https://console.mistral.ai/api-keys",
9898
"baseUrl": "https://api.mistral.ai/v1",
9999
"description": "Free \"Experiment\" plan, no credit card. ~1B tokens/month.",
@@ -116,7 +116,7 @@
116116
"rateLimit": "~1 RPS, 500K TPM"
117117
},
118118
{
119-
"id": "mistral-large-latest",
119+
"id": "mistral-large-2411",
120120
"name": "Mistral Large 3",
121121
"context": "256K",
122122
"maxOutput": "256K",
@@ -132,15 +132,15 @@
132132
"rateLimit": "~1 RPS, 500K TPM"
133133
},
134134
{
135-
"id": "codestral-latest",
135+
"id": "codestral-2501",
136136
"name": "Codestral",
137137
"context": "256K",
138138
"maxOutput": "256K",
139139
"modality": "Code",
140140
"rateLimit": "~1 RPS, 500K TPM"
141141
},
142142
{
143-
"id": "pixtral-large-latest",
143+
"id": "pixtral-large-2411",
144144
"name": "Pixtral Large",
145145
"context": "128K",
146146
"maxOutput": "128K",
@@ -153,7 +153,7 @@
153153
"name": "Z AI (Zhipu AI)",
154154
"category": "provider_api",
155155
"country": "CN",
156-
"flag": "🇨🇳",
156+
"flag": "\ud83c\udde8\ud83c\uddf3",
157157
"url": "https://open.bigmodel.cn/usercenter/apikeys",
158158
"baseUrl": "https://open.bigmodel.cn/api/paas/v4",
159159
"description": "Permanent free models, no credit card required.",
@@ -189,7 +189,7 @@
189189
"name": "Cerebras",
190190
"category": "inference_provider",
191191
"country": "US",
192-
"flag": "🇺🇸",
192+
"flag": "\ud83c\uddfa\ud83c\uddf8",
193193
"url": "https://cloud.cerebras.ai/",
194194
"baseUrl": "https://api.cerebras.ai/v1",
195195
"description": "Free tier, no credit card. Ultra-fast inference (~2,600 tok/s). 1M tokens/day cap.",
@@ -233,7 +233,7 @@
233233
"name": "Cloudflare Workers AI",
234234
"category": "inference_provider",
235235
"country": "US",
236-
"flag": "🇺🇸",
236+
"flag": "\ud83c\uddfa\ud83c\uddf8",
237237
"url": "https://dash.cloudflare.com/profile/api-tokens",
238238
"baseUrl": "https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run",
239239
"description": "10,000 Neurons/day free. 50+ models available on free tier.",
@@ -317,7 +317,7 @@
317317
"name": "GitHub Models",
318318
"category": "inference_provider",
319319
"country": "US",
320-
"flag": "🇺🇸",
320+
"flag": "\ud83c\uddfa\ud83c\uddf8",
321321
"url": "https://github.com/marketplace/models",
322322
"baseUrl": "https://models.inference.ai.azure.com",
323323
"description": "Free prototyping for all GitHub users. 45+ models. Per-request limits (8K in / 4K out).",
@@ -364,23 +364,23 @@
364364
"rateLimit": "10 RPM, 50 RPD"
365365
},
366366
{
367-
"id": "Llama-4-Scout-17B-16E-Instruct",
367+
"id": "Llama-4-Scout-17B-16E",
368368
"name": "Llama-4-Scout-17B-16E",
369369
"context": "512K",
370370
"maxOutput": "~4K",
371371
"modality": "Text + Vision",
372372
"rateLimit": "15 RPM, 150 RPD"
373373
},
374374
{
375-
"id": "Llama-4-Maverick-17B-128E-Instruct",
375+
"id": "Llama-4-Maverick-17B-128E",
376376
"name": "Llama-4-Maverick-17B-128E",
377377
"context": "256K",
378378
"maxOutput": "~4K",
379379
"modality": "Text + Vision",
380380
"rateLimit": "10 RPM, 50 RPD"
381381
},
382382
{
383-
"id": "Meta-Llama-3.3-70B-Instruct",
383+
"id": "Meta-Llama-3.3-70B",
384384
"name": "Meta-Llama-3.3-70B",
385385
"context": "131K",
386386
"maxOutput": "~4K",
@@ -396,7 +396,7 @@
396396
"rateLimit": "15 RPM, 150 RPD"
397397
},
398398
{
399-
"id": "Mistral-Small-3.1-24B-Instruct",
399+
"id": "Mistral-Small-3.1",
400400
"name": "Mistral-Small-3.1",
401401
"context": "128K",
402402
"maxOutput": "~4K",
@@ -417,7 +417,7 @@
417417
"name": "Groq",
418418
"category": "inference_provider",
419419
"country": "US",
420-
"flag": "🇺🇸",
420+
"flag": "\ud83c\uddfa\ud83c\uddf8",
421421
"url": "https://console.groq.com/keys",
422422
"baseUrl": "https://api.groq.com/openai/v1",
423423
"description": "Free tier, no credit card. Ultra-fast LPU inference.",
@@ -490,17 +490,17 @@
490490
{
491491
"id": "whisper-large-v3",
492492
"name": "whisper-large-v3",
493-
"context": "—",
494-
"maxOutput": "—",
495-
"modality": "Audio → Text",
493+
"context": "\u2014",
494+
"maxOutput": "\u2014",
495+
"modality": "Audio \u2192 Text",
496496
"rateLimit": "20 RPM, 2,000 RPD"
497497
},
498498
{
499499
"id": "whisper-large-v3-turbo",
500500
"name": "whisper-large-v3-turbo",
501-
"context": "—",
502-
"maxOutput": "—",
503-
"modality": "Audio → Text",
501+
"context": "\u2014",
502+
"maxOutput": "\u2014",
503+
"modality": "Audio \u2192 Text",
504504
"rateLimit": "20 RPM, 2,000 RPD"
505505
}
506506
]
@@ -509,7 +509,7 @@
509509
"name": "Hugging Face",
510510
"category": "inference_provider",
511511
"country": "US",
512-
"flag": "🇺🇸",
512+
"flag": "\ud83c\uddfa\ud83c\uddf8",
513513
"url": "https://huggingface.co/settings/tokens",
514514
"baseUrl": "https://api-inference.huggingface.co/models",
515515
"description": "Free Serverless Inference API + ~$0.10/month free credits. Thousands of models.",
@@ -569,7 +569,7 @@
569569
"name": "Kilo Code",
570570
"category": "inference_provider",
571571
"country": "US",
572-
"flag": "🇺🇸",
572+
"flag": "\ud83c\uddfa\ud83c\uddf8",
573573
"url": "https://kilo.ai",
574574
"baseUrl": "https://api.kilo.ai/api/gateway",
575575
"description": "Free models with no credit card required. `kilo-auto/free` auto-router routes to minimax/minimax-m2.5:free (80%) and stepfun/step-3.5-flash:free (20%).",
@@ -578,16 +578,16 @@
578578
{
579579
"id": "bytedance-seed/dola-seed-2.0-pro:free",
580580
"name": "bytedance-seed/dola-seed-2.0-pro:free",
581-
"context": "—",
582-
"maxOutput": "—",
581+
"context": "\u2014",
582+
"maxOutput": "\u2014",
583583
"modality": "Text",
584584
"rateLimit": "~200 req/hr"
585585
},
586586
{
587587
"id": "x-ai/grok-code-fast-1:optimized:free",
588588
"name": "x-ai/grok-code-fast-1:optimized:free",
589-
"context": "—",
590-
"maxOutput": "—",
589+
"context": "\u2014",
590+
"maxOutput": "\u2014",
591591
"modality": "Text (code)",
592592
"rateLimit": "~200 req/hr"
593593
},
@@ -602,8 +602,8 @@
602602
{
603603
"id": "arcee-ai/trinity-large-thinking:free",
604604
"name": "arcee-ai/trinity-large-thinking:free",
605-
"context": "—",
606-
"maxOutput": "—",
605+
"context": "\u2014",
606+
"maxOutput": "\u2014",
607607
"modality": "Text (reasoning)",
608608
"rateLimit": "~200 req/hr"
609609
},
@@ -621,7 +621,7 @@
621621
"name": "LLM7.io",
622622
"category": "inference_provider",
623623
"country": "GB",
624-
"flag": "🇬🇧",
624+
"flag": "\ud83c\uddec\ud83c\udde7",
625625
"url": "https://token.llm7.io",
626626
"baseUrl": "https://api.llm7.io/v1",
627627
"description": "Zero-friction API gateway. No registration needed for basic access. 30+ models.",
@@ -630,48 +630,48 @@
630630
{
631631
"id": "deepseek-r1-0528",
632632
"name": "deepseek-r1-0528",
633-
"context": "—",
634-
"maxOutput": "—",
633+
"context": "\u2014",
634+
"maxOutput": "\u2014",
635635
"modality": "Text (reasoning)",
636636
"rateLimit": "30 RPM (120 with token)"
637637
},
638638
{
639639
"id": "deepseek-v3-0324",
640640
"name": "deepseek-v3-0324",
641-
"context": "—",
642-
"maxOutput": "—",
641+
"context": "\u2014",
642+
"maxOutput": "\u2014",
643643
"modality": "Text",
644644
"rateLimit": "30 RPM (120 with token)"
645645
},
646646
{
647647
"id": "gemini-2.5-flash-lite",
648648
"name": "gemini-2.5-flash-lite",
649-
"context": "—",
650-
"maxOutput": "—",
649+
"context": "\u2014",
650+
"maxOutput": "\u2014",
651651
"modality": "Text + Vision",
652652
"rateLimit": "30 RPM (120 with token)"
653653
},
654654
{
655655
"id": "gpt-4o-mini",
656656
"name": "gpt-4o-mini",
657-
"context": "—",
658-
"maxOutput": "—",
657+
"context": "\u2014",
658+
"maxOutput": "\u2014",
659659
"modality": "Text + Vision",
660660
"rateLimit": "30 RPM (120 with token)"
661661
},
662662
{
663663
"id": "mistral-small-3.1-24b",
664664
"name": "mistral-small-3.1-24b",
665665
"context": "32K",
666-
"maxOutput": "—",
666+
"maxOutput": "\u2014",
667667
"modality": "Text",
668668
"rateLimit": "30 RPM (120 with token)"
669669
},
670670
{
671671
"id": "qwen2.5-coder-32b",
672672
"name": "qwen2.5-coder-32b",
673-
"context": "—",
674-
"maxOutput": "—",
673+
"context": "\u2014",
674+
"maxOutput": "\u2014",
675675
"modality": "Text (code)",
676676
"rateLimit": "30 RPM (120 with token)"
677677
},
@@ -689,7 +689,7 @@
689689
"name": "NVIDIA NIM",
690690
"category": "inference_provider",
691691
"country": "US",
692-
"flag": "🇺🇸",
692+
"flag": "\ud83c\uddfa\ud83c\uddf8",
693693
"url": "https://build.nvidia.com/explore/discover",
694694
"baseUrl": "https://integrate.api.nvidia.com/v1",
695695
"description": "Free with NVIDIA Developer Program membership. 100+ models. No daily token cap.",
@@ -789,7 +789,7 @@
789789
"name": "Ollama Cloud",
790790
"category": "inference_provider",
791791
"country": "US",
792-
"flag": "🇺🇸",
792+
"flag": "\ud83c\uddfa\ud83c\uddf8",
793793
"url": "https://ollama.com/settings/keys",
794794
"baseUrl": "https://api.ollama.com",
795795
"description": "Free tier with qualitative usage limits. 400+ models from Ollama library. Not OpenAI SDK-compatible; uses [Ollama API](https://docs.ollama.com/cloud).",
@@ -849,7 +849,7 @@
849849
"name": "OpenRouter",
850850
"category": "inference_provider",
851851
"country": "US",
852-
"flag": "🇺🇸",
852+
"flag": "\ud83c\uddfa\ud83c\uddf8",
853853
"url": "https://openrouter.ai/keys",
854854
"baseUrl": "https://openrouter.ai/api/v1",
855855
"description": "35+ free models (marked with `:free` suffix). OpenAI SDK-compatible.",
@@ -965,7 +965,7 @@
965965
"name": "SiliconFlow",
966966
"category": "inference_provider",
967967
"country": "CN",
968-
"flag": "🇨🇳",
968+
"flag": "\ud83c\udde8\ud83c\uddf3",
969969
"url": "https://cloud.siliconflow.cn/account/ak",
970970
"baseUrl": "https://api.siliconflow.cn/v1",
971971
"description": "Free tier with 14 CNY signup credits. Permanently free models available.",
@@ -1014,7 +1014,7 @@
10141014
{
10151015
"id": "deepseek-ai/DeepSeek-OCR",
10161016
"name": "deepseek-ai/DeepSeek-OCR",
1017-
"context": "—",
1017+
"context": "\u2014",
10181018
"maxOutput": "8K",
10191019
"modality": "Vision (OCR)",
10201020
"rateLimit": "1,000 RPM, 50K TPM"
@@ -1049,7 +1049,7 @@
10491049
},
10501050
{
10511051
"id": 5,
1052-
"text": "Kilo Code free model list may change over time. nvidia/nemotron-3-super-120b-a12b:free is for trial use only — prompts are logged by NVIDIA. Auto-router `kilo-auto/free` routes to minimax/minimax-m2.5:free (80%) and stepfun/step-3.5-flash:free (20%)."
1052+
"text": "Kilo Code free model list may change over time. nvidia/nemotron-3-super-120b-a12b:free is for trial use only \u2014 prompts are logged by NVIDIA. Auto-router `kilo-auto/free` routes to minimax/minimax-m2.5:free (80%) and stepfun/step-3.5-flash:free (20%)."
10531053
}
10541054
],
10551055
"glossary": [

0 commit comments

Comments (0)