@@ -16,24 +16,3 @@ export const FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
  *
  * Thanks!
  */
-
-// Examples
-//
-// "meta-llama/Llama-3.3-70B-Instruct": "accounts/fireworks/models/llama-v3p3-70b-instruct",
-// "meta-llama/Llama-3.2-3B-Instruct": "accounts/fireworks/models/llama-v3p2-3b-instruct",
-// "meta-llama/Llama-3.1-8B-Instruct": "accounts/fireworks/models/llama-v3p1-8b-instruct",
-// "mistralai/Mixtral-8x7B-Instruct-v0.1": "accounts/fireworks/models/mixtral-8x7b-instruct",
-// "deepseek-ai/DeepSeek-R1": "accounts/fireworks/models/deepseek-r1",
-// "deepseek-ai/DeepSeek-V3": "accounts/fireworks/models/deepseek-v3",
-// "meta-llama/Llama-3.2-90B-Vision-Instruct": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
-// "meta-llama/Llama-3.2-11B-Vision-Instruct": "accounts/fireworks/models/llama-v3p2-11b-vision-instruct",
-// "meta-llama/Meta-Llama-3-70B-Instruct": "accounts/fireworks/models/llama-v3-70b-instruct",
-// "meta-llama/Meta-Llama-3-8B-Instruct": "accounts/fireworks/models/llama-v3-8b-instruct",
-// "mistralai/Mistral-Small-24B-Instruct-2501": "accounts/fireworks/models/mistral-small-24b-instruct-2501",
-// "mistralai/Mixtral-8x22B-Instruct-v0.1": "accounts/fireworks/models/mixtral-8x22b-instruct",
-// "Qwen/QWQ-32B-Preview": "accounts/fireworks/models/qwen-qwq-32b-preview",
-// "Qwen/Qwen2.5-72B-Instruct": "accounts/fireworks/models/qwen2p5-72b-instruct",
-// "Qwen/Qwen2.5-Coder-32B-Instruct": "accounts/fireworks/models/qwen2p5-coder-32b-instruct",
-// "Qwen/Qwen2-VL-72B-Instruct": "accounts/fireworks/models/qwen2-vl-72b-instruct",
-// "Gryphe/MythoMax-L2-13b": "accounts/fireworks/models/mythomax-l2-13b",
-// "microsoft/Phi-3.5-vision-instruct": "accounts/fireworks/models/phi-3-vision-128k-instruct",