| 1 | +import type { CodingPlanProvider } from '@/lib/providers/coding-plans/types'; |
| 2 | +import { isReasoningExplicitlyDisabled } from '@/lib/providers/openrouter/request-helpers'; |
| 3 | + |
| 4 | +export default [ |
| 5 | + { |
| 6 | + id: 'byteplus-coding', |
| 7 | + name: 'BytePlus Coding Plan', |
| 8 | + base_url: 'https://ark.ap-southeast.bytepluses.com/api/coding/v3', |
| 9 | + ai_sdk_provider: 'openai-compatible', |
| 10 | + transformRequest(context) { |
| 11 | + context.request.body.thinking = { |
| 12 | + type: isReasoningExplicitlyDisabled(context.request) ? 'disabled' : 'enabled', |
| 13 | + }; |
| 14 | + }, |
| 15 | + models: [ |
| 16 | + { |
| 17 | + id: 'bytedance-seed-code', |
| 18 | + name: 'Seed-Code', |
| 19 | + description: |
| 20 | + "ByteDance's latest code model has been deeply optimized for agentic programming tasks.", |
| 21 | + flags: ['recommended', 'vision'], |
| 22 | + context_length: 262144, |
| 23 | + max_completion_tokens: 32768, |
| 24 | + }, |
| 25 | + { |
| 26 | + id: 'kimi-k2.5', |
| 27 | + name: 'Kimi-K2.5', |
| 28 | + description: |
| 29 | + 'Open-source SoTA native multimodal model with text-only input (for now), stronger code/UI generation.', |
| 30 | + flags: ['recommended'], |
| 31 | + context_length: 262144, |
| 32 | + max_completion_tokens: 32768, |
| 33 | + }, |
| 34 | + { |
| 35 | + id: 'glm-4.7', |
| 36 | + name: 'GLM-4.7', |
| 37 | + description: |
| 38 | + "Z.AI's latest flagship model, enhanced programming capabilities and more stable multi-step reasoning/execution.", |
| 39 | + flags: ['recommended'], |
| 40 | + context_length: 204800, |
| 41 | + max_completion_tokens: 131072, |
| 42 | + }, |
| 43 | + { |
| 44 | + id: 'deepseek-v3.2', |
| 45 | + name: 'DeepSeek-V3.2', |
| 46 | + description: |
| 47 | + 'Designed to harmonize high computational efficiency with strong reasoning and agentic tool-use performance.', |
| 48 | + flags: [], |
| 49 | + context_length: 131072, |
| 50 | + max_completion_tokens: 32768, |
| 51 | + }, |
| 52 | + { |
| 53 | + id: 'gpt-oss-120b', |
| 54 | + name: 'GPT-OSS-120B', |
| 55 | + description: |
| 56 | + "OpenAI's open-weight model, 117B parameters with 5.1B active parameters for production, general purpose, high reasoning use cases.", |
| 57 | + flags: [], |
| 58 | + context_length: 131072, |
| 59 | + max_completion_tokens: 65536, |
| 60 | + }, |
| 61 | + { |
| 62 | + id: 'dola-seed-2.0-pro', |
| 63 | + name: 'Dola-Seed-2.0-Pro', |
| 64 | + description: |
| 65 | + 'Focused on long-chain reasoning and stability in complex task execution, designed for complex real-world business scenarios.', |
| 66 | + flags: ['vision'], |
| 67 | + context_length: 262144, |
| 68 | + max_completion_tokens: 131072, |
| 69 | + }, |
| 70 | + { |
| 71 | + id: 'dola-seed-2.0-lite', |
| 72 | + name: 'Dola-Seed-2.0-Lite', |
| 73 | + description: |
| 74 | + 'Balances generation quality and response speed, making it a strong general-purpose production model.', |
| 75 | + flags: ['vision'], |
| 76 | + context_length: 262144, |
| 77 | + max_completion_tokens: 131072, |
| 78 | + }, |
| 79 | + ], |
| 80 | + }, |
| 81 | + { |
| 82 | + id: 'zai-coding', |
| 83 | + name: 'Z.ai Coding Plan', |
| 84 | + base_url: 'https://api.z.ai/api/coding/paas/v4', |
| 85 | + ai_sdk_provider: 'openai-compatible', |
| 86 | + transformRequest(context) { |
| 87 | + context.request.body.thinking = { |
| 88 | + type: isReasoningExplicitlyDisabled(context.request) ? 'disabled' : 'enabled', |
| 89 | + }; |
| 90 | + }, |
| 91 | + models: [ |
| 92 | + { |
| 93 | + id: 'glm-5-turbo', |
| 94 | + name: 'GLM-5 Turbo', |
| 95 | + description: |
| 96 | + 'GLM-5 Turbo is a new model from Z.ai designed for fast inference and strong performance in agent-driven environments such as OpenClaw scenarios. It is deeply optimized for real-world agent workflows involving long execution chains, with improved complex instruction decomposition, tool use, scheduled and persistent execution, and overall stability across extended tasks.', |
| 97 | + flags: ['recommended'], |
| 98 | + context_length: 202752, |
| 99 | + max_completion_tokens: 131072, |
| 100 | + }, |
| 101 | + { |
| 102 | + id: 'glm-5', |
| 103 | + name: 'GLM-5', |
| 104 | + description: |
| 105 | + "GLM-5 is Z.ai's flagship open-source foundation model engineered for complex systems design and long-horizon agent workflows. Built for expert developers, it delivers production-grade performance on large-scale programming tasks, rivaling leading closed-source models. With advanced agentic planning, deep backend reasoning, and iterative self-correction, GLM-5 moves beyond code generation to full-system construction and autonomous execution.", |
| 106 | + flags: ['recommended'], |
| 107 | + context_length: 204800, |
| 108 | + max_completion_tokens: 131072, |
| 109 | + }, |
| 110 | + { |
| 111 | + id: 'glm-4.7', |
| 112 | + name: 'GLM-4.7', |
| 113 | + description: |
| 114 | + "GLM-4.7 is Z.ai's latest flagship model, featuring upgrades in two key areas: enhanced programming capabilities and more stable multi-step reasoning/execution. It demonstrates significant improvements in executing complex agent tasks while delivering more natural conversational experiences and superior front-end aesthetics.", |
| 115 | + flags: ['recommended'], |
| 116 | + context_length: 204800, |
| 117 | + max_completion_tokens: 131072, |
| 118 | + }, |
| 119 | + { |
| 120 | + id: 'glm-4.7-flash', |
| 121 | + name: 'GLM-4.7-Flash', |
| 122 | + description: |
| 123 | + 'As a 30B-class SOTA model, GLM-4.7-Flash offers a new option that balances performance and efficiency. It is further optimized for agentic coding use cases, strengthening coding capabilities, long-horizon task planning, and tool collaboration, and has achieved leading performance among open-source models of the same size on several current public benchmark leaderboards.', |
| 124 | + flags: [], |
| 125 | + context_length: 200000, |
| 126 | + max_completion_tokens: 131072, |
| 127 | + }, |
| 128 | + { |
| 129 | + id: 'glm-4.7-flashx', |
| 130 | + name: 'GLM-4.7-FlashX', |
| 131 | + description: |
| 132 | + 'GLM-4.7-FlashX is an enhanced variant of GLM-4.7-Flash, offering higher throughput and improved performance for agentic coding workflows. It combines the compact 30B-class efficiency of the Flash series with additional capacity for complex instruction following and multi-step tool use.', |
| 133 | + flags: [], |
| 134 | + context_length: 200000, |
| 135 | + max_completion_tokens: 131072, |
| 136 | + }, |
| 137 | + { |
| 138 | + id: 'glm-4.6', |
| 139 | + name: 'GLM-4.6', |
| 140 | + description: |
| 141 | + 'GLM-4.6 brings several key improvements over GLM-4.5: an expanded context window from 128K to 200K tokens for more complex agentic tasks; superior coding performance on code benchmarks and better real-world performance in agentic coding applications; advanced reasoning with tool use support during inference; stronger capability in tool-use and search-based agents; and refined writing that aligns more naturally with human preferences in style and readability.', |
| 142 | + flags: [], |
| 143 | + context_length: 204800, |
| 144 | + max_completion_tokens: 131072, |
| 145 | + }, |
| 146 | + { |
| 147 | + id: 'glm-4.6v', |
| 148 | + name: 'GLM-4.6V', |
| 149 | + description: |
| 150 | + 'GLM-4.6V is a large multimodal model designed for high-fidelity visual understanding and long-context reasoning across images, documents, and mixed media. It supports up to 128K tokens, processes complex page layouts and charts directly as visual inputs, and integrates native multimodal function calling to connect perception with downstream tool execution. The model also enables interleaved image-text generation and UI reconstruction workflows, including screenshot-to-HTML synthesis and iterative visual editing.', |
| 151 | + flags: ['vision'], |
| 152 | + context_length: 128000, |
| 153 | + max_completion_tokens: 32768, |
| 154 | + }, |
| 155 | + { |
| 156 | + id: 'glm-4.5', |
| 157 | + name: 'GLM-4.5', |
| 158 | + description: |
| 159 | + 'GLM-4.5 is Z.ai\'s flagship foundation model purpose-built for agent-based applications. It leverages a Mixture-of-Experts (MoE) architecture and supports a context length of up to 128K tokens. GLM-4.5 delivers significantly enhanced capabilities in reasoning, code generation, and agent alignment, with a hybrid inference mode offering a "thinking mode" for complex reasoning and tool use and a "non-thinking mode" optimized for instant responses.', |
| 160 | + flags: [], |
| 161 | + context_length: 131072, |
| 162 | + max_completion_tokens: 98304, |
| 163 | + }, |
| 164 | + { |
| 165 | + id: 'glm-4.5-air', |
| 166 | + name: 'GLM-4.5-Air', |
| 167 | + description: |
| 168 | + 'GLM-4.5-Air is the lightweight variant of Z.ai\'s latest flagship model family, purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter size. GLM-4.5-Air supports hybrid inference modes, offering a "thinking mode" for advanced reasoning and tool use and a "non-thinking mode" for real-time interaction.', |
| 169 | + flags: [], |
| 170 | + context_length: 131072, |
| 171 | + max_completion_tokens: 98304, |
| 172 | + }, |
| 173 | + { |
| 174 | + id: 'glm-4.5-flash', |
| 175 | + name: 'GLM-4.5-Flash', |
| 176 | + description: |
| 177 | + 'GLM-4.5-Flash is the free, high-speed variant of the GLM-4.5 model family, optimized for low-latency agentic coding tasks. It shares the MoE architecture of GLM-4.5 in a smaller, faster form factor, retaining reasoning and tool-use capabilities at no cost.', |
| 178 | + flags: [], |
| 179 | + context_length: 131072, |
| 180 | + max_completion_tokens: 98304, |
| 181 | + }, |
| 182 | + { |
| 183 | + id: 'glm-4.5v', |
| 184 | + name: 'GLM-4.5V', |
| 185 | + description: |
| 186 | + 'GLM-4.5V is a vision-language foundation model for multimodal agent applications. Built on a Mixture-of-Experts (MoE) architecture with 106B parameters and 12B activated parameters, it achieves state-of-the-art results in video understanding, image Q&A, OCR, and document parsing, with strong gains in front-end web coding, grounding, and spatial reasoning. It supports a hybrid inference mode with "thinking" and "non-thinking" options.', |
| 187 | + flags: ['vision'], |
| 188 | + context_length: 64000, |
| 189 | + max_completion_tokens: 16384, |
| 190 | + }, |
| 191 | + ], |
| 192 | + }, |
| 193 | +] satisfies ReadonlyArray<CodingPlanProvider>; |