Skip to content

Commit fc04a3a

Browse files
committed
fix: adjust GLM-4.6-turbo max output tokens to 40k
Fixes issue where GLM-4.6-turbo was requesting the entire context window (202752 tokens) for output, leaving no room for input tokens. Now set to 40960 tokens (20% of 200k context) to allow sufficient input space. Fixes #8821
1 parent 97331bc commit fc04a3a

File tree

1 file changed

+11
-7
lines changed

1 file changed

+11
-7
lines changed

packages/types/src/providers/chutes.ts

Lines changed: 11 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -88,7 +88,8 @@ export const chutesModels = {
8888
supportsPromptCache: false,
8989
inputPrice: 0.23,
9090
outputPrice: 0.9,
91-
description: "DeepSeek‑V3.1‑Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix‑ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance.",
91+
description:
92+
"DeepSeek‑V3.1‑Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix‑ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance.",
9293
},
9394
"deepseek-ai/DeepSeek-V3.1-turbo": {
9495
maxTokens: 32768,
@@ -97,7 +98,8 @@ export const chutesModels = {
9798
supportsPromptCache: false,
9899
inputPrice: 1.0,
99100
outputPrice: 3.0,
100-
description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2× quota per request and not intended for bulk workloads.",
101+
description:
102+
"DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2× quota per request and not intended for bulk workloads.",
101103
},
102104
"deepseek-ai/DeepSeek-V3.2-Exp": {
103105
maxTokens: 163840,
@@ -106,7 +108,8 @@ export const chutesModels = {
106108
supportsPromptCache: false,
107109
inputPrice: 0.25,
108110
outputPrice: 0.35,
109-
description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long‑context training and inference efficiency while maintaining performance comparable to V3.1‑Terminus.",
111+
description:
112+
"DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long‑context training and inference efficiency while maintaining performance comparable to V3.1‑Terminus.",
110113
},
111114
"unsloth/Llama-3.3-70B-Instruct": {
112115
maxTokens: 32768, // From Groq
@@ -328,7 +331,7 @@ export const chutesModels = {
328331
"GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.",
329332
},
330333
"zai-org/GLM-4.6-turbo": {
331-
maxTokens: 202752, // From Chutes /v1/models: max_output_length
334+
maxTokens: 40960, // 20% of 200K context window to leave room for input
332335
contextWindow: 202752,
333336
supportsImages: false,
334337
supportsPromptCache: false,
@@ -397,8 +400,9 @@ export const chutesModels = {
397400
contextWindow: 262144,
398401
supportsImages: true,
399402
supportsPromptCache: false,
400-
inputPrice: 0.1600,
401-
outputPrice: 0.6500,
402-
description: "Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.",
403+
inputPrice: 0.16,
404+
outputPrice: 0.65,
405+
description:
406+
"Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.",
403407
},
404408
} as const satisfies Record<string, ModelInfo>

0 commit comments

Comments (0)