diff --git a/packages/types/src/providers/chutes.ts b/packages/types/src/providers/chutes.ts
index c90e0445705..1c140510c4a 100644
--- a/packages/types/src/providers/chutes.ts
+++ b/packages/types/src/providers/chutes.ts
@@ -34,6 +34,7 @@ export type ChutesModelId =
 	| "zai-org/GLM-4.5-FP8"
 	| "zai-org/GLM-4.5-turbo"
 	| "zai-org/GLM-4.6-FP8"
+	| "zai-org/GLM-4.6-turbo"
 	| "moonshotai/Kimi-K2-Instruct-75k"
 	| "moonshotai/Kimi-K2-Instruct-0905"
 	| "Qwen/Qwen3-235B-A22B-Thinking-2507"
@@ -87,7 +88,8 @@ export const chutesModels = {
 		supportsPromptCache: false,
 		inputPrice: 0.23,
 		outputPrice: 0.9,
-		description: "DeepSeek‑V3.1‑Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix‑ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance.",
+		description:
+			"DeepSeek‑V3.1‑Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix‑ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance.",
 	},
 	"deepseek-ai/DeepSeek-V3.1-turbo": {
 		maxTokens: 32768,
@@ -96,7 +98,8 @@ export const chutesModels = {
 		supportsPromptCache: false,
 		inputPrice: 1.0,
 		outputPrice: 3.0,
-		description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2× quota per request and not intended for bulk workloads.",
+		description:
+			"DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2× quota per request and not intended for bulk workloads.",
 	},
 	"deepseek-ai/DeepSeek-V3.2-Exp": {
 		maxTokens: 163840,
@@ -105,7 +108,8 @@ export const chutesModels = {
 		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 0.35,
-		description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long‑context training and inference efficiency while maintaining performance comparable to V3.1‑Terminus.",
+		description:
+			"DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long‑context training and inference efficiency while maintaining performance comparable to V3.1‑Terminus.",
 	},
 	"unsloth/Llama-3.3-70B-Instruct": {
 		maxTokens: 32768, // From Groq
@@ -326,6 +330,15 @@ export const chutesModels = {
 		description:
 			"GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.",
 	},
+	"zai-org/GLM-4.6-turbo": {
+		maxTokens: 32768,
+		contextWindow: 202752,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 1.15,
+		outputPrice: 3.25,
+		description: "GLM-4.6-turbo model with 200K+ token context window, optimized for fast inference.",
+	},
 	"Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": {
 		maxTokens: 32768,
 		contextWindow: 262144,
@@ -387,8 +400,9 @@ export const chutesModels = {
 		contextWindow: 262144,
 		supportsImages: true,
 		supportsPromptCache: false,
-		inputPrice: 0.1600,
-		outputPrice: 0.6500,
-		description: "Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.",
+		inputPrice: 0.16,
+		outputPrice: 0.65,
+		description:
+			"Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.",
 	},
 } as const satisfies Record
diff --git a/src/api/providers/__tests__/chutes.spec.ts b/src/api/providers/__tests__/chutes.spec.ts
index 70ee06a923c..577349b4de9 100644
--- a/src/api/providers/__tests__/chutes.spec.ts
+++ b/src/api/providers/__tests__/chutes.spec.ts
@@ -492,4 +492,76 @@ describe("ChutesHandler", () => {
 		const model = handlerWithModel.getModel()
 		expect(model.info.temperature).toBe(0.5)
 	})
+
+	it("should return zai-org/GLM-4.6-turbo model with correct configuration", () => {
+		const testModelId: ChutesModelId = "zai-org/GLM-4.6-turbo"
+		const handlerWithModel = new ChutesHandler({
+			apiModelId: testModelId,
+			chutesApiKey: "test-chutes-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 32768,
+				contextWindow: 202752,
+				supportsImages: false,
+				supportsPromptCache: false,
+				inputPrice: 1.15,
+				outputPrice: 3.25,
+				description: "GLM-4.6-turbo model with 200K+ token context window, optimized for fast inference.",
+				temperature: 0.5, // Default temperature for non-DeepSeek models
+			}),
+		)
+		// Strengthen test by also asserting the selected model.info matches the static config to catch mapping/regression errors beyond temperature
+		expect(model.info).toEqual(expect.objectContaining(chutesModels[testModelId]))
+	})
+
+	it("should have correct pricing and context for zai-org/GLM-4.6-turbo", () => {
+		// This test ensures the GLM-4.6-turbo model has the expected pricing and context window
+		// Assert exact values and capabilities to catch regressions
+		const model = chutesModels["zai-org/GLM-4.6-turbo"]
+		expect(model.maxTokens).toBe(32768)
+		expect(model.contextWindow).toBe(202752)
+		expect(model.supportsImages).toBe(false)
+		expect(model.supportsPromptCache).toBe(false)
+		expect(model.inputPrice).toBe(1.15)
+		expect(model.outputPrice).toBe(3.25)
+	})
+
+	it("createMessage should pass correct parameters to Chutes client for GLM-4.6-turbo model", async () => {
+		const modelId: ChutesModelId = "zai-org/GLM-4.6-turbo"
+		const modelInfo = chutesModels[modelId]
+		const handlerWithModel = new ChutesHandler({ apiModelId: modelId, chutesApiKey: "test-chutes-api-key" })
+
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					async next() {
+						return { done: true }
+					},
+				}),
+			}
+		})
+
+		const systemPrompt = "Test system prompt for GLM-4.6-turbo"
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{ role: "user", content: "Test message for GLM-4.6-turbo" },
+		]
+
+		const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
+		await messageGenerator.next()
+
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				model: modelId,
+				max_tokens: modelInfo.maxTokens, // Should be 32768
+				temperature: 0.5, // Default temperature for non-DeepSeek models
+				messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
+				stream: true,
+				stream_options: { include_usage: true },
+			}),
+			undefined,
+		)
+	})
 })