Skip to content

Commit c404987

Browse files
committed
fix: update GLM-4.6-FP8 test expectations and add GLM-4.6-turbo test
- Updated the GLM-4.6-FP8 test to match the resolved merge configuration (contextWindow: 202752, detailed description)
- Added the missing test for the GLM-4.6-turbo model with its correct configuration
- All 25 tests now pass
1 parent f55e0b3 commit c404987

File tree

1 file changed

+24
-2
lines changed

1 file changed

+24
-2
lines changed

src/api/providers/__tests__/chutes.spec.ts

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -286,13 +286,35 @@ describe("ChutesHandler", () => {
286286
expect(model.info).toEqual(
287287
expect.objectContaining({
288288
maxTokens: 32768,
289-
contextWindow: 200000,
289+
contextWindow: 202752,
290290
supportsImages: false,
291291
supportsPromptCache: false,
292292
inputPrice: 0,
293293
outputPrice: 0,
294294
description:
295-
"GLM-4.6-FP8 model with 200K token context window, state-of-the-art performance with fast inference.",
295+
"GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.",
296+
temperature: 0.5, // Default temperature for non-DeepSeek models
297+
}),
298+
)
299+
})
300+
301+
it("should return zai-org/GLM-4.6-turbo model with correct configuration", () => {
302+
const testModelId: ChutesModelId = "zai-org/GLM-4.6-turbo"
303+
const handlerWithModel = new ChutesHandler({
304+
apiModelId: testModelId,
305+
chutesApiKey: "test-chutes-api-key",
306+
})
307+
const model = handlerWithModel.getModel()
308+
expect(model.id).toBe(testModelId)
309+
expect(model.info).toEqual(
310+
expect.objectContaining({
311+
maxTokens: 202752,
312+
contextWindow: 202752,
313+
supportsImages: false,
314+
supportsPromptCache: false,
315+
inputPrice: 1.15,
316+
outputPrice: 3.25,
317+
description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference.",
296318
temperature: 0.5, // Default temperature for non-DeepSeek models
297319
}),
298320
)

0 commit comments

Comments (0)