Skip to content

Commit ccc1294

Browse files
committed
fix: correct context size for deepseek/deepseek-chat-v3.1:free model
- OpenRouter incorrectly reports 64k context for deepseek-chat-v3.1:free
- Actual context size should be 163.8k tokens as per provider documentation
- Added special handling in parseOpenRouterModel to override the incorrect value
- Added test case to verify the fix

Fixes #7952
1 parent c4c4780 commit ccc1294

File tree

2 files changed

+38
-0
lines changed

2 files changed

+38
-0
lines changed

src/api/providers/fetchers/__tests__/openrouter.spec.ts

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -337,6 +337,36 @@ describe("OpenRouter API", () => {
337337
expect(result.contextWindow).toBe(128000)
338338
})
339339

340+
it("sets deepseek-chat-v3.1:free model to 163.8k context window", () => {
341+
const mockModel = {
342+
name: "DeepSeek Chat V3.1 Free",
343+
description: "DeepSeek Chat V3.1 Free model",
344+
context_length: 65536, // OpenRouter incorrectly reports 64k
345+
max_completion_tokens: null,
346+
pricing: {
347+
prompt: "0",
348+
completion: "0",
349+
input_cache_write: undefined,
350+
input_cache_read: undefined,
351+
},
352+
}
353+
354+
const result = parseOpenRouterModel({
355+
id: "deepseek/deepseek-chat-v3.1:free",
356+
model: mockModel,
357+
inputModality: ["text"],
358+
outputModality: ["text"],
359+
maxTokens: null,
360+
supportedParameters: ["temperature", "top_p", "max_tokens"],
361+
})
362+
363+
// Should override the incorrect 64k context with 163.8k
364+
expect(result.contextWindow).toBe(163840)
365+
// maxTokens should be recalculated based on corrected context
366+
expect(result.maxTokens).toBe(Math.ceil(163840 * 0.2))
367+
expect(result.description).toBe("DeepSeek Chat V3.1 Free model")
368+
})
369+
340370
it("filters out image generation models", () => {
341371
const mockImageModel = {
342372
name: "Image Model",

src/api/providers/fetchers/openrouter.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -263,5 +263,13 @@ export const parseOpenRouterModel = ({
263263
modelInfo.maxTokens = 32768
264264
}
265265

266+
// Set deepseek-chat-v3.1:free model to correct context size
267+
// OpenRouter reports 64k but the actual context is 163.8k tokens
268+
if (id === "deepseek/deepseek-chat-v3.1:free") {
269+
modelInfo.contextWindow = 163840 // 163.8k tokens
270+
// Recalculate maxTokens based on the corrected context window
271+
modelInfo.maxTokens = maxTokens || Math.ceil(163840 * 0.2)
272+
}
273+
266274
return modelInfo
267275
}

0 commit comments

Comments
 (0)