Skip to content

Commit 92c1e3b

Browse files
committed
fix: enable reasoning display for DeepSeek V3 models with reasoning effort
- Updated OpenAI provider to detect DeepSeek V3/chat models when reasoning is enabled
- DeepSeek V3.1 models now properly show reasoning/thinking sections
- Added test coverage for DeepSeek V3 reasoning scenarios

Fixes #7370
1 parent fc70012 commit 92c1e3b

File tree

2 files changed

+55
-1
lines changed

2 files changed

+55
-1
lines changed

src/api/providers/__tests__/openai.spec.ts

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -380,6 +380,55 @@ describe("OpenAiHandler", () => {
380380
const callArgs = mockCreate.mock.calls[0][0]
381381
expect(callArgs.temperature).toBe(0.6)
382382
})
383+
384+
it("should detect DeepSeek V3 models with reasoning effort as reasoning models", async () => {
385+
const deepseekV3Options: ApiHandlerOptions = {
386+
...mockOptions,
387+
openAiModelId: "deepseek-v3",
388+
openAiCustomModelInfo: {
389+
...openAiModelInfoSaneDefaults,
390+
supportsReasoningEffort: true,
391+
},
392+
reasoningEffort: "medium",
393+
}
394+
const deepseekHandler = new OpenAiHandler(deepseekV3Options)
395+
const stream = deepseekHandler.createMessage(systemPrompt, messages)
396+
for await (const _chunk of stream) {
397+
// consume stream
398+
}
399+
// Assert the mockCreate was called with R1 format messages
400+
expect(mockCreate).toHaveBeenCalled()
401+
const callArgs = mockCreate.mock.calls[0][0]
402+
// When DeepSeek is detected as a reasoning model, it uses R1 format
403+
// which combines system and user messages
404+
expect(callArgs.messages[0].role).toBe("user")
405+
expect(callArgs.messages[0].content).toContain("You are a helpful assistant.")
406+
expect(callArgs.reasoning_effort).toBe("medium")
407+
})
408+
409+
it("should detect DeepSeek-chat models with reasoning effort as reasoning models", async () => {
410+
const deepseekChatOptions: ApiHandlerOptions = {
411+
...mockOptions,
412+
openAiModelId: "deepseek-chat",
413+
openAiCustomModelInfo: {
414+
...openAiModelInfoSaneDefaults,
415+
supportsReasoningEffort: true,
416+
},
417+
reasoningEffort: "high",
418+
}
419+
const deepseekHandler = new OpenAiHandler(deepseekChatOptions)
420+
const stream = deepseekHandler.createMessage(systemPrompt, messages)
421+
for await (const _chunk of stream) {
422+
// consume stream
423+
}
424+
// Assert the mockCreate was called with R1 format messages
425+
expect(mockCreate).toHaveBeenCalled()
426+
const callArgs = mockCreate.mock.calls[0][0]
427+
// When DeepSeek is detected as a reasoning model, it uses R1 format
428+
expect(callArgs.messages[0].role).toBe("user")
429+
expect(callArgs.messages[0].content).toContain("You are a helpful assistant.")
430+
expect(callArgs.reasoning_effort).toBe("high")
431+
})
383432
})
384433

385434
describe("error handling", () => {

src/api/providers/openai.ts

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,12 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
8989
const enabledR1Format = this.options.openAiR1FormatEnabled ?? false
9090
const enabledLegacyFormat = this.options.openAiLegacyFormat ?? false
9191
const isAzureAiInference = this._isAzureAiInference(modelUrl)
92-
const deepseekReasoner = modelId.includes("deepseek-reasoner") || enabledR1Format
92+
// Check if this is a DeepSeek model with reasoning enabled
93+
const isDeepSeekWithReasoning =
94+
(modelId.toLowerCase().includes("deepseek") && reasoning) ||
95+
modelId.includes("deepseek-reasoner") ||
96+
enabledR1Format
97+
const deepseekReasoner = isDeepSeekWithReasoning
9398
const ark = modelUrl.includes(".volces.com")
9499

95100
if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {

0 commit comments

Comments (0)