214 changes: 214 additions & 0 deletions src/api/providers/__tests__/openrouter-deepseek-terminus.spec.ts
@@ -0,0 +1,214 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
Contributor Author

Consider integrating these tests into the main openrouter.spec.ts file or following a consistent naming pattern. Other provider-specific tests are typically included in the main provider test file.
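For illustration, a minimal shape this could take inside openrouter.spec.ts (the outer describe name is an assumption):

describe("OpenRouterHandler", () => {
	// ...existing OpenRouter tests...

	describe("DeepSeek V3.1 Terminus reasoning", () => {
		// the four tests below, moved here unchanged
	})
})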

import { OpenRouterHandler } from "../openrouter"
import type { ApiHandlerOptions } from "../../../shared/api"

// Mock the fetchers
vi.mock("../fetchers/modelCache", () => ({
getModels: vi.fn().mockResolvedValue({
"deepseek/deepseek-v3.1-terminus": {
maxTokens: 8192,
contextWindow: 128000,
supportsImages: false,
supportsPromptCache: false,
supportsReasoningEffort: true,
inputPrice: 0.5,
outputPrice: 2.0,
description: "DeepSeek V3.1 Terminus model",
},
}),
}))

vi.mock("../fetchers/modelEndpointCache", () => ({
getModelEndpoints: vi.fn().mockResolvedValue({}),
}))

// Mock OpenAI client
vi.mock("openai", () => {
const mockStream = {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{ delta: { content: "Test response" } }],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15,
},
}
},
}

return {
default: vi.fn().mockImplementation(() => ({
chat: {
completions: {
create: vi.fn().mockResolvedValue(mockStream),
},
},
})),
}
})

describe("OpenRouterHandler - DeepSeek V3.1 Terminus", () => {
let handler: OpenRouterHandler
let mockCreate: any

beforeEach(() => {
vi.clearAllMocks()
})

it("should exclude reasoning for DeepSeek V3.1 Terminus when reasoning is not enabled", async () => {
Contributor Author

Consider adding a test case for when reasoning is already defined in the model configuration. The current tests cover enableReasoningEffort true/false, but not the case where typeof reasoning !== "undefined".
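A minimal sketch of such a test, mocking fetchModel() to resolve with reasoning already set (the return shape is illustrative and assumes createMessage only relies on the fields mocked here):

it("should leave reasoning untouched when it is already defined", async () => {
	const options: ApiHandlerOptions = {
		openRouterApiKey: "test-key",
		openRouterModelId: "deepseek/deepseek-v3.1-terminus",
	}

	handler = new OpenRouterHandler(options)

	// Force fetchModel() to resolve with reasoning already defined, so the
	// typeof reasoning === "undefined" guard should skip the exclusion.
	vi.spyOn(handler as any, "fetchModel").mockResolvedValue({
		id: "deepseek/deepseek-v3.1-terminus",
		maxTokens: 8192,
		temperature: 0,
		reasoning: { effort: "low" },
	})

	mockCreate = vi.fn().mockResolvedValue({
		[Symbol.asyncIterator]: async function* () {
			yield {
				choices: [{ delta: { content: "Test" } }],
				usage: { prompt_tokens: 10, completion_tokens: 5 },
			}
		},
	})
	;(handler as any).client.chat.completions.create = mockCreate

	for await (const _chunk of handler.createMessage("System prompt", [{ role: "user", content: "Test" }])) {
		// drain the stream
	}

	// The pre-defined reasoning should pass through rather than be replaced with { exclude: true }
	expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({ reasoning: { effort: "low" } }))
})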

const options: ApiHandlerOptions = {
openRouterApiKey: "test-key",
openRouterModelId: "deepseek/deepseek-v3.1-terminus",
enableReasoningEffort: false,
}

handler = new OpenRouterHandler(options)

// Spy on the OpenAI client's create method
mockCreate = vi.fn().mockResolvedValue({
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{ delta: { content: "Test" } }],
usage: { prompt_tokens: 10, completion_tokens: 5 },
}
},
})
;(handler as any).client.chat.completions.create = mockCreate

// Create a message
const generator = handler.createMessage("System prompt", [{ role: "user", content: "Test message" }])

// Consume the generator
const results = []
for await (const chunk of generator) {
results.push(chunk)
}

// Check that the create method was called with reasoning excluded
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: "deepseek/deepseek-v3.1-terminus",
reasoning: { exclude: true },
}),
)
})

it("should not exclude reasoning for DeepSeek V3.1 Terminus when reasoning is enabled", async () => {
const options: ApiHandlerOptions = {
openRouterApiKey: "test-key",
openRouterModelId: "deepseek/deepseek-v3.1-terminus",
enableReasoningEffort: true,
reasoningEffort: "medium",
}

handler = new OpenRouterHandler(options)

// Spy on the OpenAI client's create method
mockCreate = vi.fn().mockResolvedValue({
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{ delta: { content: "Test" } }],
usage: { prompt_tokens: 10, completion_tokens: 5 },
}
},
})
;(handler as any).client.chat.completions.create = mockCreate

// Create a message
const generator = handler.createMessage("System prompt", [{ role: "user", content: "Test message" }])

// Consume the generator
const results = []
for await (const chunk of generator) {
results.push(chunk)
}

// Check that the create method was called with reasoning effort
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: "deepseek/deepseek-v3.1-terminus",
reasoning: { effort: "medium" },
}),
)
})

it("should not affect other models", async () => {
const options: ApiHandlerOptions = {
openRouterApiKey: "test-key",
openRouterModelId: "anthropic/claude-3-sonnet",
enableReasoningEffort: false,
}

// Mock a different model
const { getModels } = await import("../fetchers/modelCache")
vi.mocked(getModels).mockResolvedValue({
"anthropic/claude-3-sonnet": {
maxTokens: 4096,
contextWindow: 200000,
supportsImages: true,
supportsPromptCache: true,
inputPrice: 3.0,
outputPrice: 15.0,
description: "Claude 3 Sonnet",
},
})

handler = new OpenRouterHandler(options)

// Spy on the OpenAI client's create method
mockCreate = vi.fn().mockResolvedValue({
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{ delta: { content: "Test" } }],
usage: { prompt_tokens: 10, completion_tokens: 5 },
}
},
})
;(handler as any).client.chat.completions.create = mockCreate

// Create a message
const generator = handler.createMessage("System prompt", [{ role: "user", content: "Test message" }])

// Consume the generator
const results = []
for await (const chunk of generator) {
results.push(chunk)
}

// Check that reasoning was not excluded for other models
expect(mockCreate).toHaveBeenCalledWith(
expect.not.objectContaining({
reasoning: { exclude: true },
}),
)
})

it("should exclude reasoning in completePrompt for DeepSeek V3.1 Terminus", async () => {
const options: ApiHandlerOptions = {
openRouterApiKey: "test-key",
openRouterModelId: "deepseek/deepseek-v3.1-terminus",
enableReasoningEffort: false,
}

handler = new OpenRouterHandler(options)

// Mock the non-streaming response
mockCreate = vi.fn().mockResolvedValue({
choices: [{ message: { content: "Test response" } }],
})
;(handler as any).client.chat.completions.create = mockCreate

// Call completePrompt
await handler.completePrompt("Test prompt")

// Check that the create method was called with reasoning excluded
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: "deepseek/deepseek-v3.1-terminus",
reasoning: { exclude: true },
stream: false,
}),
)
})
})
19 changes: 19 additions & 0 deletions src/api/providers/openrouter.ts
@@ -118,6 +118,16 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
reasoning = { exclude: true }
}

// DeepSeek V3.1 Terminus also has reasoning enabled by default on OpenRouter
// We need to explicitly disable it when the user hasn't enabled reasoning
Contributor Author

Consider adding a comment explaining why DeepSeek V3.1 Terminus needs special handling, similar to the Gemini comment above. This would help future maintainers understand the reasoning behind this exclusion.
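For instance, something along these lines above the check (the OpenRouter behavior described is an assumption inferred from the surrounding code, not confirmed here):

// OpenRouter enables reasoning by default for DeepSeek V3.1 Terminus. Without an
// explicit opt-out the response streams reasoning tokens the user never requested,
// so mirror the Gemini handling above and exclude reasoning when the user has not
// enabled reasoning effort.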

if (
modelId === "deepseek/deepseek-v3.1-terminus" &&
typeof reasoning === "undefined" &&
!this.options.enableReasoningEffort
) {
reasoning = { exclude: true }
Contributor Author

Consider extracting this reasoning exclusion logic into a helper method to avoid duplication with the same logic in completePrompt() (lines 262-268). For example:

private shouldExcludeReasoning(modelId: string, reasoning: any): boolean {
  return modelId === "deepseek/deepseek-v3.1-terminus" &&
    typeof reasoning === "undefined" &&
    !this.options.enableReasoningEffort;
}
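Both call sites could then collapse to the same guarded assignment:

if (this.shouldExcludeReasoning(modelId, reasoning)) {
	reasoning = { exclude: true }
}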

}

// Convert Anthropic messages to OpenAI format.
let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
@@ -248,6 +258,15 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
async completePrompt(prompt: string) {
let { id: modelId, maxTokens, temperature, reasoning } = await this.fetchModel()

// Apply the same reasoning exclusion logic for DeepSeek V3.1 Terminus
if (
modelId === "deepseek/deepseek-v3.1-terminus" &&
typeof reasoning === "undefined" &&
!this.options.enableReasoningEffort
) {
reasoning = { exclude: true }
}

const completionParams: OpenRouterChatCompletionParams = {
model: modelId,
max_tokens: maxTokens,