Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
86 changes: 86 additions & 0 deletions src/api/providers/__tests__/openrouter.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import OpenAI from "openai"
import { OpenRouterHandler } from "../openrouter"
import { ApiHandlerOptions } from "../../../shared/api"
import { Package } from "../../../shared/package"
import { getModels } from "../fetchers/modelCache"

// Mock dependencies
vitest.mock("openai")
Expand Down Expand Up @@ -44,6 +45,9 @@ vitest.mock("../fetchers/modelCache", () => ({
})
}),
}))
vitest.mock("../fetchers/modelEndpointCache", () => ({
getModelEndpoints: vitest.fn().mockResolvedValue({}),
}))

describe("OpenRouterHandler", () => {
const mockOptions: ApiHandlerOptions = {
Expand Down Expand Up @@ -267,6 +271,88 @@ describe("OpenRouterHandler", () => {
const generator = handler.createMessage("test", [])
await expect(generator.next()).rejects.toThrow("OpenRouter API Error 500: API Error")
})

// GPT-5 models routed through OpenRouter must opt in to reasoning output:
// the request body needs include_reasoning plus a reasoning.effort setting.
it("passes reasoning effort and include_reasoning for GPT-5 models via OpenRouter", async () => {
	// Advertise a GPT-5 model that supports configurable reasoning effort.
	;(getModels as any).mockResolvedValueOnce({
		"openai/gpt-5-2025-08-07": {
			maxTokens: 8192,
			contextWindow: 128000,
			supportsPromptCache: false,
			supportsReasoningEffort: true,
			description: "GPT-5 via OpenRouter",
		},
	})

	// Single-chunk stream stub carrying both a reasoning delta and a content delta.
	const streamStub = {
		async *[Symbol.asyncIterator]() {
			yield {
				id: "openai/gpt-5-2025-08-07",
				choices: [{ delta: { reasoning: "Thinking...", content: "Hello" } }],
				usage: { prompt_tokens: 1, completion_tokens: 2, cost: 0.0 },
			}
		},
	}

	const createSpy = vitest.fn().mockResolvedValue(streamStub)
	;(OpenAI as any).prototype.chat = { completions: { create: createSpy } } as any

	const handler = new OpenRouterHandler({
		openRouterApiKey: "test-key",
		openRouterModelId: "openai/gpt-5-2025-08-07",
		enableReasoningEffort: true,
		reasoningEffort: "minimal" as any,
	})

	// Consume the generator fully so the underlying API request is issued.
	const stream = handler.createMessage("sys", [{ role: "user", content: "hi" } as any])
	let step = await stream.next()
	while (!step.done) {
		step = await stream.next()
	}

	// Inspect the request body handed to the OpenAI SDK.
	const requestBody = (createSpy as any).mock.calls[0][0]
	expect(requestBody.model).toBe("openai/gpt-5-2025-08-07")
	expect(requestBody.include_reasoning).toBe(true)
	expect(requestBody.reasoning).toEqual({ effort: "minimal" })
})

// When reasoning effort is enabled but no explicit level is configured,
// GPT-5 requests should fall back to the "medium" effort default.
it('defaults GPT-5 reasoning effort to "medium" when enabled but not specified', async () => {
	;(getModels as any).mockResolvedValueOnce({
		"openai/gpt-5-2025-08-07": {
			maxTokens: 8192,
			contextWindow: 128000,
			supportsPromptCache: false,
			supportsReasoningEffort: true,
			description: "GPT-5 via OpenRouter",
		},
	})

	// Minimal one-chunk stream: content only, no reasoning delta needed here.
	const stubStream = {
		async *[Symbol.asyncIterator]() {
			yield {
				id: "openai/gpt-5-2025-08-07",
				choices: [{ delta: { content: "Hi" } }],
				usage: { prompt_tokens: 1, completion_tokens: 2, cost: 0.0 },
			}
		},
	}

	const completionsCreate = vitest.fn().mockResolvedValue(stubStream)
	;(OpenAI as any).prototype.chat = { completions: { create: completionsCreate } } as any

	// NOTE: reasoningEffort is deliberately omitted to exercise the default.
	const handler = new OpenRouterHandler({
		openRouterApiKey: "test-key",
		openRouterModelId: "openai/gpt-5-2025-08-07",
		enableReasoningEffort: true,
	})

	// Drain the generator so the underlying API call fires.
	for await (const _chunk of handler.createMessage("sys", [{ role: "user", content: "hi" } as any])) {
		// drain
	}

	const body = (completionsCreate as any).mock.calls[0][0]
	expect(body.include_reasoning).toBe(true)
	expect(body.reasoning).toEqual({ effort: "medium" })
})
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good test coverage for createMessage()! Should we add a similar test for completePrompt() to verify that include_reasoning: true is also passed for GPT-5 models in that method?

})

describe("completePrompt", () => {
Expand Down
13 changes: 12 additions & 1 deletion src/api/providers/openrouter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
messages: openAiMessages,
stream: true,
stream_options: { include_usage: true },
// For GPT-5 via OpenRouter, request reasoning content in the stream explicitly
...(modelId.startsWith("openai/gpt-5") && { include_reasoning: true }),
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider extracting this GPT-5 check to a helper function like isGpt5Model(modelId) since the same check appears in both createMessage() (line 125) and completePrompt() (line 233). This would make future updates easier and more maintainable.

// Only include provider if openRouterSpecificProvider is not "[default]".
...(this.options.openRouterSpecificProvider &&
this.options.openRouterSpecificProvider !== OPENROUTER_DEFAULT_PROVIDER_NAME && {
Expand Down Expand Up @@ -208,7 +210,14 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
defaultTemperature: isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0,
})

return { id, info, topP: isDeepSeekR1 ? 0.95 : undefined, ...params }
// Apply GPT-5 defaults for OpenRouter: default reasoning effort to "medium" when enabled
let adjustedParams = params
if (id.startsWith("openai/gpt-5") && !params.reasoning && this.options.enableReasoningEffort !== false) {
const effort = (this.options.reasoningEffort as any) ?? "medium"
adjustedParams = { ...params, reasoning: { effort } as OpenRouterReasoningParams }
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this logic potentially redundant? I notice we're adding GPT-5-specific reasoning handling here, but getModelParams() already calls getOpenRouterReasoning() which should handle reasoning parameters. Could we rely solely on the existing getModelParams() flow instead of duplicating the logic?

}

return { id, info, topP: isDeepSeekR1 ? 0.95 : undefined, ...adjustedParams }
}

async completePrompt(prompt: string) {
Expand All @@ -220,6 +229,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
temperature,
messages: [{ role: "user", content: prompt }],
stream: false,
// For GPT-5 via OpenRouter, request reasoning details explicitly as well
...(modelId.startsWith("openai/gpt-5") && { include_reasoning: true }),
// Only include provider if openRouterSpecificProvider is not "[default]".
...(this.options.openRouterSpecificProvider &&
this.options.openRouterSpecificProvider !== OPENROUTER_DEFAULT_PROVIDER_NAME && {
Expand Down
Loading