Skip to content

Commit 039e59f

Browse files
committed
fix(openrouter): ensure GPT-5 reasoning effort is passed and include_reasoning is set for OpenRouter
1 parent 9b8f3b9 commit 039e59f

File tree

2 files changed

+98
-1
lines changed

2 files changed

+98
-1
lines changed

src/api/providers/__tests__/openrouter.spec.ts

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import OpenAI from "openai"
99
import { OpenRouterHandler } from "../openrouter"
1010
import { ApiHandlerOptions } from "../../../shared/api"
1111
import { Package } from "../../../shared/package"
12+
import { getModels } from "../fetchers/modelCache"
1213

1314
// Mock dependencies
1415
vitest.mock("openai")
@@ -44,6 +45,9 @@ vitest.mock("../fetchers/modelCache", () => ({
4445
})
4546
}),
4647
}))
48+
vitest.mock("../fetchers/modelEndpointCache", () => ({
49+
getModelEndpoints: vitest.fn().mockResolvedValue({}),
50+
}))
4751

4852
describe("OpenRouterHandler", () => {
4953
const mockOptions: ApiHandlerOptions = {
@@ -267,6 +271,88 @@ describe("OpenRouterHandler", () => {
267271
const generator = handler.createMessage("test", [])
268272
await expect(generator.next()).rejects.toThrow("OpenRouter API Error 500: API Error")
269273
})
274+
275+
it("passes reasoning effort and include_reasoning for GPT-5 models via OpenRouter", async () => {
276+
;(getModels as any).mockResolvedValueOnce({
277+
"openai/gpt-5-2025-08-07": {
278+
maxTokens: 8192,
279+
contextWindow: 128000,
280+
supportsPromptCache: false,
281+
supportsReasoningEffort: true,
282+
description: "GPT-5 via OpenRouter",
283+
},
284+
})
285+
286+
const mockStream = {
287+
async *[Symbol.asyncIterator]() {
288+
yield {
289+
id: "openai/gpt-5-2025-08-07",
290+
choices: [{ delta: { reasoning: "Thinking...", content: "Hello" } }],
291+
usage: { prompt_tokens: 1, completion_tokens: 2, cost: 0.0 },
292+
}
293+
},
294+
}
295+
296+
const mockCreate = vitest.fn().mockResolvedValue(mockStream)
297+
;(OpenAI as any).prototype.chat = { completions: { create: mockCreate } } as any
298+
299+
const handler = new OpenRouterHandler({
300+
openRouterApiKey: "test-key",
301+
openRouterModelId: "openai/gpt-5-2025-08-07",
302+
enableReasoningEffort: true,
303+
reasoningEffort: "minimal" as any,
304+
})
305+
306+
const gen = handler.createMessage("sys", [{ role: "user", content: "hi" } as any])
307+
for await (const _ of gen) {
308+
// drain
309+
}
310+
311+
const call = (mockCreate as any).mock.calls[0][0]
312+
expect(call.model).toBe("openai/gpt-5-2025-08-07")
313+
expect(call.include_reasoning).toBe(true)
314+
expect(call.reasoning).toEqual({ effort: "minimal" })
315+
})
316+
317+
it('defaults GPT-5 reasoning effort to "medium" when enabled but not specified', async () => {
318+
;(getModels as any).mockResolvedValueOnce({
319+
"openai/gpt-5-2025-08-07": {
320+
maxTokens: 8192,
321+
contextWindow: 128000,
322+
supportsPromptCache: false,
323+
supportsReasoningEffort: true,
324+
description: "GPT-5 via OpenRouter",
325+
},
326+
})
327+
328+
const mockStream = {
329+
async *[Symbol.asyncIterator]() {
330+
yield {
331+
id: "openai/gpt-5-2025-08-07",
332+
choices: [{ delta: { content: "Hi" } }],
333+
usage: { prompt_tokens: 1, completion_tokens: 2, cost: 0.0 },
334+
}
335+
},
336+
}
337+
338+
const mockCreate = vitest.fn().mockResolvedValue(mockStream)
339+
;(OpenAI as any).prototype.chat = { completions: { create: mockCreate } } as any
340+
341+
const handler = new OpenRouterHandler({
342+
openRouterApiKey: "test-key",
343+
openRouterModelId: "openai/gpt-5-2025-08-07",
344+
enableReasoningEffort: true,
345+
})
346+
347+
const gen = handler.createMessage("sys", [{ role: "user", content: "hi" } as any])
348+
for await (const _ of gen) {
349+
// drain
350+
}
351+
352+
const call = (mockCreate as any).mock.calls[0][0]
353+
expect(call.include_reasoning).toBe(true)
354+
expect(call.reasoning).toEqual({ effort: "medium" })
355+
})
270356
})
271357

272358
describe("completePrompt", () => {

src/api/providers/openrouter.ts

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
121121
messages: openAiMessages,
122122
stream: true,
123123
stream_options: { include_usage: true },
124+
// For GPT-5 via OpenRouter, request reasoning content in the stream explicitly
125+
...(modelId.startsWith("openai/gpt-5") && { include_reasoning: true }),
124126
// Only include provider if openRouterSpecificProvider is not "[default]".
125127
...(this.options.openRouterSpecificProvider &&
126128
this.options.openRouterSpecificProvider !== OPENROUTER_DEFAULT_PROVIDER_NAME && {
@@ -208,7 +210,14 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
208210
defaultTemperature: isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0,
209211
})
210212

211-
return { id, info, topP: isDeepSeekR1 ? 0.95 : undefined, ...params }
213+
// Apply GPT-5 defaults for OpenRouter: default reasoning effort to "medium" when enabled
214+
let adjustedParams = params
215+
if (id.startsWith("openai/gpt-5") && !params.reasoning && this.options.enableReasoningEffort !== false) {
216+
const effort = (this.options.reasoningEffort as any) ?? "medium"
217+
adjustedParams = { ...params, reasoning: { effort } as OpenRouterReasoningParams }
218+
}
219+
220+
return { id, info, topP: isDeepSeekR1 ? 0.95 : undefined, ...adjustedParams }
212221
}
213222

214223
async completePrompt(prompt: string) {
@@ -220,6 +229,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
220229
temperature,
221230
messages: [{ role: "user", content: prompt }],
222231
stream: false,
232+
// For GPT-5 via OpenRouter, request reasoning details explicitly as well
233+
...(modelId.startsWith("openai/gpt-5") && { include_reasoning: true }),
223234
// Only include provider if openRouterSpecificProvider is not "[default]".
224235
...(this.options.openRouterSpecificProvider &&
225236
this.options.openRouterSpecificProvider !== OPENROUTER_DEFAULT_PROVIDER_NAME && {

0 commit comments

Comments (0)