Commit 6b159c9

fix(openrouter): pass minimal reasoning effort to OpenRouter; align tests
1 parent 0cfd314 commit 6b159c9
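
In short, the change makes the OpenRouter handler forward a "minimal" reasoning effort to OpenRouter unchanged. As a rough sketch (not part of the commit), the request payload the handler is now expected to produce matches the shape asserted in the new test below; any field not asserted there is an assumption:

// Illustrative only: mirrors the fields the new test asserts on; other fields are omitted.
const expectedOpenRouterRequest = {
	model: "openai/o1-pro",
	stream: true,
	// OpenRouter's reasoning parameter; "minimal" is forwarded as-is.
	reasoning: { effort: "minimal" },
}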

2 files changed: +83 -8 lines changed

src/api/providers/__tests__/openrouter.spec.ts

Lines changed: 68 additions & 0 deletions
@@ -321,3 +321,71 @@ describe("OpenRouterHandler", () => {
 		})
 	})
 })
+
+describe("reasoning effort mapping (OpenRouter)", () => {
+	it("passes 'minimal' through in reasoning.effort for OpenRouter requests", async () => {
+		const handler = new OpenRouterHandler({
+			openRouterApiKey: "test-key",
+			openRouterModelId: "openai/o1-pro",
+			reasoningEffort: "minimal",
+		} as ApiHandlerOptions)
+
+		// Prepare a model that supports reasoning effort (not budget)
+		;(handler as any).models = {
+			"openai/o1-pro": {
+				maxTokens: 8192,
+				contextWindow: 200000,
+				supportsImages: true,
+				supportsPromptCache: true,
+				inputPrice: 0.0,
+				outputPrice: 0.0,
+				description: "o1-pro test",
+				supportsReasoningEffort: true,
+			},
+		}
+
+		// Ensure endpoints map is empty so base model info is used
+		;(handler as any).endpoints = {}
+
+		// Mock OpenAI client call
+		const mockCreate = vitest.fn().mockResolvedValue({
+			async *[Symbol.asyncIterator]() {
+				yield {
+					id: "openai/o1-pro",
+					choices: [{ delta: { content: "ok" } }],
+				}
+				yield {
+					id: "usage-id",
+					choices: [{ delta: {} }],
+					usage: { prompt_tokens: 1, completion_tokens: 1, cost: 0 },
+				}
+			},
+		})
+		;(OpenAI as any).prototype.chat = {
+			completions: { create: mockCreate },
+		} as any
+
+		const systemPrompt = "system"
+		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "hello" }]
+
+		// Stub fetchModel to use the handler's getModel (which applies getModelParams -> getOpenRouterReasoning)
+		const realGetModel = (handler as any).getModel.bind(handler)
+		;(handler as any).fetchModel = vitest.fn().mockImplementation(async () => realGetModel())
+
+		// Trigger a request
+		const gen = handler.createMessage(systemPrompt, messages)
+		// Drain iterator to ensure call is made
+		for await (const _ of gen) {
+			// noop
+		}
+
+		// Verify the API call included the normalized effort
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				model: "openai/o1-pro",
+				reasoning: { effort: "minimal" }, // 'minimal' should be preserved for OpenRouter
+				stream: true,
+			}),
+		)
+	})
+})

src/api/transform/reasoning.ts

Lines changed: 15 additions & 8 deletions
@@ -30,14 +30,21 @@ export const getOpenRouterReasoning = ({
 	reasoningBudget,
 	reasoningEffort,
 	settings,
-}: GetModelReasoningOptions): OpenRouterReasoningParams | undefined =>
-	shouldUseReasoningBudget({ model, settings })
-		? { max_tokens: reasoningBudget }
-		: shouldUseReasoningEffort({ model, settings })
-			? reasoningEffort
-				? { effort: reasoningEffort }
-				: undefined
-			: undefined
+}: GetModelReasoningOptions): OpenRouterReasoningParams | undefined => {
+	// If the model uses a budget-style reasoning config on OpenRouter, pass it through.
+	if (shouldUseReasoningBudget({ model, settings })) {
+		return { max_tokens: reasoningBudget! }
+	}
+
+	// Otherwise, if we support traditional reasoning effort, pass through the effort.
+	// Note: Some models (e.g., GPT-5 via OpenRouter) may support "minimal".
+	if (shouldUseReasoningEffort({ model, settings })) {
+		if (!reasoningEffort) return undefined
+		return { effort: reasoningEffort }
+	}
+
+	return undefined
+}
 
 export const getAnthropicReasoning = ({
 	model,

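For illustration, a minimal usage sketch of the rewritten helper (not part of the commit). The argument shapes are reduced to what the diff and the test above reveal, so the import path and the casts below are assumptions:

// Hypothetical call site; "./reasoning" and the reduced shapes are assumptions.
import { getOpenRouterReasoning } from "./reasoning"

const params = getOpenRouterReasoning({
	// Only supportsReasoningEffort is taken from the test above; the rest of the
	// ModelInfo and settings shapes are elided with casts for brevity.
	model: { supportsReasoningEffort: true } as any,
	reasoningBudget: undefined,
	reasoningEffort: "minimal",
	settings: {} as any,
})

// With this change, a "minimal" effort is passed through as-is:
// params should equal { effort: "minimal" } (assuming shouldUseReasoningEffort
// accepts this reduced model shape).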