@@ -9,6 +9,7 @@ import OpenAI from "openai"
 import { OpenRouterHandler } from "../openrouter"
 import { ApiHandlerOptions } from "../../../shared/api"
 import { Package } from "../../../shared/package"
+import { getModels } from "../fetchers/modelCache"
 
 // Mock dependencies
 vitest.mock("openai")
@@ -44,6 +45,10 @@ vitest.mock("../fetchers/modelCache", () => ({
 		})
 	}),
 }))
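+// Stub the endpoint cache as well so the handler never hits the network for endpoint metadata.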
+vitest.mock("../fetchers/modelEndpointCache", () => ({
+	getModelEndpoints: vitest.fn().mockResolvedValue({}),
+}))
 
 describe("OpenRouterHandler", () => {
 	const mockOptions: ApiHandlerOptions = {
@@ -267,6 +271,92 @@ describe("OpenRouterHandler", () => {
 		const generator = handler.createMessage("test", [])
 		await expect(generator.next()).rejects.toThrow("OpenRouter API Error 500: API Error")
 	})
+
+	it("passes reasoning effort and include_reasoning for GPT-5 models via OpenRouter", async () => {
+		;(getModels as any).mockResolvedValueOnce({
+			"openai/gpt-5-2025-08-07": {
+				maxTokens: 8192,
+				contextWindow: 128000,
+				supportsPromptCache: false,
+				supportsReasoningEffort: true,
+				description: "GPT-5 via OpenRouter",
+			},
+		})
+
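+		// Minimal async iterable standing in for the OpenAI SDK's streaming response.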
+		const mockStream = {
+			async *[Symbol.asyncIterator]() {
+				yield {
+					id: "openai/gpt-5-2025-08-07",
+					choices: [{ delta: { reasoning: "Thinking...", content: "Hello" } }],
+					usage: { prompt_tokens: 1, completion_tokens: 2, cost: 0.0 },
+				}
+			},
+		}
+
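+		// Replace the SDK's chat.completions.create with a spy so the outgoing request body can be inspected.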
+		const mockCreate = vitest.fn().mockResolvedValue(mockStream)
+		;(OpenAI as any).prototype.chat = { completions: { create: mockCreate } } as any
+
+		const handler = new OpenRouterHandler({
+			openRouterApiKey: "test-key",
+			openRouterModelId: "openai/gpt-5-2025-08-07",
+			enableReasoningEffort: true,
+			reasoningEffort: "minimal" as any,
+		})
+
+		const gen = handler.createMessage("sys", [{ role: "user", content: "hi" } as any])
+		for await (const _ of gen) {
+			// drain
+		}
+
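+		// The request sent to OpenRouter should carry both include_reasoning and the reasoning effort object.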
+		const call = (mockCreate as any).mock.calls[0][0]
+		expect(call.model).toBe("openai/gpt-5-2025-08-07")
+		expect(call.include_reasoning).toBe(true)
+		expect(call.reasoning).toEqual({ effort: "minimal" })
+	})
+
+	it('defaults GPT-5 reasoning effort to "medium" when enabled but not specified', async () => {
+		;(getModels as any).mockResolvedValueOnce({
+			"openai/gpt-5-2025-08-07": {
+				maxTokens: 8192,
+				contextWindow: 128000,
+				supportsPromptCache: false,
+				supportsReasoningEffort: true,
+				description: "GPT-5 via OpenRouter",
+			},
+		})
+
+		const mockStream = {
+			async *[Symbol.asyncIterator]() {
+				yield {
+					id: "openai/gpt-5-2025-08-07",
+					choices: [{ delta: { content: "Hi" } }],
+					usage: { prompt_tokens: 1, completion_tokens: 2, cost: 0.0 },
+				}
+			},
+		}
+
+		const mockCreate = vitest.fn().mockResolvedValue(mockStream)
+		;(OpenAI as any).prototype.chat = { completions: { create: mockCreate } } as any
+
+		const handler = new OpenRouterHandler({
+			openRouterApiKey: "test-key",
+			openRouterModelId: "openai/gpt-5-2025-08-07",
+			enableReasoningEffort: true,
+		})
+
+		const gen = handler.createMessage("sys", [{ role: "user", content: "hi" } as any])
+		for await (const _ of gen) {
+			// drain
+		}
+
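+		// Without an explicit reasoningEffort, the handler should fall back to "medium".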
+		const call = (mockCreate as any).mock.calls[0][0]
+		expect(call.include_reasoning).toBe(true)
+		expect(call.reasoning).toEqual({ effort: "medium" })
+	})
 	})
 
 	describe("completePrompt", () => {