-// npx jest src/api/transform/__tests__/model-params.test.ts
+// npx vitest run api/transform/__tests__/model-params.spec.ts

 import { type ModelInfo, ANTHROPIC_DEFAULT_MAX_TOKENS } from "@roo-code/types"

 import { getModelParams } from "../model-params"
+import {
+	DEFAULT_HYBRID_REASONING_MODEL_MAX_TOKENS,
+	DEFAULT_HYBRID_REASONING_MODEL_THINKING_TOKENS,
+} from "../../../shared/api"

 describe("getModelParams", () => {
 	const baseModel: ModelInfo = {
@@ -188,16 +192,15 @@ describe("getModelParams", () => {
 	it("should handle requiredReasoningBudget models correctly", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 2000,
 			requiredReasoningBudget: true,
 		}

-		expect(getModelParams({ ...anthropicParams, settings: {}, model })).toEqual({
+		expect(getModelParams({ ...anthropicParams, settings: { modelMaxTokens: 2000 }, model })).toEqual({
 			format: anthropicParams.format,
 			maxTokens: 2000,
 			temperature: 1.0, // Thinking models require temperature 1.0.
 			reasoningEffort: undefined,
-			reasoningBudget: 1600, // 80% of 2000,
+			reasoningBudget: 0.8 * 2000,
 			reasoning: {
 				type: "enabled",
 				budget_tokens: 1600,
@@ -208,13 +211,12 @@ describe("getModelParams", () => {
 	it("should handle supportsReasoningBudget with enableReasoningEffort setting", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 2000,
 			supportsReasoningBudget: true,
 		}

 		const result = getModelParams({
 			...anthropicParams,
-			settings: { enableReasoningEffort: true },
+			settings: { enableReasoningEffort: true, modelMaxTokens: 2000 },
 			model,
 		})

@@ -266,11 +268,16 @@ describe("getModelParams", () => {
 	it("should honor customMaxThinkingTokens for reasoning budget models", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 4000,
 			requiredReasoningBudget: true,
 		}

-		expect(getModelParams({ ...anthropicParams, settings: { modelMaxThinkingTokens: 1500 }, model })).toEqual({
+		expect(
+			getModelParams({
+				...anthropicParams,
+				settings: { modelMaxTokens: 4000, modelMaxThinkingTokens: 1500 },
+				model,
+			}),
+		).toEqual({
 			format: anthropicParams.format,
 			maxTokens: 4000,
 			temperature: 1.0,
@@ -302,11 +309,16 @@ describe("getModelParams", () => {
 	it("should clamp thinking budget to at least 1024 tokens", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 2000,
 			requiredReasoningBudget: true,
 		}

-		expect(getModelParams({ ...anthropicParams, settings: { modelMaxThinkingTokens: 500 }, model })).toEqual({
+		expect(
+			getModelParams({
+				...anthropicParams,
+				settings: { modelMaxTokens: 2000, modelMaxThinkingTokens: 500 },
+				model,
+			}),
+		).toEqual({
 			format: anthropicParams.format,
 			maxTokens: 2000,
 			temperature: 1.0,
@@ -322,38 +334,43 @@ describe("getModelParams", () => {
 	it("should clamp thinking budget to at most 80% of max tokens", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 4000,
 			requiredReasoningBudget: true,
 		}

-		expect(getModelParams({ ...anthropicParams, settings: { modelMaxThinkingTokens: 5000 }, model })).toEqual({
+		expect(
+			getModelParams({
+				...anthropicParams,
+				settings: { modelMaxTokens: 4000, modelMaxThinkingTokens: 5000 },
+				model,
+			}),
+		).toEqual({
 			format: anthropicParams.format,
 			maxTokens: 4000,
 			temperature: 1.0,
 			reasoningEffort: undefined,
-			reasoningBudget: 3200, // 80% of 4000
+			reasoningBudget: 0.8 * 4000,
 			reasoning: {
 				type: "enabled",
 				budget_tokens: 3200,
 			},
 		})
 	})

-	it("should use ANTHROPIC_DEFAULT_MAX_TOKENS when no maxTokens is provided for reasoning budget models", () => {
+	it("should use DEFAULT_HYBRID_REASONING_MODEL_MAX_TOKENS when no maxTokens is provided for reasoning budget models", () => {
 		const model: ModelInfo = {
 			...baseModel,
 			requiredReasoningBudget: true,
 		}

 		expect(getModelParams({ ...anthropicParams, settings: {}, model })).toEqual({
 			format: anthropicParams.format,
-			maxTokens: ANTHROPIC_DEFAULT_MAX_TOKENS,
+			maxTokens: DEFAULT_HYBRID_REASONING_MODEL_MAX_TOKENS,
 			temperature: 1.0,
 			reasoningEffort: undefined,
-			reasoningBudget: Math.floor(ANTHROPIC_DEFAULT_MAX_TOKENS * 0.8),
+			reasoningBudget: DEFAULT_HYBRID_REASONING_MODEL_THINKING_TOKENS,
 			reasoning: {
 				type: "enabled",
-				budget_tokens: Math.floor(ANTHROPIC_DEFAULT_MAX_TOKENS * 0.8),
+				budget_tokens: DEFAULT_HYBRID_REASONING_MODEL_THINKING_TOKENS,
 			},
 		})
 	})
@@ -539,17 +556,15 @@ describe("getModelParams", () => {
 			model,
 		})

-		// Should keep model's maxTokens when using reasoning
-		expect(result.maxTokens).toBe(8000)
-		expect(result.reasoningBudget).toBe(6400) // 80% of 8000
+		expect(result.maxTokens).toBe(16384) // Default value.
+		expect(result.reasoningBudget).toBe(8192) // Default value.
 	})
 })

 describe("Edge cases and combinations", () => {
 	it("should handle model with both reasoning capabilities but only one enabled", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 4000,
 			supportsReasoningBudget: true,
 			supportsReasoningEffort: true,
 			reasoningEffort: "medium",
@@ -558,7 +573,7 @@ describe("getModelParams", () => {
 		// Only reasoning budget should be used (takes precedence)
 		const result = getModelParams({
 			...anthropicParams,
-			settings: { enableReasoningEffort: true },
+			settings: { enableReasoningEffort: true, modelMaxTokens: 4000 },
 			model,
 		})

@@ -585,18 +600,17 @@ describe("getModelParams", () => {
 	it("should handle very small maxTokens for reasoning budget models", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 1000, // Less than minimum reasoning budget
 			requiredReasoningBudget: true,
 		}

 		const result = getModelParams({
 			...anthropicParams,
-			settings: {},
+			settings: { modelMaxTokens: 1000 }, // Less than minimum reasoning budget.
 			model,
 		})

 		expect(result.maxTokens).toBe(1000)
-		expect(result.reasoningBudget).toBe(1024) // Clamped to minimum
+		expect(result.reasoningBudget).toBe(1024) // Clamped to minimum.
 	})

 	it("should handle undefined settings", () => {
@@ -694,13 +708,12 @@ describe("getModelParams", () => {
 	it("should return correct reasoning format for openrouter with reasoning budget", () => {
 		const model: ModelInfo = {
 			...baseModel,
-			maxTokens: 4000,
 			requiredReasoningBudget: true,
 		}

 		const result = getModelParams({
 			...openrouterParams,
-			settings: {},
+			settings: { modelMaxTokens: 4000 },
 			model,
 		})
