@@ -267,76 +267,6 @@ describe("ProviderTransform.maxOutputTokens", () => {
       expect(result).toBe(OUTPUT_TOKEN_MAX)
     })
   })
-
-  describe("openai-compatible with thinking options (snake_case)", () => {
-    test("returns 32k when budget_tokens + 32k <= modelLimit", () => {
-      const modelLimit = 100000
-      const options = {
-        thinking: {
-          type: "enabled",
-          budget_tokens: 10000,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens(
-        "@ai-sdk/openai-compatible",
-        options,
-        modelLimit,
-        OUTPUT_TOKEN_MAX,
-      )
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-
-    test("returns modelLimit - budget_tokens when budget_tokens + 32k > modelLimit", () => {
-      const modelLimit = 50000
-      const options = {
-        thinking: {
-          type: "enabled",
-          budget_tokens: 30000,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens(
-        "@ai-sdk/openai-compatible",
-        options,
-        modelLimit,
-        OUTPUT_TOKEN_MAX,
-      )
-      expect(result).toBe(20000)
-    })
-
-    test("returns 32k when thinking type is not enabled", () => {
-      const modelLimit = 100000
-      const options = {
-        thinking: {
-          type: "disabled",
-          budget_tokens: 10000,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens(
-        "@ai-sdk/openai-compatible",
-        options,
-        modelLimit,
-        OUTPUT_TOKEN_MAX,
-      )
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-
-    test("returns 32k when budget_tokens is 0", () => {
-      const modelLimit = 100000
-      const options = {
-        thinking: {
-          type: "enabled",
-          budget_tokens: 0,
-        },
-      }
-      const result = ProviderTransform.maxOutputTokens(
-        "@ai-sdk/openai-compatible",
-        options,
-        modelLimit,
-        OUTPUT_TOKEN_MAX,
-      )
-      expect(result).toBe(OUTPUT_TOKEN_MAX)
-    })
-  })
 })
 
 describe("ProviderTransform.schema - gemini array items", () => {
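
Note on the removed maxOutputTokens tests above: they pin the output cap for "@ai-sdk/openai-compatible" when snake_case thinking options are present, shrinking it only when the reasoning budget plus the 32k default would overflow the model limit. A minimal sketch of that behavior, inferred solely from the deleted assertions (the function and parameter names below are assumptions for illustration, not the actual implementation):

// Sketch only: inferred from the removed tests, not the real ProviderTransform code.
// "outputTokenMax" stands in for the 32k OUTPUT_TOKEN_MAX constant used above.
function maxOutputTokensSketch(
  options: { thinking?: { type?: string; budget_tokens?: number } },
  modelLimit: number,
  outputTokenMax: number,
): number {
  const thinking = options.thinking
  const budget = thinking?.type === "enabled" ? (thinking.budget_tokens ?? 0) : 0
  // Only shrink the cap when an enabled, non-zero budget plus the default cap
  // would exceed the model's output limit; otherwise keep the 32k default.
  if (budget > 0 && budget + outputTokenMax > modelLimit) {
    return modelLimit - budget
  }
  return outputTokenMax
}

Against the deleted cases: 10000 + 32000 fits under 100000, so the default is kept; 30000 + 32000 overflows 50000, so the cap drops to 20000; disabled thinking or a zero budget leaves the default untouched.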
@@ -1564,67 +1494,6 @@ describe("ProviderTransform.variants", () => {
       expect(result.low).toEqual({ reasoningEffort: "low" })
       expect(result.high).toEqual({ reasoningEffort: "high" })
     })
-
-    test("Claude via LiteLLM returns thinking with snake_case budget_tokens", () => {
-      const model = createMockModel({
-        id: "anthropic/claude-sonnet-4-5",
-        providerID: "anthropic",
-        api: {
-          id: "claude-sonnet-4-5-20250929",
-          url: "http://localhost:4000",
-          npm: "@ai-sdk/openai-compatible",
-        },
-      })
-      const result = ProviderTransform.variants(model)
-      expect(Object.keys(result)).toEqual(["high", "max"])
-      expect(result.high).toEqual({
-        thinking: {
-          type: "enabled",
-          budget_tokens: 16000,
-        },
-      })
-      expect(result.max).toEqual({
-        thinking: {
-          type: "enabled",
-          budget_tokens: 31999,
-        },
-      })
-    })
-
-    test("Claude model (by model.id) via openai-compatible uses snake_case", () => {
-      const model = createMockModel({
-        id: "litellm/claude-3-opus",
-        providerID: "litellm",
-        api: {
-          id: "claude-3-opus-20240229",
-          url: "http://localhost:4000",
-          npm: "@ai-sdk/openai-compatible",
-        },
-      })
-      const result = ProviderTransform.variants(model)
-      expect(Object.keys(result)).toEqual(["high", "max"])
-      expect(result.high).toEqual({
-        thinking: {
-          type: "enabled",
-          budget_tokens: 16000,
-        },
-      })
-    })
-
-    test("Anthropic model (by model.api.id) via openai-compatible uses snake_case", () => {
-      const model = createMockModel({
-        id: "custom/my-model",
-        providerID: "custom",
-        api: {
-          id: "anthropic.claude-sonnet",
-          url: "http://localhost:4000",
-          npm: "@ai-sdk/openai-compatible",
-        },
-      })
-      const result = ProviderTransform.variants(model)
-      expect(Object.keys(result)).toEqual(["high", "max"])
-      expect(result.high.thinking.budget_tokens).toBe(16000)
-    })
   })
 
   describe("@ai-sdk/azure", () => {
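
Note on the removed variants tests above: for Claude/Anthropic models routed through "@ai-sdk/openai-compatible" (for example behind LiteLLM), they asserted "high" and "max" variants carrying snake_case thinking budgets of 16000 and 31999. A rough sketch of that shape follows; the name-matching helper is a guess at the detection rule implied by the tests (model.id or model.api.id naming), not the library's API:

// Sketch only: the claude/anthropic regex below is an assumption for illustration.
function claudeCompatVariantsSketch(model: { id: string; api: { id: string; npm?: string } }) {
  const looksClaude =
    /claude|anthropic/i.test(model.id) || /claude|anthropic/i.test(model.api.id)
  // Only the openai-compatible + Claude-family combination gets the snake_case
  // thinking variants asserted by the removed tests.
  if (model.api.npm !== "@ai-sdk/openai-compatible" || !looksClaude) return {}
  return {
    high: { thinking: { type: "enabled", budget_tokens: 16000 } },
    max: { thinking: { type: "enabled", budget_tokens: 31999 } },
  }
}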