File tree (expand/collapse): 12 files changed, +32 −37 lines changed
lines changed Original file line number Diff line number Diff line change @@ -58,7 +58,7 @@ export const anthropicProvider: ProviderConfig = {
5858 throw new Error ( 'API key is required for Anthropic' )
5959 }
6060
61- const modelId = request . model || 'claude-3-7-sonnet-20250219'
61+ const modelId = request . model
6262 const useNativeStructuredOutputs = ! ! (
6363 request . responseFormat && supportsNativeStructuredOutputs ( modelId )
6464 )
@@ -174,7 +174,7 @@ export const anthropicProvider: ProviderConfig = {
174174 }
175175
176176 const payload : any = {
177- model : request . model || 'claude-3-7-sonnet-20250219' ,
177+ model : request . model ,
178178 messages,
179179 system : systemPrompt ,
180180 max_tokens : Number . parseInt ( String ( request . maxTokens ) ) || 1024 ,
@@ -608,7 +608,7 @@ export const anthropicProvider: ProviderConfig = {
608608 success : true ,
609609 output : {
610610 content : '' ,
611- model : request . model || 'claude-3-7-sonnet-20250219' ,
611+ model : request . model ,
612612 tokens : {
613613 prompt : tokens . prompt ,
614614 completion : tokens . completion ,
@@ -990,7 +990,7 @@ export const anthropicProvider: ProviderConfig = {
990990 success : true ,
991991 output : {
992992 content : '' ,
993- model : request . model || 'claude-3-7-sonnet-20250219' ,
993+ model : request . model ,
994994 tokens : {
995995 prompt : tokens . prompt ,
996996 completion : tokens . completion ,
@@ -1034,7 +1034,7 @@ export const anthropicProvider: ProviderConfig = {
10341034
10351035 return {
10361036 content,
1037- model : request . model || 'claude-3-7-sonnet-20250219' ,
1037+ model : request . model ,
10381038 tokens,
10391039 toolCalls :
10401040 toolCalls . length > 0
Original file line number Diff line number Diff line change @@ -39,7 +39,7 @@ export const azureOpenAIProvider: ProviderConfig = {
3939 request : ProviderRequest
4040 ) : Promise < ProviderResponse | StreamingExecution > => {
4141 logger . info ( 'Preparing Azure OpenAI request' , {
42- model : request . model || 'azure/gpt-4o' ,
42+ model : request . model ,
4343 hasSystemPrompt : ! ! request . systemPrompt ,
4444 hasMessages : ! ! request . messages ?. length ,
4545 hasTools : ! ! request . tools ?. length ,
@@ -95,7 +95,7 @@ export const azureOpenAIProvider: ProviderConfig = {
9595 } ) )
9696 : undefined
9797
98- const deploymentName = ( request . model || 'azure/gpt-4o' ) . replace ( 'azure/' , '' )
98+ const deploymentName = request . model . replace ( 'azure/' , '' )
9999 const payload : any = {
100100 model : deploymentName ,
101101 messages : allMessages ,
Original file line number Diff line number Diff line change @@ -73,7 +73,7 @@ export const cerebrasProvider: ProviderConfig = {
7373 : undefined
7474
7575 const payload : any = {
76- model : ( request . model || 'cerebras/llama-3.3-70b' ) . replace ( 'cerebras/' , '' ) ,
76+ model : request . model . replace ( 'cerebras/' , '' ) ,
7777 messages : allMessages ,
7878 }
7979 if ( request . temperature !== undefined ) payload . temperature = request . temperature
@@ -145,7 +145,7 @@ export const cerebrasProvider: ProviderConfig = {
145145 success : true ,
146146 output : {
147147 content : '' ,
148- model : request . model || 'cerebras/llama-3.3-70b' ,
148+ model : request . model ,
149149 tokens : { prompt : 0 , completion : 0 , total : 0 } ,
150150 toolCalls : undefined ,
151151 providerTiming : {
@@ -470,7 +470,7 @@ export const cerebrasProvider: ProviderConfig = {
470470 success : true ,
471471 output : {
472472 content : '' ,
473- model : request . model || 'cerebras/llama-3.3-70b' ,
473+ model : request . model ,
474474 tokens : {
475475 prompt : tokens . prompt ,
476476 completion : tokens . completion ,
Original file line number Diff line number Diff line change @@ -105,7 +105,7 @@ export const deepseekProvider: ProviderConfig = {
105105 : toolChoice . type === 'any'
106106 ? `force:${ toolChoice . any ?. name || 'unknown' } `
107107 : 'unknown' ,
108- model : request . model || 'deepseek-v3' ,
108+ model : request . model ,
109109 } )
110110 }
111111 }
@@ -145,7 +145,7 @@ export const deepseekProvider: ProviderConfig = {
145145 success : true ,
146146 output : {
147147 content : '' ,
148- model : request . model || 'deepseek-chat' ,
148+ model : request . model ,
149149 tokens : { prompt : 0 , completion : 0 , total : 0 } ,
150150 toolCalls : undefined ,
151151 providerTiming : {
@@ -469,7 +469,7 @@ export const deepseekProvider: ProviderConfig = {
469469 success : true ,
470470 output : {
471471 content : '' ,
472- model : request . model || 'deepseek-chat' ,
472+ model : request . model ,
473473 tokens : {
474474 prompt : tokens . prompt ,
475475 completion : tokens . completion ,
Original file line number Diff line number Diff line change @@ -28,17 +28,15 @@ export const googleProvider: ProviderConfig = {
2828 throw new Error ( 'API key is required for Google Gemini' )
2929 }
3030
31- const model = request . model || 'gemini-2.5-pro'
32-
33- logger . info ( 'Creating Google Gemini client' , { model } )
31+ logger . info ( 'Creating Google Gemini client' , { model : request . model } )
3432
3533 // Create client with API key
3634 const ai = new GoogleGenAI ( { apiKey : request . apiKey } )
3735
3836 // Use shared execution logic
3937 return executeGeminiRequest ( {
4038 ai,
41- model,
39+ model : request . model ,
4240 request,
4341 providerType : 'google' ,
4442 } )
Original file line number Diff line number Diff line change @@ -69,10 +69,7 @@ export const groqProvider: ProviderConfig = {
6969 : undefined
7070
7171 const payload : any = {
72- model : ( request . model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct' ) . replace (
73- 'groq/' ,
74- ''
75- ) ,
72+ model : request . model . replace ( 'groq/' , '' ) ,
7673 messages : allMessages ,
7774 }
7875
@@ -109,7 +106,7 @@ export const groqProvider: ProviderConfig = {
109106 toolChoice : payload . tool_choice ,
110107 forcedToolsCount : forcedTools . length ,
111108 hasFilteredTools,
112- model : request . model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct' ,
109+ model : request . model ,
113110 } )
114111 }
115112 }
@@ -149,7 +146,7 @@ export const groqProvider: ProviderConfig = {
149146 success : true ,
150147 output : {
151148 content : '' ,
152- model : request . model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct' ,
149+ model : request . model ,
153150 tokens : { prompt : 0 , completion : 0 , total : 0 } ,
154151 toolCalls : undefined ,
155152 providerTiming : {
@@ -425,7 +422,7 @@ export const groqProvider: ProviderConfig = {
425422 success : true ,
426423 output : {
427424 content : '' ,
428- model : request . model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct' ,
425+ model : request . model ,
429426 tokens : {
430427 prompt : tokens . prompt ,
431428 completion : tokens . completion ,
Original file line number Diff line number Diff line change @@ -36,7 +36,7 @@ export const mistralProvider: ProviderConfig = {
3636 request : ProviderRequest
3737 ) : Promise < ProviderResponse | StreamingExecution > => {
3838 logger . info ( 'Preparing Mistral request' , {
39- model : request . model || 'mistral-large-latest' ,
39+ model : request . model ,
4040 hasSystemPrompt : ! ! request . systemPrompt ,
4141 hasMessages : ! ! request . messages ?. length ,
4242 hasTools : ! ! request . tools ?. length ,
@@ -86,7 +86,7 @@ export const mistralProvider: ProviderConfig = {
8686 : undefined
8787
8888 const payload : any = {
89- model : request . model || 'mistral-large-latest' ,
89+ model : request . model ,
9090 messages : allMessages ,
9191 }
9292
@@ -126,7 +126,7 @@ export const mistralProvider: ProviderConfig = {
126126 : toolChoice . type === 'any'
127127 ? `force:${ toolChoice . any ?. name || 'unknown' } `
128128 : 'unknown' ,
129- model : request . model || 'mistral-large-latest' ,
129+ model : request . model ,
130130 } )
131131 }
132132 }
Original file line number Diff line number Diff line change @@ -33,7 +33,7 @@ export const openaiProvider: ProviderConfig = {
3333 request : ProviderRequest
3434 ) : Promise < ProviderResponse | StreamingExecution > => {
3535 logger . info ( 'Preparing OpenAI request' , {
36- model : request . model || 'gpt-4o' ,
36+ model : request . model ,
3737 hasSystemPrompt : ! ! request . systemPrompt ,
3838 hasMessages : ! ! request . messages ?. length ,
3939 hasTools : ! ! request . tools ?. length ,
@@ -76,7 +76,7 @@ export const openaiProvider: ProviderConfig = {
7676 : undefined
7777
7878 const payload : any = {
79- model : request . model || 'gpt-4o' ,
79+ model : request . model ,
8080 messages : allMessages ,
8181 }
8282
@@ -121,7 +121,7 @@ export const openaiProvider: ProviderConfig = {
121121 : toolChoice . type === 'any'
122122 ? `force:${ toolChoice . any ?. name || 'unknown' } `
123123 : 'unknown' ,
124- model : request . model || 'gpt-4o' ,
124+ model : request . model ,
125125 } )
126126 }
127127 }
Original file line number Diff line number Diff line change @@ -78,7 +78,7 @@ export const openRouterProvider: ProviderConfig = {
7878 baseURL : 'https://openrouter.ai/api/v1' ,
7979 } )
8080
81- const requestedModel = ( request . model || '' ) . replace ( / ^ o p e n r o u t e r \/ / , '' )
81+ const requestedModel = request . model . replace ( / ^ o p e n r o u t e r \/ / , '' )
8282
8383 logger . info ( 'Preparing OpenRouter request' , {
8484 model : requestedModel ,
Original file line number Diff line number Diff line change @@ -46,7 +46,7 @@ export const vertexProvider: ProviderConfig = {
4646 }
4747
4848 // Strip 'vertex/' prefix from model name if present
49- const model = ( request . model || 'vertex/gemini-2.5-pro' ) . replace ( 'vertex/' , '' )
49+ const model = request . model . replace ( 'vertex/' , '' )
5050
5151 logger . info ( 'Creating Vertex AI client' , {
5252 project : vertexProject ,
You can’t perform that action at this time.
0 commit comments