Skip to content

Commit f47d739

Browse files
committed
never silently use different model
1 parent 2a7f52f commit f47d739

File tree

12 files changed: +32 additions, −37 deletions

apps/sim/providers/anthropic/index.ts

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ export const anthropicProvider: ProviderConfig = {
5858
throw new Error('API key is required for Anthropic')
5959
}
6060

61-
const modelId = request.model || 'claude-3-7-sonnet-20250219'
61+
const modelId = request.model
6262
const useNativeStructuredOutputs = !!(
6363
request.responseFormat && supportsNativeStructuredOutputs(modelId)
6464
)
@@ -174,7 +174,7 @@ export const anthropicProvider: ProviderConfig = {
174174
}
175175

176176
const payload: any = {
177-
model: request.model || 'claude-3-7-sonnet-20250219',
177+
model: request.model,
178178
messages,
179179
system: systemPrompt,
180180
max_tokens: Number.parseInt(String(request.maxTokens)) || 1024,
@@ -608,7 +608,7 @@ export const anthropicProvider: ProviderConfig = {
608608
success: true,
609609
output: {
610610
content: '',
611-
model: request.model || 'claude-3-7-sonnet-20250219',
611+
model: request.model,
612612
tokens: {
613613
prompt: tokens.prompt,
614614
completion: tokens.completion,
@@ -990,7 +990,7 @@ export const anthropicProvider: ProviderConfig = {
990990
success: true,
991991
output: {
992992
content: '',
993-
model: request.model || 'claude-3-7-sonnet-20250219',
993+
model: request.model,
994994
tokens: {
995995
prompt: tokens.prompt,
996996
completion: tokens.completion,
@@ -1034,7 +1034,7 @@ export const anthropicProvider: ProviderConfig = {
10341034

10351035
return {
10361036
content,
1037-
model: request.model || 'claude-3-7-sonnet-20250219',
1037+
model: request.model,
10381038
tokens,
10391039
toolCalls:
10401040
toolCalls.length > 0

apps/sim/providers/azure-openai/index.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ export const azureOpenAIProvider: ProviderConfig = {
3939
request: ProviderRequest
4040
): Promise<ProviderResponse | StreamingExecution> => {
4141
logger.info('Preparing Azure OpenAI request', {
42-
model: request.model || 'azure/gpt-4o',
42+
model: request.model,
4343
hasSystemPrompt: !!request.systemPrompt,
4444
hasMessages: !!request.messages?.length,
4545
hasTools: !!request.tools?.length,
@@ -95,7 +95,7 @@ export const azureOpenAIProvider: ProviderConfig = {
9595
}))
9696
: undefined
9797

98-
const deploymentName = (request.model || 'azure/gpt-4o').replace('azure/', '')
98+
const deploymentName = request.model.replace('azure/', '')
9999
const payload: any = {
100100
model: deploymentName,
101101
messages: allMessages,

apps/sim/providers/cerebras/index.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ export const cerebrasProvider: ProviderConfig = {
7373
: undefined
7474

7575
const payload: any = {
76-
model: (request.model || 'cerebras/llama-3.3-70b').replace('cerebras/', ''),
76+
model: request.model.replace('cerebras/', ''),
7777
messages: allMessages,
7878
}
7979
if (request.temperature !== undefined) payload.temperature = request.temperature
@@ -145,7 +145,7 @@ export const cerebrasProvider: ProviderConfig = {
145145
success: true,
146146
output: {
147147
content: '',
148-
model: request.model || 'cerebras/llama-3.3-70b',
148+
model: request.model,
149149
tokens: { prompt: 0, completion: 0, total: 0 },
150150
toolCalls: undefined,
151151
providerTiming: {
@@ -470,7 +470,7 @@ export const cerebrasProvider: ProviderConfig = {
470470
success: true,
471471
output: {
472472
content: '',
473-
model: request.model || 'cerebras/llama-3.3-70b',
473+
model: request.model,
474474
tokens: {
475475
prompt: tokens.prompt,
476476
completion: tokens.completion,

apps/sim/providers/deepseek/index.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ export const deepseekProvider: ProviderConfig = {
105105
: toolChoice.type === 'any'
106106
? `force:${toolChoice.any?.name || 'unknown'}`
107107
: 'unknown',
108-
model: request.model || 'deepseek-v3',
108+
model: request.model,
109109
})
110110
}
111111
}
@@ -145,7 +145,7 @@ export const deepseekProvider: ProviderConfig = {
145145
success: true,
146146
output: {
147147
content: '',
148-
model: request.model || 'deepseek-chat',
148+
model: request.model,
149149
tokens: { prompt: 0, completion: 0, total: 0 },
150150
toolCalls: undefined,
151151
providerTiming: {
@@ -469,7 +469,7 @@ export const deepseekProvider: ProviderConfig = {
469469
success: true,
470470
output: {
471471
content: '',
472-
model: request.model || 'deepseek-chat',
472+
model: request.model,
473473
tokens: {
474474
prompt: tokens.prompt,
475475
completion: tokens.completion,

apps/sim/providers/google/index.ts

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,17 +28,15 @@ export const googleProvider: ProviderConfig = {
2828
throw new Error('API key is required for Google Gemini')
2929
}
3030

31-
const model = request.model || 'gemini-2.5-pro'
32-
33-
logger.info('Creating Google Gemini client', { model })
31+
logger.info('Creating Google Gemini client', { model: request.model })
3432

3533
// Create client with API key
3634
const ai = new GoogleGenAI({ apiKey: request.apiKey })
3735

3836
// Use shared execution logic
3937
return executeGeminiRequest({
4038
ai,
41-
model,
39+
model: request.model,
4240
request,
4341
providerType: 'google',
4442
})

apps/sim/providers/groq/index.ts

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -69,10 +69,7 @@ export const groqProvider: ProviderConfig = {
6969
: undefined
7070

7171
const payload: any = {
72-
model: (request.model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct').replace(
73-
'groq/',
74-
''
75-
),
72+
model: request.model.replace('groq/', ''),
7673
messages: allMessages,
7774
}
7875

@@ -109,7 +106,7 @@ export const groqProvider: ProviderConfig = {
109106
toolChoice: payload.tool_choice,
110107
forcedToolsCount: forcedTools.length,
111108
hasFilteredTools,
112-
model: request.model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct',
109+
model: request.model,
113110
})
114111
}
115112
}
@@ -149,7 +146,7 @@ export const groqProvider: ProviderConfig = {
149146
success: true,
150147
output: {
151148
content: '',
152-
model: request.model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct',
149+
model: request.model,
153150
tokens: { prompt: 0, completion: 0, total: 0 },
154151
toolCalls: undefined,
155152
providerTiming: {
@@ -425,7 +422,7 @@ export const groqProvider: ProviderConfig = {
425422
success: true,
426423
output: {
427424
content: '',
428-
model: request.model || 'groq/meta-llama/llama-4-scout-17b-16e-instruct',
425+
model: request.model,
429426
tokens: {
430427
prompt: tokens.prompt,
431428
completion: tokens.completion,

apps/sim/providers/mistral/index.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ export const mistralProvider: ProviderConfig = {
3636
request: ProviderRequest
3737
): Promise<ProviderResponse | StreamingExecution> => {
3838
logger.info('Preparing Mistral request', {
39-
model: request.model || 'mistral-large-latest',
39+
model: request.model,
4040
hasSystemPrompt: !!request.systemPrompt,
4141
hasMessages: !!request.messages?.length,
4242
hasTools: !!request.tools?.length,
@@ -86,7 +86,7 @@ export const mistralProvider: ProviderConfig = {
8686
: undefined
8787

8888
const payload: any = {
89-
model: request.model || 'mistral-large-latest',
89+
model: request.model,
9090
messages: allMessages,
9191
}
9292

@@ -126,7 +126,7 @@ export const mistralProvider: ProviderConfig = {
126126
: toolChoice.type === 'any'
127127
? `force:${toolChoice.any?.name || 'unknown'}`
128128
: 'unknown',
129-
model: request.model || 'mistral-large-latest',
129+
model: request.model,
130130
})
131131
}
132132
}

apps/sim/providers/openai/index.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ export const openaiProvider: ProviderConfig = {
3333
request: ProviderRequest
3434
): Promise<ProviderResponse | StreamingExecution> => {
3535
logger.info('Preparing OpenAI request', {
36-
model: request.model || 'gpt-4o',
36+
model: request.model,
3737
hasSystemPrompt: !!request.systemPrompt,
3838
hasMessages: !!request.messages?.length,
3939
hasTools: !!request.tools?.length,
@@ -76,7 +76,7 @@ export const openaiProvider: ProviderConfig = {
7676
: undefined
7777

7878
const payload: any = {
79-
model: request.model || 'gpt-4o',
79+
model: request.model,
8080
messages: allMessages,
8181
}
8282

@@ -121,7 +121,7 @@ export const openaiProvider: ProviderConfig = {
121121
: toolChoice.type === 'any'
122122
? `force:${toolChoice.any?.name || 'unknown'}`
123123
: 'unknown',
124-
model: request.model || 'gpt-4o',
124+
model: request.model,
125125
})
126126
}
127127
}

apps/sim/providers/openrouter/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ export const openRouterProvider: ProviderConfig = {
7878
baseURL: 'https://openrouter.ai/api/v1',
7979
})
8080

81-
const requestedModel = (request.model || '').replace(/^openrouter\//, '')
81+
const requestedModel = request.model.replace(/^openrouter\//, '')
8282

8383
logger.info('Preparing OpenRouter request', {
8484
model: requestedModel,

apps/sim/providers/vertex/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ export const vertexProvider: ProviderConfig = {
4646
}
4747

4848
// Strip 'vertex/' prefix from model name if present
49-
const model = (request.model || 'vertex/gemini-2.5-pro').replace('vertex/', '')
49+
const model = request.model.replace('vertex/', '')
5050

5151
logger.info('Creating Vertex AI client', {
5252
project: vertexProject,

0 commit comments

Comments (0)