Commit f8070f9

feat(models): added gpt-5.1 (#2007)
1 parent: bc8947c · commit: f8070f9

File tree: 4 files changed, +178 -4 lines changed

apps/docs/content/docs/en/blocks/agent.mdx

Lines changed: 3 additions & 3 deletions

```diff
@@ -42,10 +42,10 @@ The user prompt represents the primary input data for inference processing. This
 
 The Agent block supports multiple LLM providers through a unified inference interface. Available models include:
 
-- **OpenAI**: GPT-5, GPT-4o, o1, o3, o4-mini, gpt-4.1
-- **Anthropic**: Claude 3.7 Sonnet
+- **OpenAI**: GPT-5.1, GPT-5, GPT-4o, o1, o3, o4-mini, gpt-4.1
+- **Anthropic**: Claude 4.5 Sonnet, Claude Opus 4.1
 - **Google**: Gemini 2.5 Pro, Gemini 2.0 Flash
-- **Other Providers**: Groq, Cerebras, xAI, DeepSeek
+- **Other Providers**: Groq, Cerebras, xAI, Azure OpenAI, OpenRouter
 - **Local Models**: Ollama-compatible models
 
 ### Temperature
```

apps/sim/blocks/blocks/agent.ts

Lines changed: 1 addition & 0 deletions

```diff
@@ -210,6 +210,7 @@ Create a system prompt appropriately detailed for the request, using clear langu
       type: 'dropdown',
       placeholder: 'Select reasoning effort...',
       options: [
+        { label: 'none', id: 'none' },
         { label: 'minimal', id: 'minimal' },
         { label: 'low', id: 'low' },
         { label: 'medium', id: 'medium' },
```
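The commit itself only adds the `none` entry to this static options list. As a purely hypothetical sketch, not code from this repo, of how such a dropdown could be narrowed to the `reasoningEffort.values` that models.ts declares per model (for example, gpt-5.1 allows `none`/`low`/`medium`/`high` while gpt-5.1-codex omits `low`):

```ts
// Hypothetical illustration, not part of this commit. Assumes the UI filters
// the static option list against a model's capabilities.reasoningEffort.values.
type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high'

interface DropdownOption {
  label: string
  id: ReasoningEffort
}

const ALL_EFFORT_OPTIONS: DropdownOption[] = [
  { label: 'none', id: 'none' },
  { label: 'minimal', id: 'minimal' },
  { label: 'low', id: 'low' },
  { label: 'medium', id: 'medium' },
  { label: 'high', id: 'high' },
]

// `supportedValues` would come from the selected model's capability metadata.
function effortOptionsFor(supportedValues: string[]): DropdownOption[] {
  return ALL_EFFORT_OPTIONS.filter((opt) => supportedValues.includes(opt.id))
}
```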

apps/sim/providers/models.ts

Lines changed: 136 additions & 0 deletions

```diff
@@ -101,6 +101,74 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
         temperature: { min: 0, max: 2 },
       },
     },
+    {
+      id: 'gpt-5.1',
+      pricing: {
+        input: 1.25,
+        cachedInput: 0.125,
+        output: 10.0,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'low', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
+    {
+      id: 'gpt-5.1-mini',
+      pricing: {
+        input: 0.25,
+        cachedInput: 0.025,
+        output: 2.0,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'low', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
+    {
+      id: 'gpt-5.1-nano',
+      pricing: {
+        input: 0.05,
+        cachedInput: 0.005,
+        output: 0.4,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'low', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
+    {
+      id: 'gpt-5.1-codex',
+      pricing: {
+        input: 1.25,
+        cachedInput: 0.125,
+        output: 10.0,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
     {
       id: 'gpt-5',
       pricing: {
@@ -253,6 +321,74 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
         temperature: { min: 0, max: 2 },
       },
     },
+    {
+      id: 'azure/gpt-5.1',
+      pricing: {
+        input: 1.25,
+        cachedInput: 0.125,
+        output: 10.0,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'low', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
+    {
+      id: 'azure/gpt-5.1-mini',
+      pricing: {
+        input: 0.25,
+        cachedInput: 0.025,
+        output: 2.0,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'low', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
+    {
+      id: 'azure/gpt-5.1-nano',
+      pricing: {
+        input: 0.05,
+        cachedInput: 0.005,
+        output: 0.4,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'low', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
+    {
+      id: 'azure/gpt-5.1-codex',
+      pricing: {
+        input: 1.25,
+        cachedInput: 0.125,
+        output: 10.0,
+        updatedAt: '2025-11-14',
+      },
+      capabilities: {
+        reasoningEffort: {
+          values: ['none', 'medium', 'high'],
+        },
+        verbosity: {
+          values: ['low', 'medium', 'high'],
+        },
+      },
+    },
     {
       id: 'azure/gpt-5',
       pricing: {
```
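The new entries only declare pricing and capability metadata. As an illustration of how the pricing fields might be consumed, here is a small cost-estimation sketch; it assumes (the diff does not state this) that `input`, `cachedInput`, and `output` are USD per one million tokens, the usual convention, and the helper below is hypothetical rather than the repo's own billing code.

```ts
// Illustrative only: estimate a request's cost from the pricing shape above.
interface ModelPricing {
  input: number        // assumed: USD per 1M fresh input tokens
  cachedInput?: number // assumed: USD per 1M cached input tokens
  output: number       // assumed: USD per 1M output tokens
  updatedAt: string
}

const GPT_5_1_PRICING: ModelPricing = {
  input: 1.25,
  cachedInput: 0.125,
  output: 10.0,
  updatedAt: '2025-11-14',
}

function estimateCostUSD(
  pricing: ModelPricing,
  tokens: { input: number; cachedInput?: number; output: number }
): number {
  const perToken = (ratePerMillion: number) => ratePerMillion / 1_000_000
  return (
    tokens.input * perToken(pricing.input) +
    (tokens.cachedInput ?? 0) * perToken(pricing.cachedInput ?? pricing.input) +
    tokens.output * perToken(pricing.output)
  )
}

// Example: 10k fresh input + 50k cached input + 2k output tokens on gpt-5.1
// ≈ 0.0125 + 0.00625 + 0.02 = $0.03875.
const cost = estimateCostUSD(GPT_5_1_PRICING, { input: 10_000, cachedInput: 50_000, output: 2_000 })
```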

apps/sim/providers/utils.test.ts

Lines changed: 38 additions & 1 deletion

```diff
@@ -35,7 +35,6 @@ const mockGetRotatingApiKey = vi.fn().mockReturnValue('rotating-server-key')
 const originalRequire = module.require
 
 describe('getApiKey', () => {
-  // Save original env and reset between tests
   const originalEnv = { ...process.env }
 
   beforeEach(() => {
@@ -146,6 +145,15 @@ describe('Model Capabilities', () => {
       'deepseek-chat',
       'azure/gpt-4.1',
       'azure/model-router',
+      // GPT-5.1 models don't support temperature (removed in our implementation)
+      'gpt-5.1',
+      'gpt-5.1-mini',
+      'gpt-5.1-nano',
+      'gpt-5.1-codex',
+      'azure/gpt-5.1',
+      'azure/gpt-5.1-mini',
+      'azure/gpt-5.1-nano',
+      'azure/gpt-5.1-codex',
       // GPT-5 models don't support temperature (removed in our implementation)
       'gpt-5',
       'gpt-5-mini',
@@ -218,6 +226,15 @@ describe('Model Capabilities', () => {
     expect(getMaxTemperature('azure/o3')).toBeUndefined()
     expect(getMaxTemperature('azure/o4-mini')).toBeUndefined()
     expect(getMaxTemperature('deepseek-r1')).toBeUndefined()
+    // GPT-5.1 models don't support temperature
+    expect(getMaxTemperature('gpt-5.1')).toBeUndefined()
+    expect(getMaxTemperature('gpt-5.1-mini')).toBeUndefined()
+    expect(getMaxTemperature('gpt-5.1-nano')).toBeUndefined()
+    expect(getMaxTemperature('gpt-5.1-codex')).toBeUndefined()
+    expect(getMaxTemperature('azure/gpt-5.1')).toBeUndefined()
+    expect(getMaxTemperature('azure/gpt-5.1-mini')).toBeUndefined()
+    expect(getMaxTemperature('azure/gpt-5.1-nano')).toBeUndefined()
+    expect(getMaxTemperature('azure/gpt-5.1-codex')).toBeUndefined()
     // GPT-5 models don't support temperature
     expect(getMaxTemperature('gpt-5')).toBeUndefined()
     expect(getMaxTemperature('gpt-5-mini')).toBeUndefined()
@@ -306,6 +323,16 @@ describe('Model Capabilities', () => {
   )
 
   it.concurrent('should have correct models in MODELS_WITH_REASONING_EFFORT', () => {
+    // Should contain GPT-5.1 models that support reasoning effort
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1-mini')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1-nano')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1-codex')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-mini')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-nano')
+    expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-codex')
+
     // Should contain GPT-5 models that support reasoning effort
     expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5')
     expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-mini')
@@ -325,6 +352,16 @@ describe('Model Capabilities', () => {
   })
 
   it.concurrent('should have correct models in MODELS_WITH_VERBOSITY', () => {
+    // Should contain GPT-5.1 models that support verbosity
+    expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1')
+    expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1-mini')
+    expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1-nano')
+    expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1-codex')
+    expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1')
+    expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-mini')
+    expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-nano')
+    expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-codex')
+
     // Should contain GPT-5 models that support verbosity
     expect(MODELS_WITH_VERBOSITY).toContain('gpt-5')
    expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-mini')
```
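The constants and helpers exercised above (`MODELS_WITH_REASONING_EFFORT`, `MODELS_WITH_VERBOSITY`, `getMaxTemperature`) are imported from the providers package, whose implementation is not part of this diff. Below is a rough sketch of how they could be derived from the capability metadata added in models.ts; the per-provider `models` array shape and the helper names are assumptions, not the repo's actual code.

```ts
// Sketch: deriving capability lists from model definitions shaped like the
// entries added in models.ts. Not the repo's implementation.
interface ModelDefinition {
  id: string
  capabilities?: {
    temperature?: { min: number; max: number }
    reasoningEffort?: { values: string[] }
    verbosity?: { values: string[] }
  }
}

type ProviderDefs = Record<string, { models: ModelDefinition[] }>

const allModels = (defs: ProviderDefs): ModelDefinition[] =>
  Object.values(defs).flatMap((provider) => provider.models)

// e.g. modelsWith(defs, 'reasoningEffort') would now include every gpt-5.1 variant.
const modelsWith = (defs: ProviderDefs, cap: 'reasoningEffort' | 'verbosity'): string[] =>
  allModels(defs)
    .filter((m) => m.capabilities?.[cap])
    .map((m) => m.id)

// Returns undefined for the GPT-5.1 family, which declares no temperature range.
const maxTemperatureFor = (defs: ProviderDefs, modelId: string): number | undefined =>
  allModels(defs).find((m) => m.id === modelId)?.capabilities?.temperature?.max
```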
