Skip to content

Commit 7457a24

Browse files
committed
fix: migrate all OpenAI models to Responses API with proper image support
- Unified all OpenAI models to use /v1/responses endpoint
- Fixed image handling using structured format (input_text/input_image)
- Added supportsTemperature capability to ModelInfo
- Configured temperature support for each model (disabled for GPT-5, o1/o3/o4, codex-mini)
- Removed all model-specific handlers and GPT-5 references
- Updated package.json to use OpenAI SDK v5.12.2

Fixes #7012 - OpenAI image attachments not working
1 parent ca6f261 commit 7457a24

File tree

5 files changed

+139
-292
lines changed

5 files changed

+139
-292
lines changed

packages/types/src/model.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,8 @@ export const modelInfoSchema = z.object({
4747
// Capability flag to indicate whether the model supports an output verbosity parameter
4848
supportsVerbosity: z.boolean().optional(),
4949
supportsReasoningBudget: z.boolean().optional(),
50+
// Capability flag to indicate whether the model supports temperature parameter
51+
supportsTemperature: z.boolean().optional(),
5052
requiredReasoningBudget: z.boolean().optional(),
5153
supportsReasoningEffort: z.boolean().optional(),
5254
supportedParameters: z.array(modelParametersSchema).optional(),

packages/types/src/providers/openai.ts

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ export const openAiNativeModels = {
1919
description: "GPT-5: The best model for coding and agentic tasks across domains",
2020
// supportsVerbosity is a new capability; ensure ModelInfo includes it
2121
supportsVerbosity: true,
22+
supportsTemperature: false,
2223
},
2324
"gpt-5-mini-2025-08-07": {
2425
maxTokens: 128000,
@@ -32,6 +33,7 @@ export const openAiNativeModels = {
3233
cacheReadsPrice: 0.03,
3334
description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
3435
supportsVerbosity: true,
36+
supportsTemperature: false,
3537
},
3638
"gpt-5-nano-2025-08-07": {
3739
maxTokens: 128000,
@@ -45,6 +47,7 @@ export const openAiNativeModels = {
4547
cacheReadsPrice: 0.01,
4648
description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
4749
supportsVerbosity: true,
50+
supportsTemperature: false,
4851
},
4952
"gpt-4.1": {
5053
maxTokens: 32_768,
@@ -54,6 +57,7 @@ export const openAiNativeModels = {
5457
inputPrice: 2,
5558
outputPrice: 8,
5659
cacheReadsPrice: 0.5,
60+
supportsTemperature: true,
5761
},
5862
"gpt-4.1-mini": {
5963
maxTokens: 32_768,
@@ -63,6 +67,7 @@ export const openAiNativeModels = {
6367
inputPrice: 0.4,
6468
outputPrice: 1.6,
6569
cacheReadsPrice: 0.1,
70+
supportsTemperature: true,
6671
},
6772
"gpt-4.1-nano": {
6873
maxTokens: 32_768,
@@ -72,6 +77,7 @@ export const openAiNativeModels = {
7277
inputPrice: 0.1,
7378
outputPrice: 0.4,
7479
cacheReadsPrice: 0.025,
80+
supportsTemperature: true,
7581
},
7682
o3: {
7783
maxTokens: 100_000,
@@ -83,6 +89,7 @@ export const openAiNativeModels = {
8389
cacheReadsPrice: 0.5,
8490
supportsReasoningEffort: true,
8591
reasoningEffort: "medium",
92+
supportsTemperature: false,
8693
},
8794
"o3-high": {
8895
maxTokens: 100_000,
@@ -93,6 +100,7 @@ export const openAiNativeModels = {
93100
outputPrice: 8.0,
94101
cacheReadsPrice: 0.5,
95102
reasoningEffort: "high",
103+
supportsTemperature: false,
96104
},
97105
"o3-low": {
98106
maxTokens: 100_000,
@@ -103,6 +111,7 @@ export const openAiNativeModels = {
103111
outputPrice: 8.0,
104112
cacheReadsPrice: 0.5,
105113
reasoningEffort: "low",
114+
supportsTemperature: false,
106115
},
107116
"o4-mini": {
108117
maxTokens: 100_000,
@@ -114,6 +123,7 @@ export const openAiNativeModels = {
114123
cacheReadsPrice: 0.275,
115124
supportsReasoningEffort: true,
116125
reasoningEffort: "medium",
126+
supportsTemperature: false,
117127
},
118128
"o4-mini-high": {
119129
maxTokens: 100_000,
@@ -124,6 +134,7 @@ export const openAiNativeModels = {
124134
outputPrice: 4.4,
125135
cacheReadsPrice: 0.275,
126136
reasoningEffort: "high",
137+
supportsTemperature: false,
127138
},
128139
"o4-mini-low": {
129140
maxTokens: 100_000,
@@ -134,6 +145,7 @@ export const openAiNativeModels = {
134145
outputPrice: 4.4,
135146
cacheReadsPrice: 0.275,
136147
reasoningEffort: "low",
148+
supportsTemperature: false,
137149
},
138150
"o3-mini": {
139151
maxTokens: 100_000,
@@ -145,6 +157,7 @@ export const openAiNativeModels = {
145157
cacheReadsPrice: 0.55,
146158
supportsReasoningEffort: true,
147159
reasoningEffort: "medium",
160+
supportsTemperature: false,
148161
},
149162
"o3-mini-high": {
150163
maxTokens: 100_000,
@@ -155,6 +168,7 @@ export const openAiNativeModels = {
155168
outputPrice: 4.4,
156169
cacheReadsPrice: 0.55,
157170
reasoningEffort: "high",
171+
supportsTemperature: false,
158172
},
159173
"o3-mini-low": {
160174
maxTokens: 100_000,
@@ -165,6 +179,7 @@ export const openAiNativeModels = {
165179
outputPrice: 4.4,
166180
cacheReadsPrice: 0.55,
167181
reasoningEffort: "low",
182+
supportsTemperature: false,
168183
},
169184
o1: {
170185
maxTokens: 100_000,
@@ -174,6 +189,7 @@ export const openAiNativeModels = {
174189
inputPrice: 15,
175190
outputPrice: 60,
176191
cacheReadsPrice: 7.5,
192+
supportsTemperature: false,
177193
},
178194
"o1-preview": {
179195
maxTokens: 32_768,
@@ -183,6 +199,7 @@ export const openAiNativeModels = {
183199
inputPrice: 15,
184200
outputPrice: 60,
185201
cacheReadsPrice: 7.5,
202+
supportsTemperature: false,
186203
},
187204
"o1-mini": {
188205
maxTokens: 65_536,
@@ -192,6 +209,7 @@ export const openAiNativeModels = {
192209
inputPrice: 1.1,
193210
outputPrice: 4.4,
194211
cacheReadsPrice: 0.55,
212+
supportsTemperature: false,
195213
},
196214
"gpt-4o": {
197215
maxTokens: 16_384,
@@ -201,6 +219,7 @@ export const openAiNativeModels = {
201219
inputPrice: 2.5,
202220
outputPrice: 10,
203221
cacheReadsPrice: 1.25,
222+
supportsTemperature: true,
204223
},
205224
"gpt-4o-mini": {
206225
maxTokens: 16_384,
@@ -210,6 +229,7 @@ export const openAiNativeModels = {
210229
inputPrice: 0.15,
211230
outputPrice: 0.6,
212231
cacheReadsPrice: 0.075,
232+
supportsTemperature: true,
213233
},
214234
"codex-mini-latest": {
215235
maxTokens: 16_384,
@@ -219,6 +239,7 @@ export const openAiNativeModels = {
219239
inputPrice: 1.5,
220240
outputPrice: 6,
221241
cacheReadsPrice: 0,
242+
supportsTemperature: false,
222243
description:
223244
"Codex Mini: Cloud-based software engineering agent powered by codex-1, a version of o3 optimized for coding tasks. Trained with reinforcement learning to generate human-style code, adhere to instructions, and iteratively run tests.",
224245
},

pnpm-lock.yaml

Lines changed: 5 additions & 5 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)