
Commit aa68b76

fix: migrate all OpenAI models to Responses API with proper image support
- Unified all OpenAI models to use the /v1/responses endpoint
- Fixed image handling using the structured format (input_text/input_image)
- Added a supportsTemperature capability to ModelInfo
- Configured temperature support for each model (disabled for GPT-5, o1/o3/o4, codex-mini)
- Removed all model-specific handlers and GPT-5 references
- Updated package.json to use OpenAI SDK v5.12.2

Fixes #7012 - OpenAI image attachments not working
1 parent b975ced commit aa68b76
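
The provider-side handler changes (the Responses API migration and the image handling) are not part of the diffs shown below. As a rough sketch of the request shape the commit message describes, structured input_text/input_image content sent through the OpenAI SDK v5 Responses API, assuming a local screenshot file and an OPENAI_API_KEY in the environment (illustrative only, not the extension's actual handler code):

import fs from "node:fs"
import OpenAI from "openai"

const client = new OpenAI() // reads OPENAI_API_KEY from the environment

// Encode a local image; a hosted https URL would also work in place of the data URL.
const base64Png = fs.readFileSync("screenshot.png").toString("base64")

const response = await client.responses.create({
	model: "gpt-4o",
	input: [
		{
			role: "user",
			content: [
				{ type: "input_text", text: "Describe this screenshot." },
				{ type: "input_image", image_url: `data:image/png;base64,${base64Png}`, detail: "auto" },
			],
		},
	],
})

console.log(response.output_text)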

5 files changed (+139, -292 lines)

packages/types/src/model.ts

Lines changed: 2 additions & 0 deletions
@@ -47,6 +47,8 @@ export const modelInfoSchema = z.object({
 	// Capability flag to indicate whether the model supports an output verbosity parameter
 	supportsVerbosity: z.boolean().optional(),
 	supportsReasoningBudget: z.boolean().optional(),
+	// Capability flag to indicate whether the model supports temperature parameter
+	supportsTemperature: z.boolean().optional(),
 	requiredReasoningBudget: z.boolean().optional(),
 	supportsReasoningEffort: z.boolean().optional(),
 	supportedParameters: z.array(modelParametersSchema).optional(),
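
For context on how the new flag behaves in the schema, a minimal self-contained sketch (a stand-in for the relevant slice of modelInfoSchema; the real schema has many more fields) showing that entries which omit the flag still parse and simply leave it undefined:

import { z } from "zod"

// Stand-in for the capability flags added around this change; not the full modelInfoSchema.
const capabilitiesSchema = z.object({
	supportsVerbosity: z.boolean().optional(),
	supportsReasoningBudget: z.boolean().optional(),
	supportsTemperature: z.boolean().optional(),
	supportsReasoningEffort: z.boolean().optional(),
})

// Entries like the ones added in providers/openai.ts parse cleanly...
const o3Caps = capabilitiesSchema.parse({ supportsReasoningEffort: true, supportsTemperature: false })
console.log(o3Caps.supportsTemperature) // false

// ...and entries that never declare the flag leave it undefined.
const legacyCaps = capabilitiesSchema.parse({})
console.log(legacyCaps.supportsTemperature) // undefined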

packages/types/src/providers/openai.ts

Lines changed: 21 additions & 0 deletions
@@ -31,6 +31,7 @@ export const openAiNativeModels = {
 		description: "GPT-5: The best model for coding and agentic tasks across domains",
 		// supportsVerbosity is a new capability; ensure ModelInfo includes it
 		supportsVerbosity: true,
+		supportsTemperature: false,
 	},
 	"gpt-5-mini-2025-08-07": {
 		maxTokens: 128000,
@@ -44,6 +45,7 @@
 		cacheReadsPrice: 0.03,
 		description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
 		supportsVerbosity: true,
+		supportsTemperature: false,
 	},
 	"gpt-5-nano-2025-08-07": {
 		maxTokens: 128000,
@@ -57,6 +59,7 @@
 		cacheReadsPrice: 0.01,
 		description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
 		supportsVerbosity: true,
+		supportsTemperature: false,
 	},
 	"gpt-4.1": {
 		maxTokens: 32_768,
@@ -66,6 +69,7 @@
 		inputPrice: 2,
 		outputPrice: 8,
 		cacheReadsPrice: 0.5,
+		supportsTemperature: true,
 	},
 	"gpt-4.1-mini": {
 		maxTokens: 32_768,
@@ -75,6 +79,7 @@
 		inputPrice: 0.4,
 		outputPrice: 1.6,
 		cacheReadsPrice: 0.1,
+		supportsTemperature: true,
 	},
 	"gpt-4.1-nano": {
 		maxTokens: 32_768,
@@ -84,6 +89,7 @@
 		inputPrice: 0.1,
 		outputPrice: 0.4,
 		cacheReadsPrice: 0.025,
+		supportsTemperature: true,
 	},
 	o3: {
 		maxTokens: 100_000,
@@ -95,6 +101,7 @@
 		cacheReadsPrice: 0.5,
 		supportsReasoningEffort: true,
 		reasoningEffort: "medium",
+		supportsTemperature: false,
 	},
 	"o3-high": {
 		maxTokens: 100_000,
@@ -105,6 +112,7 @@
 		outputPrice: 8.0,
 		cacheReadsPrice: 0.5,
 		reasoningEffort: "high",
+		supportsTemperature: false,
 	},
 	"o3-low": {
 		maxTokens: 100_000,
@@ -115,6 +123,7 @@
 		outputPrice: 8.0,
 		cacheReadsPrice: 0.5,
 		reasoningEffort: "low",
+		supportsTemperature: false,
 	},
 	"o4-mini": {
 		maxTokens: 100_000,
@@ -126,6 +135,7 @@
 		cacheReadsPrice: 0.275,
 		supportsReasoningEffort: true,
 		reasoningEffort: "medium",
+		supportsTemperature: false,
 	},
 	"o4-mini-high": {
 		maxTokens: 100_000,
@@ -136,6 +146,7 @@
 		outputPrice: 4.4,
 		cacheReadsPrice: 0.275,
 		reasoningEffort: "high",
+		supportsTemperature: false,
 	},
 	"o4-mini-low": {
 		maxTokens: 100_000,
@@ -146,6 +157,7 @@
 		outputPrice: 4.4,
 		cacheReadsPrice: 0.275,
 		reasoningEffort: "low",
+		supportsTemperature: false,
 	},
 	"o3-mini": {
 		maxTokens: 100_000,
@@ -157,6 +169,7 @@
 		cacheReadsPrice: 0.55,
 		supportsReasoningEffort: true,
 		reasoningEffort: "medium",
+		supportsTemperature: false,
 	},
 	"o3-mini-high": {
 		maxTokens: 100_000,
@@ -167,6 +180,7 @@
 		outputPrice: 4.4,
 		cacheReadsPrice: 0.55,
 		reasoningEffort: "high",
+		supportsTemperature: false,
 	},
 	"o3-mini-low": {
 		maxTokens: 100_000,
@@ -177,6 +191,7 @@
 		outputPrice: 4.4,
 		cacheReadsPrice: 0.55,
 		reasoningEffort: "low",
+		supportsTemperature: false,
 	},
 	o1: {
 		maxTokens: 100_000,
@@ -186,6 +201,7 @@
 		inputPrice: 15,
 		outputPrice: 60,
 		cacheReadsPrice: 7.5,
+		supportsTemperature: false,
 	},
 	"o1-preview": {
 		maxTokens: 32_768,
@@ -195,6 +211,7 @@
 		inputPrice: 15,
 		outputPrice: 60,
 		cacheReadsPrice: 7.5,
+		supportsTemperature: false,
 	},
 	"o1-mini": {
 		maxTokens: 65_536,
@@ -204,6 +221,7 @@
 		inputPrice: 1.1,
 		outputPrice: 4.4,
 		cacheReadsPrice: 0.55,
+		supportsTemperature: false,
 	},
 	"gpt-4o": {
 		maxTokens: 16_384,
@@ -213,6 +231,7 @@
 		inputPrice: 2.5,
 		outputPrice: 10,
 		cacheReadsPrice: 1.25,
+		supportsTemperature: true,
 	},
 	"gpt-4o-mini": {
 		maxTokens: 16_384,
@@ -222,6 +241,7 @@
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 		cacheReadsPrice: 0.075,
+		supportsTemperature: true,
 	},
 	"codex-mini-latest": {
 		maxTokens: 16_384,
@@ -231,6 +251,7 @@
 		inputPrice: 1.5,
 		outputPrice: 6,
 		cacheReadsPrice: 0,
+		supportsTemperature: false,
 		description:
 			"Codex Mini: Cloud-based software engineering agent powered by codex-1, a version of o3 optimized for coding tasks. Trained with reinforcement learning to generate human-style code, adhere to instructions, and iteratively run tests.",
 	},
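
A plausible way for provider code to consume these flags is to drop temperature from the request for models that declare supportsTemperature: false, matching the commit message's note that GPT-5, o1/o3/o4, and codex-mini have it disabled. The helper and import path below are illustrative assumptions, not the actual handler from this commit; it also assumes every entry in the table declares the flag, as in the diff above:

import { openAiNativeModels } from "@roo-code/types" // import path is an assumption

// Hypothetical helper: only forward temperature when the selected model allows it.
function buildRequestOptions(modelId: keyof typeof openAiNativeModels, temperature?: number) {
	const info = openAiNativeModels[modelId]
	return {
		model: modelId,
		...(info.supportsTemperature && temperature !== undefined ? { temperature } : {}),
	}
}

buildRequestOptions("gpt-4o", 0.2) // { model: "gpt-4o", temperature: 0.2 }
buildRequestOptions("o3", 0.2) // { model: "o3" } - temperature is dropped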

pnpm-lock.yaml

Lines changed: 5 additions & 5 deletions
Some generated files are not rendered by default.
