Skip to content

Commit 0c32602

Browse files
committed
revert: remove GPT-5 models, keep only nectarine experimental model
- Removed gpt-5-2025-08-07, gpt-5-mini-2025-08-07, gpt-5-nano-2025-08-07
- Kept nectarine-alpha-new-reasoning-effort-2025-07-25 experimental model
- Reverted default model back to gpt-4o
- Updated tests and changeset accordingly
1 parent fe82301 commit 0c32602

File tree

4 files changed

+10
-54
lines changed

4 files changed

+10
-54
lines changed

.changeset/gpt5-support.md

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,7 @@
33
"roo-cline": minor
44
---
55

6-
Add GPT-5 model support
6+
Add nectarine experimental model support
77

8-
- Added GPT-5 models (gpt-5-2025-08-07, gpt-5-mini-2025-08-07, gpt-5-nano-2025-08-07) to OpenAI Native provider
9-
- Added nectarine-alpha-new-reasoning-effort-2025-07-25 experimental model
10-
- Set gpt-5-2025-08-07 as the new default OpenAI Native model
11-
- Implemented GPT-5 specific handling with streaming and reasoning effort support
8+
- Added nectarine-alpha-new-reasoning-effort-2025-07-25 experimental model to OpenAI Native provider
9+
- Implemented nectarine-specific handling with streaming and reasoning effort support

packages/types/src/providers/openai.ts

Lines changed: 1 addition & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -3,39 +3,9 @@ import type { ModelInfo } from "../model.js"
33
// https://openai.com/api/pricing/
44
export type OpenAiNativeModelId = keyof typeof openAiNativeModels
55

6-
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5-2025-08-07"
6+
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4o"
77

88
export const openAiNativeModels = {
9-
"gpt-5-2025-08-07": {
10-
maxTokens: 128000,
11-
contextWindow: 400000,
12-
supportsImages: true,
13-
supportsPromptCache: true,
14-
supportsReasoningEffort: true,
15-
inputPrice: 1.25,
16-
outputPrice: 10.0,
17-
cacheReadsPrice: 0.125,
18-
},
19-
"gpt-5-mini-2025-08-07": {
20-
maxTokens: 128000,
21-
contextWindow: 400000,
22-
supportsImages: true,
23-
supportsPromptCache: true,
24-
supportsReasoningEffort: true,
25-
inputPrice: 0.25,
26-
outputPrice: 2.0,
27-
cacheReadsPrice: 0.025,
28-
},
29-
"gpt-5-nano-2025-08-07": {
30-
maxTokens: 128000,
31-
contextWindow: 400000,
32-
supportsImages: true,
33-
supportsPromptCache: true,
34-
supportsReasoningEffort: true,
35-
inputPrice: 0.05,
36-
outputPrice: 0.4,
37-
cacheReadsPrice: 0.005,
38-
},
399
"nectarine-alpha-new-reasoning-effort-2025-07-25": {
4010
maxTokens: 128000,
4111
contextWindow: 400000,
@@ -51,7 +21,6 @@ export const openAiNativeModels = {
5121
contextWindow: 1_047_576,
5222
supportsImages: true,
5323
supportsPromptCache: true,
54-
supportsReasoningEffort: true,
5524
inputPrice: 2,
5625
outputPrice: 8,
5726
cacheReadsPrice: 0.5,
@@ -61,7 +30,6 @@ export const openAiNativeModels = {
6130
contextWindow: 1_047_576,
6231
supportsImages: true,
6332
supportsPromptCache: true,
64-
supportsReasoningEffort: true,
6533
inputPrice: 0.4,
6634
outputPrice: 1.6,
6735
cacheReadsPrice: 0.1,
@@ -71,7 +39,6 @@ export const openAiNativeModels = {
7139
contextWindow: 1_047_576,
7240
supportsImages: true,
7341
supportsPromptCache: true,
74-
supportsReasoningEffort: true,
7542
inputPrice: 0.1,
7643
outputPrice: 0.4,
7744
cacheReadsPrice: 0.025,
@@ -84,7 +51,6 @@ export const openAiNativeModels = {
8451
inputPrice: 2.0,
8552
outputPrice: 8.0,
8653
cacheReadsPrice: 0.5,
87-
supportsReasoningEffort: true,
8854
reasoningEffort: "medium",
8955
},
9056
"o3-high": {
@@ -115,7 +81,6 @@ export const openAiNativeModels = {
11581
inputPrice: 1.1,
11682
outputPrice: 4.4,
11783
cacheReadsPrice: 0.275,
118-
supportsReasoningEffort: true,
11984
reasoningEffort: "medium",
12085
},
12186
"o4-mini-high": {
@@ -146,7 +111,6 @@ export const openAiNativeModels = {
146111
inputPrice: 1.1,
147112
outputPrice: 4.4,
148113
cacheReadsPrice: 0.55,
149-
supportsReasoningEffort: true,
150114
reasoningEffort: "medium",
151115
},
152116
"o3-mini-high": {
@@ -174,7 +138,6 @@ export const openAiNativeModels = {
174138
contextWindow: 200_000,
175139
supportsImages: true,
176140
supportsPromptCache: true,
177-
supportsReasoningEffort: true,
178141
inputPrice: 15,
179142
outputPrice: 60,
180143
cacheReadsPrice: 7.5,
@@ -184,7 +147,6 @@ export const openAiNativeModels = {
184147
contextWindow: 128_000,
185148
supportsImages: true,
186149
supportsPromptCache: true,
187-
supportsReasoningEffort: true,
188150
inputPrice: 15,
189151
outputPrice: 60,
190152
cacheReadsPrice: 7.5,
@@ -194,7 +156,6 @@ export const openAiNativeModels = {
194156
contextWindow: 128_000,
195157
supportsImages: true,
196158
supportsPromptCache: true,
197-
supportsReasoningEffort: true,
198159
inputPrice: 1.1,
199160
outputPrice: 4.4,
200161
cacheReadsPrice: 0.55,
@@ -204,7 +165,6 @@ export const openAiNativeModels = {
204165
contextWindow: 128_000,
205166
supportsImages: true,
206167
supportsPromptCache: true,
207-
supportsReasoningEffort: true,
208168
inputPrice: 75,
209169
outputPrice: 150,
210170
cacheReadsPrice: 37.5,
@@ -214,7 +174,6 @@ export const openAiNativeModels = {
214174
contextWindow: 128_000,
215175
supportsImages: true,
216176
supportsPromptCache: true,
217-
supportsReasoningEffort: true,
218177
inputPrice: 2.5,
219178
outputPrice: 10,
220179
cacheReadsPrice: 1.25,
@@ -224,7 +183,6 @@ export const openAiNativeModels = {
224183
contextWindow: 128_000,
225184
supportsImages: true,
226185
supportsPromptCache: true,
227-
supportsReasoningEffort: true,
228186
inputPrice: 0.15,
229187
outputPrice: 0.6,
230188
cacheReadsPrice: 0.075,

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -455,7 +455,7 @@ describe("OpenAiNativeHandler", () => {
455455
openAiNativeApiKey: "test-api-key",
456456
})
457457
const modelInfo = handlerWithoutModel.getModel()
458-
expect(modelInfo.id).toBe("gpt-5-2025-08-07") // Default model
458+
expect(modelInfo.id).toBe("gpt-4o") // Default model
459459
expect(modelInfo.info).toBeDefined()
460460
})
461461
})

src/api/providers/openai-native.ts

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -53,8 +53,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
5353
yield* this.handleReasonerMessage(model, id, systemPrompt, messages)
5454
} else if (model.id.startsWith("o1")) {
5555
yield* this.handleO1FamilyMessage(model, systemPrompt, messages)
56-
} else if (this.isGPT5Model(model.id)) {
57-
yield* this.handleGPT5Message(model, systemPrompt, messages)
56+
} else if (this.isNectarineModel(model.id)) {
57+
yield* this.handleNectarineMessage(model, systemPrompt, messages)
5858
} else {
5959
yield* this.handleDefaultModelMessage(model, systemPrompt, messages)
6060
}
@@ -131,7 +131,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
131131
yield* this.handleStreamResponse(stream, model)
132132
}
133133

134-
private async *handleGPT5Message(
134+
private async *handleNectarineMessage(
135135
model: OpenAiNativeModel,
136136
systemPrompt: string,
137137
messages: Anthropic.Messages.MessageParam[],
@@ -150,8 +150,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
150150
yield* this.handleStreamResponse(stream, model)
151151
}
152152

153-
private isGPT5Model(modelId: string): boolean {
154-
return modelId.includes("gpt-5") || modelId.includes("gpt5") || modelId.includes("nectarine")
153+
private isNectarineModel(modelId: string): boolean {
154+
return modelId.includes("nectarine")
155155
}
156156

157157
private async *handleStreamResponse(

0 commit comments

Comments (0)