Skip to content

Commit 0e7abee

Browse files
committed
feat: add GPT-5 model support
- Added GPT-5 models (gpt-5-2025-08-07, gpt-5-mini-2025-08-07, gpt-5-nano-2025-08-07)
- Added nectarine-alpha-new-reasoning-effort-2025-07-25 experimental model
- Set gpt-5-2025-08-07 as the default OpenAI Native model
- Implemented GPT-5-specific handling with streaming and reasoning-effort support
1 parent 7ea1ae5 commit 0e7abee

File tree

3 files changed

+70
-1
lines changed

3 files changed

+70
-1
lines changed

.changeset/gpt5-support.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
---
2+
"@roo-code/types": minor
3+
"roo-cline": minor
4+
---
5+
6+
Add GPT-5 model support
7+
8+
- Added GPT-5 models (gpt-5-2025-08-07, gpt-5-mini-2025-08-07, gpt-5-nano-2025-08-07) to OpenAI Native provider
9+
- Added nectarine-alpha-new-reasoning-effort-2025-07-25 experimental model
10+
- Set gpt-5-2025-08-07 as the new default OpenAI Native model
11+
- Implemented GPT-5 specific handling with streaming and reasoning effort support

packages/types/src/providers/openai.ts

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,45 @@ import type { ModelInfo } from "../model.js"
33
// https://openai.com/api/pricing/
44
export type OpenAiNativeModelId = keyof typeof openAiNativeModels
55

6-
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4.1"
6+
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5-2025-08-07"
77

88
export const openAiNativeModels = {
9+
"gpt-5-2025-08-07": {
10+
maxTokens: 128000,
11+
contextWindow: 256000,
12+
supportsImages: true,
13+
supportsPromptCache: true,
14+
inputPrice: 1.25,
15+
outputPrice: 10.0,
16+
cacheReadsPrice: 0.125,
17+
},
18+
"gpt-5-mini-2025-08-07": {
19+
maxTokens: 128000,
20+
contextWindow: 256000,
21+
supportsImages: true,
22+
supportsPromptCache: true,
23+
inputPrice: 0.25,
24+
outputPrice: 2.0,
25+
cacheReadsPrice: 0.025,
26+
},
27+
"gpt-5-nano-2025-08-07": {
28+
maxTokens: 128000,
29+
contextWindow: 256000,
30+
supportsImages: true,
31+
supportsPromptCache: true,
32+
inputPrice: 0.05,
33+
outputPrice: 0.4,
34+
cacheReadsPrice: 0.005,
35+
},
36+
"nectarine-alpha-new-reasoning-effort-2025-07-25": {
37+
maxTokens: 128000,
38+
contextWindow: 256000,
39+
supportsImages: true,
40+
supportsPromptCache: true,
41+
inputPrice: 0,
42+
outputPrice: 0,
43+
cacheReadsPrice: 0,
44+
},
945
"gpt-4.1": {
1046
maxTokens: 32_768,
1147
contextWindow: 1_047_576,

src/api/providers/openai-native.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
5353
yield* this.handleReasonerMessage(model, id, systemPrompt, messages)
5454
} else if (model.id.startsWith("o1")) {
5555
yield* this.handleO1FamilyMessage(model, systemPrompt, messages)
56+
} else if (this.isGPT5Model(model.id)) {
57+
yield* this.handleGPT5Message(model, systemPrompt, messages)
5658
} else {
5759
yield* this.handleDefaultModelMessage(model, systemPrompt, messages)
5860
}
@@ -123,6 +125,26 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
123125
yield* this.handleStreamResponse(stream, model)
124126
}
125127

128+
private async *handleGPT5Message(
129+
model: OpenAiNativeModel,
130+
systemPrompt: string,
131+
messages: Anthropic.Messages.MessageParam[],
132+
): ApiStream {
133+
const stream = await this.client.chat.completions.create({
134+
model: model.id,
135+
temperature: 1,
136+
messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
137+
stream: true,
138+
stream_options: { include_usage: true },
139+
})
140+
141+
yield* this.handleStreamResponse(stream, model)
142+
}
143+
144+
private isGPT5Model(modelId: string): boolean {
145+
return modelId.includes("gpt-5") || modelId.includes("gpt5") || modelId.includes("nectarine")
146+
}
147+
126148
private async *handleStreamResponse(
127149
stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
128150
model: OpenAiNativeModel,

0 commit comments

Comments
 (0)