
Commit 7c5e5a0

feat: add GPT-5 models with updated context windows
- Added gpt-5-2025-08-07, gpt-5-mini-2025-08-07, gpt-5-nano-2025-08-07 models
- All GPT-5 models configured with 400,000 context window
- Updated nectarine model context window to 256,000
- All models configured with reasoning effort support
- Set gpt-5-2025-08-07 as default OpenAI Native model
- Added GPT-5 model handling in openai-native.ts
- Updated tests to reflect new default model
1 parent 0c32602 commit 7c5e5a0

File tree (3 files changed: +38, -4 lines)

- packages/types/src/providers/openai.ts
- src/api/providers/__tests__/openai-native.spec.ts
- src/api/providers/openai-native.ts
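
A note on the "reasoning effort support" bullets above: this commit only sets the supportsReasoningEffort flag in the model table; it does not show how an effort level is sent to the API. As a rough sketch of what that flag typically gates at request time (the reasoning_effort parameter and its values are assumptions based on the public OpenAI SDK, not something introduced by this commit):

```typescript
import OpenAI from "openai"

// Sketch only: assumes the OpenAI SDK's reasoning_effort request parameter; the
// handler in this repo may wire the supportsReasoningEffort flag differently.
async function createWithEffort(
	client: OpenAI,
	modelId: string, // e.g. "gpt-5-2025-08-07"
	prompt: string,
	effort: "low" | "medium" | "high",
) {
	return client.chat.completions.create({
		model: modelId,
		reasoning_effort: effort,
		messages: [{ role: "user", content: prompt }],
	})
}
```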

packages/types/src/providers/openai.ts

Lines changed: 32 additions & 2 deletions
```diff
@@ -3,10 +3,30 @@ import type { ModelInfo } from "../model.js"
 // https://openai.com/api/pricing/
 export type OpenAiNativeModelId = keyof typeof openAiNativeModels
 
-export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4o"
+export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5-2025-08-07"
 
 export const openAiNativeModels = {
-	"nectarine-alpha-new-reasoning-effort-2025-07-25": {
+	"gpt-5-2025-08-07": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: true,
+		inputPrice: 0,
+		outputPrice: 0,
+		cacheReadsPrice: 0,
+	},
+	"gpt-5-mini-2025-08-07": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: true,
+		inputPrice: 0,
+		outputPrice: 0,
+		cacheReadsPrice: 0,
+	},
+	"gpt-5-nano-2025-08-07": {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsImages: true,
@@ -16,6 +36,16 @@ export const openAiNativeModels = {
 		outputPrice: 0,
 		cacheReadsPrice: 0,
 	},
+	"nectarine-alpha-new-reasoning-effort-2025-07-25": {
+		maxTokens: 128000,
+		contextWindow: 256000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: true,
+		inputPrice: 0,
+		outputPrice: 0,
+		cacheReadsPrice: 0,
+	},
 	"gpt-4.1": {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
```
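
That diff is the whole change for this file: three GPT-5 entries added at the top of the table, the nectarine alpha kept below them with its context window reduced to 256,000, and the default switched from gpt-4o. As a self-contained sketch of how the exports might be consumed (the local ModelInfo mirror and the resolveOpenAiNativeModel helper are illustrative, not code from this repository; only the exported names and field values come from the diff):

```typescript
// Minimal local mirror of the ModelInfo fields used in the diff (illustrative).
interface ModelInfo {
	maxTokens: number
	contextWindow: number
	supportsImages: boolean
	supportsPromptCache: boolean
	supportsReasoningEffort?: boolean
	inputPrice: number
	outputPrice: number
	cacheReadsPrice?: number
}

// Trimmed to one entry; the table in the diff has several.
const openAiNativeModels = {
	"gpt-5-2025-08-07": {
		maxTokens: 128000,
		contextWindow: 400000,
		supportsImages: true,
		supportsPromptCache: true,
		supportsReasoningEffort: true,
		inputPrice: 0,
		outputPrice: 0,
		cacheReadsPrice: 0,
	},
} as const satisfies Record<string, ModelInfo>

type OpenAiNativeModelId = keyof typeof openAiNativeModels
const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5-2025-08-07"

// Hypothetical helper: resolve a requested id, falling back to the new default.
function resolveOpenAiNativeModel(requestedId?: string): { id: OpenAiNativeModelId; info: ModelInfo } {
	const id =
		requestedId && requestedId in openAiNativeModels
			? (requestedId as OpenAiNativeModelId)
			: openAiNativeDefaultModelId
	return { id, info: openAiNativeModels[id] }
}

// resolveOpenAiNativeModel("some-unknown-id").id === "gpt-5-2025-08-07"
```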

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -455,7 +455,7 @@ describe("OpenAiNativeHandler", () => {
 				openAiNativeApiKey: "test-api-key",
 			})
 			const modelInfo = handlerWithoutModel.getModel()
-			expect(modelInfo.id).toBe("gpt-4o") // Default model
+			expect(modelInfo.id).toBe("gpt-5-2025-08-07") // Default model
 			expect(modelInfo.info).toBeDefined()
 		})
 	})
```
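
Only the expected default id changes here. If one also wanted to pin down the properties the commit message advertises for that default, an additional test in the same describe block could look like the following (a sketch that reuses the constructor and getModel() shape visible above; the extra assertions are not part of this commit):

```typescript
// Sketch: extra assertions for the new default model's advertised capabilities.
it("exposes the GPT-5 default model's capabilities", () => {
	const handlerWithoutModel = new OpenAiNativeHandler({
		openAiNativeApiKey: "test-api-key",
	})
	const modelInfo = handlerWithoutModel.getModel()
	expect(modelInfo.id).toBe("gpt-5-2025-08-07")
	expect(modelInfo.info.contextWindow).toBe(400000) // per the openai.ts diff above
	expect(modelInfo.info.supportsReasoningEffort).toBe(true)
})
```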

src/api/providers/openai-native.ts

Lines changed: 5 additions & 1 deletion
```diff
@@ -53,7 +53,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			yield* this.handleReasonerMessage(model, id, systemPrompt, messages)
 		} else if (model.id.startsWith("o1")) {
 			yield* this.handleO1FamilyMessage(model, systemPrompt, messages)
-		} else if (this.isNectarineModel(model.id)) {
+		} else if (this.isNectarineModel(model.id) || this.isGpt5Model(model.id)) {
 			yield* this.handleNectarineMessage(model, systemPrompt, messages)
 		} else {
 			yield* this.handleDefaultModelMessage(model, systemPrompt, messages)
@@ -154,6 +154,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		return modelId.includes("nectarine")
 	}
 
+	private isGpt5Model(modelId: string): boolean {
+		return modelId.startsWith("gpt-5")
+	}
+
 	private async *handleStreamResponse(
 		stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
 		model: OpenAiNativeModel,
```
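
The routing change is easy to miss: GPT-5 ids are not given their own handler but are funneled through the same path as the nectarine alpha (handleNectarineMessage). Reduced to a standalone sketch (the predicate bodies match the diff; the first branch's condition is not visible in the hunk, so it is represented here as a plain boolean, and routeForModel itself is illustrative):

```typescript
type Route = "reasoner" | "o1-family" | "nectarine" | "default"

function isNectarineModel(modelId: string): boolean {
	return modelId.includes("nectarine")
}

// New in this commit: any id starting with "gpt-5" shares the nectarine path.
function isGpt5Model(modelId: string): boolean {
	return modelId.startsWith("gpt-5")
}

function routeForModel(modelId: string, isReasoner: boolean): Route {
	if (isReasoner) return "reasoner"
	if (modelId.startsWith("o1")) return "o1-family"
	if (isNectarineModel(modelId) || isGpt5Model(modelId)) return "nectarine"
	return "default"
}

// routeForModel("gpt-5-mini-2025-08-07", false) === "nectarine"
// routeForModel("gpt-4.1", false) === "default"
```

A side effect of the startsWith("gpt-5") check is that any future gpt-5* id will take this path without further changes.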
