Skip to content

Commit 137d3f4

Browse files
Add OpenAI GPT-5.4 mini and nano models (#11946)
1 parent 08f3a2b commit 137d3f4

File tree

5 files changed

+105
-0
lines changed

5 files changed

+105
-0
lines changed
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
"roo-cline": patch
3+
"@roo-code/types": patch
4+
---
5+
6+
Add support for OpenAI `gpt-5.4-mini` and `gpt-5.4-nano` models.

packages/types/src/providers/openai-codex.ts

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -187,6 +187,21 @@ export const openAiCodexModels = {
187187
supportsTemperature: false,
188188
description: "GPT-5.4: Most capable model via ChatGPT subscription",
189189
},
190+
"gpt-5.4-mini": {
191+
maxTokens: 128000,
192+
contextWindow: 400000,
193+
includedTools: ["apply_patch"],
194+
excludedTools: ["apply_diff", "write_to_file"],
195+
supportsImages: true,
196+
supportsPromptCache: true,
197+
supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
198+
reasoningEffort: "none",
199+
inputPrice: 0,
200+
outputPrice: 0,
201+
supportsVerbosity: true,
202+
supportsTemperature: false,
203+
description: "GPT-5.4 Mini: Lower-cost GPT-5.4 model via ChatGPT subscription",
204+
},
190205
"gpt-5.2": {
191206
maxTokens: 128000,
192207
contextWindow: 400000,

packages/types/src/providers/openai.ts

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,43 @@ export const openAiNativeModels = {
5050
],
5151
description: "GPT-5.4: Our most capable model for professional work",
5252
},
53+
"gpt-5.4-mini": {
54+
maxTokens: 128000,
55+
contextWindow: 400000,
56+
includedTools: ["apply_patch"],
57+
excludedTools: ["apply_diff", "write_to_file"],
58+
supportsImages: true,
59+
supportsPromptCache: true,
60+
supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
61+
reasoningEffort: "none",
62+
inputPrice: 0.75,
63+
outputPrice: 4.5,
64+
cacheReadsPrice: 0.075,
65+
supportsVerbosity: true,
66+
supportsTemperature: false,
67+
tiers: [
68+
{ name: "flex", contextWindow: 400000, inputPrice: 0.375, outputPrice: 2.25, cacheReadsPrice: 0.0375 },
69+
{ name: "priority", contextWindow: 400000, inputPrice: 1.5, outputPrice: 9.0, cacheReadsPrice: 0.15 },
70+
],
71+
description: "GPT-5.4 Mini: A faster, lower-cost GPT-5.4 model for coding and agentic workflows",
72+
},
73+
"gpt-5.4-nano": {
74+
maxTokens: 128000,
75+
contextWindow: 400000,
76+
includedTools: ["apply_patch"],
77+
excludedTools: ["apply_diff", "write_to_file"],
78+
supportsImages: true,
79+
supportsPromptCache: true,
80+
supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
81+
reasoningEffort: "none",
82+
inputPrice: 0.2,
83+
outputPrice: 1.25,
84+
cacheReadsPrice: 0.02,
85+
supportsVerbosity: true,
86+
supportsTemperature: false,
87+
tiers: [{ name: "flex", contextWindow: 400000, inputPrice: 0.1, outputPrice: 0.625, cacheReadsPrice: 0.01 }],
88+
description: "GPT-5.4 Nano: The smallest GPT-5.4 model for high-volume, low-latency tasks",
89+
},
5390
"gpt-5.2": {
5491
maxTokens: 128000,
5592
contextWindow: 400000,

src/api/providers/__tests__/openai-codex.spec.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,4 +33,12 @@ describe("OpenAiCodexHandler.getModel", () => {
3333
expect(model.info.maxTokens).toBe(8192)
3434
expect(model.info.supportsImages).toBe(false)
3535
})
36+
37+
it("should use GPT-5.4 Mini capabilities when selected", () => {
38+
const handler = new OpenAiCodexHandler({ apiModelId: "gpt-5.4-mini" })
39+
const model = handler.getModel()
40+
41+
expect(model.id).toBe("gpt-5.4-mini")
42+
expect(model.info).toBeDefined()
43+
})
3644
})

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,6 +264,45 @@ describe("OpenAiNativeHandler", () => {
264264
expect(modelInfo.info.reasoningEffort).toBe("none")
265265
})
266266

267+
it("should return GPT-5.4 Mini model info when selected", () => {
268+
const gpt54MiniHandler = new OpenAiNativeHandler({
269+
...mockOptions,
270+
apiModelId: "gpt-5.4-mini",
271+
})
272+
273+
const modelInfo = gpt54MiniHandler.getModel()
274+
expect(modelInfo.id).toBe("gpt-5.4-mini")
275+
expect(modelInfo.info.maxTokens).toBe(128000)
276+
expect(modelInfo.info.contextWindow).toBe(400000)
277+
expect(modelInfo.info.supportsVerbosity).toBe(true)
278+
expect(modelInfo.info.supportsReasoningEffort).toEqual(["none", "low", "medium", "high", "xhigh"])
279+
expect(modelInfo.info.reasoningEffort).toBe("none")
280+
expect(modelInfo.info.longContextPricing).toBeUndefined()
281+
})
282+
283+
it("should return GPT-5.4 Nano model info when selected", () => {
284+
const gpt54NanoHandler = new OpenAiNativeHandler({
285+
...mockOptions,
286+
apiModelId: "gpt-5.4-nano",
287+
})
288+
289+
const modelInfo = gpt54NanoHandler.getModel()
290+
expect(modelInfo.id).toBe("gpt-5.4-nano")
291+
expect(modelInfo.info.maxTokens).toBe(128000)
292+
expect(modelInfo.info.contextWindow).toBe(400000)
293+
expect(modelInfo.info.supportsVerbosity).toBe(true)
294+
expect(modelInfo.info.supportsReasoningEffort).toEqual(["none", "low", "medium", "high", "xhigh"])
295+
expect(modelInfo.info.reasoningEffort).toBe("none")
296+
expect(modelInfo.info.outputPrice).toBe(1.25)
297+
expect(modelInfo.info.longContextPricing).toBeUndefined()
298+
expect(modelInfo.info.tiers).toEqual([
299+
expect.objectContaining({
300+
name: "flex",
301+
outputPrice: 0.625,
302+
}),
303+
])
304+
})
305+
267306
it("should return GPT-5.3 Chat model info when selected", () => {
268307
const chatHandler = new OpenAiNativeHandler({
269308
...mockOptions,

0 commit comments

Comments (0)