
Commit 2122977

feat: add GLM-4.5 and OpenAI gpt-oss models to Fireworks provider (#6784)
* feat: add GLM-4.5 and OpenAI gpt-oss models to Fireworks provider

  - Added GLM-4.5 (355B total / 32B active) and GLM-4.5-Air (106B total / 12B active) models from Z.ai
  - Added gpt-oss-20b and gpt-oss-120b models from OpenAI
  - All models configured with a 128K context window
  - Added comprehensive test coverage for all new models

  Fixes #6753

* fix: update GLM-4.5 model IDs to use "p" instead of a hyphen

  - Changed glm-4-5 to glm-4p5
  - Changed glm-4-5-air to glm-4p5-air
  - Updated the corresponding test cases

---------

Co-authored-by: Roo Code <[email protected]>
1 parent 15b0f50 commit 2122977

File tree

2 files changed: +128, -0 lines

packages/types/src/providers/fireworks.ts

Lines changed: 44 additions & 0 deletions
@@ -6,6 +6,10 @@ export type FireworksModelId =
 	| "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct"
 	| "accounts/fireworks/models/deepseek-r1-0528"
 	| "accounts/fireworks/models/deepseek-v3"
+	| "accounts/fireworks/models/glm-4p5"
+	| "accounts/fireworks/models/glm-4p5-air"
+	| "accounts/fireworks/models/gpt-oss-20b"
+	| "accounts/fireworks/models/gpt-oss-120b"

 export const fireworksDefaultModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"

@@ -58,4 +62,44 @@ export const fireworksModels = {
 		description:
 			"A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us.",
 	},
+	"accounts/fireworks/models/glm-4p5": {
+		maxTokens: 16384,
+		contextWindow: 128000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.55,
+		outputPrice: 2.19,
+		description:
+			"Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities.",
+	},
+	"accounts/fireworks/models/glm-4p5-air": {
+		maxTokens: 16384,
+		contextWindow: 128000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.55,
+		outputPrice: 2.19,
+		description:
+			"Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities.",
+	},
+	"accounts/fireworks/models/gpt-oss-20b": {
+		maxTokens: 16384,
+		contextWindow: 128000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.07,
+		outputPrice: 0.3,
+		description:
+			"OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows.",
+	},
+	"accounts/fireworks/models/gpt-oss-120b": {
+		maxTokens: 16384,
+		contextWindow: 128000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.15,
+		outputPrice: 0.6,
+		description:
+			"OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs.",
+	},
 } as const satisfies Record<string, ModelInfo>
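For context, here is a minimal consumer-side sketch (not part of this commit) of how one of the new entries can be read from the fireworksModels map added above. The relative import path is a hypothetical one; the real project may re-export these symbols from a package entry point instead.

// Sketch only: reading a newly added Fireworks model definition.
// Import path is an assumption, not taken from the commit.
import {
	fireworksModels,
	fireworksDefaultModelId,
} from "./packages/types/src/providers/fireworks"

// Index with one of the keys added in this commit.
const glm45 = fireworksModels["accounts/fireworks/models/glm-4p5"]

console.log(glm45.contextWindow, glm45.maxTokens) // 128000 16384 (shared by all four new models)
console.log(glm45.inputPrice, glm45.outputPrice)  // 0.55 2.19
console.log(fireworksDefaultModelId)              // "accounts/fireworks/models/kimi-k2-instruct"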

src/api/providers/__tests__/fireworks.spec.ts

Lines changed: 84 additions & 0 deletions
@@ -179,6 +179,90 @@ describe("FireworksHandler", () => {
 		)
 	})

+	it("should return GLM-4.5 model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/glm-4p5"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16384,
+				contextWindow: 128000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				inputPrice: 0.55,
+				outputPrice: 2.19,
+				description: expect.stringContaining("Z.ai GLM-4.5 with 355B total parameters"),
+			}),
+		)
+	})
+
+	it("should return GLM-4.5-Air model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/glm-4p5-air"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16384,
+				contextWindow: 128000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				inputPrice: 0.55,
+				outputPrice: 2.19,
+				description: expect.stringContaining("Z.ai GLM-4.5-Air with 106B total parameters"),
+			}),
+		)
+	})
+
+	it("should return gpt-oss-20b model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/gpt-oss-20b"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16384,
+				contextWindow: 128000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				inputPrice: 0.07,
+				outputPrice: 0.3,
+				description: expect.stringContaining("OpenAI gpt-oss-20b: Compact model for local/edge deployments"),
+			}),
+		)
+	})
+
+	it("should return gpt-oss-120b model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/gpt-oss-120b"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16384,
+				contextWindow: 128000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				inputPrice: 0.15,
+				outputPrice: 0.6,
+				description: expect.stringContaining("OpenAI gpt-oss-120b: Production-grade, general-purpose model"),
+			}),
+		)
+	})
+
 	it("completePrompt method should return text from Fireworks API", async () => {
 		const expectedResponse = "This is a test response from Fireworks"
 		mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] })
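Outside the test suite, the same handler path can be exercised directly. A minimal sketch follows, assuming FireworksHandler is importable from the provider module next to this test directory and that its constructor accepts the same options object used in the tests above; the import path and the environment-variable name are assumptions, not part of the commit.

// Sketch only: selecting one of the newly added models through FireworksHandler.
import { FireworksHandler } from "../fireworks" // path mirrors the test layout; assumed

const handler = new FireworksHandler({
	apiModelId: "accounts/fireworks/models/gpt-oss-120b",
	fireworksApiKey: process.env.FIREWORKS_API_KEY ?? "test-fireworks-api-key",
})

const { id, info } = handler.getModel()
console.log(id)                                 // "accounts/fireworks/models/gpt-oss-120b"
console.log(info.contextWindow, info.maxTokens) // 128000 16384
console.log(info.inputPrice, info.outputPrice)  // 0.15 0.6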
