Skip to content

Commit 883be32

Browse files
shariqriazz, ellipsis-dev[bot], and cte
authored
feat: Add Groq and Chutes API providers (#3034)
Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> Co-authored-by: Chris Estreich <[email protected]>
1 parent 6666d43 commit 883be32

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

44 files changed

+935
-0
lines changed

src/api/index.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ import { RequestyHandler } from "./providers/requesty"
2323
import { HumanRelayHandler } from "./providers/human-relay"
2424
import { FakeAIHandler } from "./providers/fake-ai"
2525
import { XAIHandler } from "./providers/xai"
26+
import { GroqHandler } from "./providers/groq"
27+
import { ChutesHandler } from "./providers/chutes"
2628

2729
export interface SingleCompletionHandler {
2830
completePrompt(prompt: string): Promise<string>
@@ -88,6 +90,10 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
8890
return new FakeAIHandler(options)
8991
case "xai":
9092
return new XAIHandler(options)
93+
case "groq":
94+
return new GroqHandler(options)
95+
case "chutes":
96+
return new ChutesHandler(options)
9197
default:
9298
return new AnthropicHandler(options)
9399
}
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
// npx jest src/api/providers/__tests__/chutes.test.ts
2+
3+
import OpenAI from "openai"
4+
import { Anthropic } from "@anthropic-ai/sdk"
5+
6+
import { ChutesModelId, chutesDefaultModelId, chutesModels } from "../../../shared/api"
7+
8+
import { ChutesHandler } from "../chutes"
9+
10+
jest.mock("openai", () => {
11+
const createMock = jest.fn()
12+
return jest.fn(() => ({ chat: { completions: { create: createMock } } }))
13+
})
14+
15+
describe("ChutesHandler", () => {
16+
let handler: ChutesHandler
17+
let mockCreate: jest.Mock
18+
19+
beforeEach(() => {
20+
jest.clearAllMocks()
21+
mockCreate = (OpenAI as unknown as jest.Mock)().chat.completions.create
22+
handler = new ChutesHandler({})
23+
})
24+
25+
test("should use the correct Chutes base URL", () => {
26+
new ChutesHandler({})
27+
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://llm.chutes.ai/v1" }))
28+
})
29+
30+
test("should use the provided API key", () => {
31+
const chutesApiKey = "test-chutes-api-key"
32+
new ChutesHandler({ chutesApiKey })
33+
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: chutesApiKey }))
34+
})
35+
36+
test("should return default model when no model is specified", () => {
37+
const model = handler.getModel()
38+
expect(model.id).toBe(chutesDefaultModelId)
39+
expect(model.info).toEqual(chutesModels[chutesDefaultModelId])
40+
})
41+
42+
test("should return specified model when valid model is provided", () => {
43+
const testModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1"
44+
const handlerWithModel = new ChutesHandler({ apiModelId: testModelId })
45+
const model = handlerWithModel.getModel()
46+
47+
expect(model.id).toBe(testModelId)
48+
expect(model.info).toEqual(chutesModels[testModelId])
49+
})
50+
51+
test("completePrompt method should return text from Chutes API", async () => {
52+
const expectedResponse = "This is a test response from Chutes"
53+
mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] })
54+
const result = await handler.completePrompt("test prompt")
55+
expect(result).toBe(expectedResponse)
56+
})
57+
58+
test("should handle errors in completePrompt", async () => {
59+
const errorMessage = "Chutes API error"
60+
mockCreate.mockRejectedValueOnce(new Error(errorMessage))
61+
await expect(handler.completePrompt("test prompt")).rejects.toThrow(`Chutes completion error: ${errorMessage}`)
62+
})
63+
64+
test("createMessage should yield text content from stream", async () => {
65+
const testContent = "This is test content from Chutes stream"
66+
67+
mockCreate.mockImplementationOnce(() => {
68+
return {
69+
[Symbol.asyncIterator]: () => ({
70+
next: jest
71+
.fn()
72+
.mockResolvedValueOnce({
73+
done: false,
74+
value: { choices: [{ delta: { content: testContent } }] },
75+
})
76+
.mockResolvedValueOnce({ done: true }),
77+
}),
78+
}
79+
})
80+
81+
const stream = handler.createMessage("system prompt", [])
82+
const firstChunk = await stream.next()
83+
84+
expect(firstChunk.done).toBe(false)
85+
expect(firstChunk.value).toEqual({ type: "text", text: testContent })
86+
})
87+
88+
test("createMessage should yield usage data from stream", async () => {
89+
mockCreate.mockImplementationOnce(() => {
90+
return {
91+
[Symbol.asyncIterator]: () => ({
92+
next: jest
93+
.fn()
94+
.mockResolvedValueOnce({
95+
done: false,
96+
value: { choices: [{ delta: {} }], usage: { prompt_tokens: 10, completion_tokens: 20 } },
97+
})
98+
.mockResolvedValueOnce({ done: true }),
99+
}),
100+
}
101+
})
102+
103+
const stream = handler.createMessage("system prompt", [])
104+
const firstChunk = await stream.next()
105+
106+
expect(firstChunk.done).toBe(false)
107+
expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 })
108+
})
109+
110+
test("createMessage should pass correct parameters to Chutes client", async () => {
111+
const modelId: ChutesModelId = "deepseek-ai/DeepSeek-R1"
112+
const modelInfo = chutesModels[modelId]
113+
const handlerWithModel = new ChutesHandler({ apiModelId: modelId })
114+
115+
mockCreate.mockImplementationOnce(() => {
116+
return {
117+
[Symbol.asyncIterator]: () => ({
118+
async next() {
119+
return { done: true }
120+
},
121+
}),
122+
}
123+
})
124+
125+
const systemPrompt = "Test system prompt for Chutes"
126+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for Chutes" }]
127+
128+
const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
129+
await messageGenerator.next()
130+
131+
expect(mockCreate).toHaveBeenCalledWith(
132+
expect.objectContaining({
133+
model: modelId,
134+
max_tokens: modelInfo.maxTokens,
135+
temperature: 0.5,
136+
messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
137+
stream: true,
138+
stream_options: { include_usage: true },
139+
}),
140+
)
141+
})
142+
})
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
// npx jest src/api/providers/__tests__/groq.test.ts
2+
3+
import OpenAI from "openai"
4+
import { Anthropic } from "@anthropic-ai/sdk"
5+
6+
import { GroqModelId, groqDefaultModelId, groqModels } from "../../../shared/api"
7+
8+
import { GroqHandler } from "../groq"
9+
10+
jest.mock("openai", () => {
11+
const createMock = jest.fn()
12+
return jest.fn(() => ({ chat: { completions: { create: createMock } } }))
13+
})
14+
15+
describe("GroqHandler", () => {
16+
let handler: GroqHandler
17+
let mockCreate: jest.Mock
18+
19+
beforeEach(() => {
20+
jest.clearAllMocks()
21+
mockCreate = (OpenAI as unknown as jest.Mock)().chat.completions.create
22+
handler = new GroqHandler({})
23+
})
24+
25+
test("should use the correct Groq base URL", () => {
26+
new GroqHandler({})
27+
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.groq.com/openai/v1" }))
28+
})
29+
30+
test("should use the provided API key", () => {
31+
const groqApiKey = "test-groq-api-key"
32+
new GroqHandler({ groqApiKey })
33+
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: groqApiKey }))
34+
})
35+
36+
test("should return default model when no model is specified", () => {
37+
const model = handler.getModel()
38+
expect(model.id).toBe(groqDefaultModelId) // Use groqDefaultModelId
39+
expect(model.info).toEqual(groqModels[groqDefaultModelId]) // Use groqModels
40+
})
41+
42+
test("should return specified model when valid model is provided", () => {
43+
const testModelId: GroqModelId = "llama-3.3-70b-versatile" // Use a valid Groq model ID and type
44+
const handlerWithModel = new GroqHandler({ apiModelId: testModelId }) // Instantiate GroqHandler
45+
const model = handlerWithModel.getModel()
46+
47+
expect(model.id).toBe(testModelId)
48+
expect(model.info).toEqual(groqModels[testModelId]) // Use groqModels
49+
})
50+
51+
test("completePrompt method should return text from Groq API", async () => {
52+
const expectedResponse = "This is a test response from Groq"
53+
mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] })
54+
const result = await handler.completePrompt("test prompt")
55+
expect(result).toBe(expectedResponse)
56+
})
57+
58+
test("should handle errors in completePrompt", async () => {
59+
const errorMessage = "Groq API error"
60+
mockCreate.mockRejectedValueOnce(new Error(errorMessage))
61+
await expect(handler.completePrompt("test prompt")).rejects.toThrow(`Groq completion error: ${errorMessage}`)
62+
})
63+
64+
test("createMessage should yield text content from stream", async () => {
65+
const testContent = "This is test content from Groq stream"
66+
67+
mockCreate.mockImplementationOnce(() => {
68+
return {
69+
[Symbol.asyncIterator]: () => ({
70+
next: jest
71+
.fn()
72+
.mockResolvedValueOnce({
73+
done: false,
74+
value: { choices: [{ delta: { content: testContent } }] },
75+
})
76+
.mockResolvedValueOnce({ done: true }),
77+
}),
78+
}
79+
})
80+
81+
const stream = handler.createMessage("system prompt", [])
82+
const firstChunk = await stream.next()
83+
84+
expect(firstChunk.done).toBe(false)
85+
expect(firstChunk.value).toEqual({ type: "text", text: testContent })
86+
})
87+
88+
test("createMessage should yield usage data from stream", async () => {
89+
mockCreate.mockImplementationOnce(() => {
90+
return {
91+
[Symbol.asyncIterator]: () => ({
92+
next: jest
93+
.fn()
94+
.mockResolvedValueOnce({
95+
done: false,
96+
value: { choices: [{ delta: {} }], usage: { prompt_tokens: 10, completion_tokens: 20 } },
97+
})
98+
.mockResolvedValueOnce({ done: true }),
99+
}),
100+
}
101+
})
102+
103+
const stream = handler.createMessage("system prompt", [])
104+
const firstChunk = await stream.next()
105+
106+
expect(firstChunk.done).toBe(false)
107+
expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 })
108+
})
109+
110+
test("createMessage should pass correct parameters to Groq client", async () => {
111+
const modelId: GroqModelId = "llama-3.1-8b-instant"
112+
const modelInfo = groqModels[modelId]
113+
const handlerWithModel = new GroqHandler({ apiModelId: modelId })
114+
115+
mockCreate.mockImplementationOnce(() => {
116+
return {
117+
[Symbol.asyncIterator]: () => ({
118+
async next() {
119+
return { done: true }
120+
},
121+
}),
122+
}
123+
})
124+
125+
const systemPrompt = "Test system prompt for Groq"
126+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for Groq" }]
127+
128+
const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
129+
await messageGenerator.next()
130+
131+
expect(mockCreate).toHaveBeenCalledWith(
132+
expect.objectContaining({
133+
model: modelId,
134+
max_tokens: modelInfo.maxTokens,
135+
temperature: 0.5,
136+
messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
137+
stream: true,
138+
stream_options: { include_usage: true },
139+
}),
140+
)
141+
})
142+
})

0 commit comments

Comments
 (0)