Skip to content

Commit 181e070

Browse files
committed
feat: Add Groq and Chutes API providers
- Implemented provider logic for Groq and Chutes in `src/api/providers/`.
- Added corresponding unit tests.
- Integrated the providers into the API index, exports, schemas, and types.
- Updated `ApiOptions.tsx` and related constants/hooks in `webview-ui` to display settings for Groq and Chutes.
- Added missing translation keys for Groq and Chutes settings labels to `webview-ui/src/i18n/locales/en/settings.json`.
- Synchronized the new translation keys across all other supported languages in `webview-ui`.
- Removed redundant Groq translation keys from `src/i18n/locales/en/common.json`.

Fixes a UI bug where raw translation keys were displayed for the Groq/Chutes settings.
1 parent 60270d1 commit 181e070

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

44 files changed

+1390
-2
lines changed

src/api/index.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ import { RequestyHandler } from "./providers/requesty"
2323
import { HumanRelayHandler } from "./providers/human-relay"
2424
import { FakeAIHandler } from "./providers/fake-ai"
2525
import { XAIHandler } from "./providers/xai"
26+
import { GroqHandler } from "./providers/groq"
27+
import { ChutesHandler } from "./providers/chutes"
2628

2729
export interface SingleCompletionHandler {
2830
completePrompt(prompt: string): Promise<string>
@@ -88,7 +90,13 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
8890
return new FakeAIHandler(options)
8991
case "xai":
9092
return new XAIHandler(options)
93+
case "groq":
94+
return new GroqHandler(options)
95+
case "chutes":
96+
return new ChutesHandler(options)
9197
default:
98+
// Ensure the default case handles unknown providers gracefully or throws an error
99+
// For now, defaulting to Anthropic as before
92100
return new AnthropicHandler(options)
93101
}
94102
}
Lines changed: 207 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,207 @@
1+
import { ChutesHandler } from "../chutes" // Import ChutesHandler
2+
// TODO: Update imports for Chutes once defined in shared/api.ts
3+
import { ChutesModelId, chutesDefaultModelId, chutesModels } from "../../../shared/api"
4+
import OpenAI from "openai"
5+
import { Anthropic } from "@anthropic-ai/sdk"
6+
7+
// Mock OpenAI client
8+
jest.mock("openai", () => {
9+
const createMock = jest.fn()
10+
return jest.fn(() => ({
11+
chat: {
12+
completions: {
13+
create: createMock,
14+
},
15+
},
16+
}))
17+
})
18+
19+
// Test suite for ChutesHandler
20+
describe("ChutesHandler", () => {
21+
let handler: ChutesHandler // Use ChutesHandler type
22+
let mockCreate: jest.Mock
23+
24+
beforeEach(() => {
25+
// Reset all mocks
26+
jest.clearAllMocks()
27+
28+
// Get the mock create function
29+
mockCreate = (OpenAI as unknown as jest.Mock)().chat.completions.create
30+
31+
// Create handler with mock
32+
handler = new ChutesHandler({}) // Instantiate ChutesHandler
33+
})
34+
35+
test("should use the correct Chutes base URL", () => {
36+
// Instantiate handler inside the test to ensure clean state for this check
37+
new ChutesHandler({})
38+
expect(OpenAI).toHaveBeenCalledWith(
39+
expect.objectContaining({
40+
baseURL: "https://llm.chutes.ai/v1", // Verify Chutes base URL
41+
}),
42+
)
43+
})
44+
45+
test("should use the provided API key", () => {
46+
// Clear mocks before this specific test
47+
jest.clearAllMocks()
48+
49+
// Create a handler with our API key
50+
const chutesApiKey = "test-chutes-api-key" // Use chutesApiKey
51+
new ChutesHandler({ chutesApiKey }) // Instantiate ChutesHandler
52+
53+
// Verify the OpenAI constructor was called with our API key
54+
expect(OpenAI).toHaveBeenCalledWith(
55+
expect.objectContaining({
56+
apiKey: chutesApiKey,
57+
}),
58+
)
59+
})
60+
61+
test("should return default model when no model is specified", () => {
62+
const model = handler.getModel()
63+
expect(model.id).toBe(chutesDefaultModelId) // Use chutesDefaultModelId
64+
expect(model.info).toEqual(chutesModels[chutesDefaultModelId]) // Use chutesModels
65+
})
66+
67+
test("should return specified model when valid model is provided", () => {
68+
// Using an actual model ID from the Chutes API response
69+
const testModelId: ChutesModelId = "Qwen/Qwen2.5-72B-Instruct"
70+
const handlerWithModel = new ChutesHandler({ apiModelId: testModelId }) // Instantiate ChutesHandler
71+
const model = handlerWithModel.getModel()
72+
73+
expect(model.id).toBe(testModelId)
74+
expect(model.info).toEqual(chutesModels[testModelId]) // Use chutesModels
75+
})
76+
77+
test("completePrompt method should return text from Chutes API", async () => {
78+
const expectedResponse = "This is a test response from Chutes"
79+
80+
mockCreate.mockResolvedValueOnce({
81+
choices: [
82+
{
83+
message: {
84+
content: expectedResponse,
85+
},
86+
},
87+
],
88+
})
89+
90+
const result = await handler.completePrompt("test prompt")
91+
expect(result).toBe(expectedResponse)
92+
})
93+
94+
test("should handle errors in completePrompt", async () => {
95+
const errorMessage = "Chutes API error"
96+
mockCreate.mockRejectedValueOnce(new Error(errorMessage))
97+
98+
await expect(handler.completePrompt("test prompt")).rejects.toThrow(`Chutes AI completion error: ${errorMessage}`) // Updated error message prefix
99+
})
100+
101+
test("createMessage should yield text content from stream", async () => {
102+
const testContent = "This is test content from Chutes stream"
103+
104+
// Setup mock for streaming response
105+
mockCreate.mockImplementationOnce(() => {
106+
return {
107+
[Symbol.asyncIterator]: () => ({
108+
next: jest
109+
.fn()
110+
.mockResolvedValueOnce({
111+
done: false,
112+
value: {
113+
choices: [{ delta: { content: testContent } }],
114+
},
115+
})
116+
.mockResolvedValueOnce({ done: true }),
117+
}),
118+
}
119+
})
120+
121+
// Create and consume the stream
122+
const stream = handler.createMessage("system prompt", [])
123+
const firstChunk = await stream.next()
124+
125+
// Verify the content
126+
expect(firstChunk.done).toBe(false)
127+
expect(firstChunk.value).toEqual({
128+
type: "text",
129+
text: testContent,
130+
})
131+
})
132+
133+
test("createMessage should yield usage data from stream", async () => {
134+
// Setup mock for streaming response that includes usage data
135+
mockCreate.mockImplementationOnce(() => {
136+
return {
137+
[Symbol.asyncIterator]: () => ({
138+
next: jest
139+
.fn()
140+
.mockResolvedValueOnce({
141+
done: false,
142+
value: {
143+
choices: [{ delta: {} }], // Needs to have choices array to avoid error
144+
usage: { // Assuming standard OpenAI usage fields
145+
prompt_tokens: 10,
146+
completion_tokens: 20,
147+
},
148+
},
149+
})
150+
.mockResolvedValueOnce({ done: true }),
151+
}),
152+
}
153+
})
154+
155+
// Create and consume the stream
156+
const stream = handler.createMessage("system prompt", [])
157+
const firstChunk = await stream.next()
158+
159+
// Verify the usage data
160+
expect(firstChunk.done).toBe(false)
161+
expect(firstChunk.value).toEqual({ // Updated expected usage structure
162+
type: "usage",
163+
inputTokens: 10,
164+
outputTokens: 20,
165+
cacheReadTokens: 0, // Assuming 0 for Chutes
166+
cacheWriteTokens: 0, // Assuming 0 for Chutes
167+
})
168+
})
169+
170+
test("createMessage should pass correct parameters to Chutes client", async () => {
171+
// Setup a handler with specific model
172+
const modelId: ChutesModelId = "deepseek-ai/DeepSeek-R1" // Use an actual Chutes model ID and type
173+
const modelInfo = chutesModels[modelId] // Use chutesModels
174+
const handlerWithModel = new ChutesHandler({ apiModelId: modelId }) // Instantiate ChutesHandler
175+
176+
// Setup mock for streaming response
177+
mockCreate.mockImplementationOnce(() => {
178+
return {
179+
[Symbol.asyncIterator]: () => ({
180+
async next() {
181+
return { done: true }
182+
},
183+
}),
184+
}
185+
})
186+
187+
// System prompt and messages
188+
const systemPrompt = "Test system prompt for Chutes"
189+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for Chutes" }]
190+
191+
// Start generating a message
192+
const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
193+
await messageGenerator.next() // Start the generator
194+
195+
// Check that all parameters were passed correctly
196+
expect(mockCreate).toHaveBeenCalledWith(
197+
expect.objectContaining({
198+
model: modelId,
199+
max_tokens: modelInfo.maxTokens, // Assuming standard max_tokens
200+
temperature: 0.5, // Using CHUTES_DEFAULT_TEMPERATURE
201+
messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
202+
stream: true,
203+
stream_options: { include_usage: true }, // Assuming standard support
204+
}),
205+
)
206+
})
207+
})

0 commit comments

Comments
 (0)