
Commit 250d940

fix(api): remove chutes models <= 100k context, fix tests

1 parent: 4105ee5

File tree

3 files changed: +18 additions, -282 deletions
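
The model-pruning half of this commit is not shown in the diffs below; it lands in the Chutes model definitions and accounts for most of the -282 removed lines. A minimal sketch of the rule the commit title implies, assuming a model record with a contextWindow field (the names here are illustrative, not the actual exports from the Chutes provider):

// Hypothetical sketch: keep only models whose context window exceeds 100k tokens.
// The real change edits the model map directly; this only illustrates the cutoff.
interface ModelInfo {
	contextWindow: number
	maxTokens: number
}

const keepLargeContextModels = (models: Record<string, ModelInfo>): Record<string, ModelInfo> =>
	Object.fromEntries(Object.entries(models).filter(([, info]) => info.contextWindow > 100_000))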

src/api/providers/__tests__/chutes.test.ts

Lines changed: 9 additions & 5 deletions

@@ -66,7 +66,7 @@ describe("ChutesHandler", () => {
 
 	test("should return specified model when valid model is provided", () => {
 		// Using an actual model ID from the Chutes API response
-		const testModelId: ChutesModelId = "Qwen/Qwen2.5-72B-Instruct"
+		const testModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1"
 		const handlerWithModel = new ChutesHandler({ apiModelId: testModelId }) // Instantiate ChutesHandler
 		const model = handlerWithModel.getModel()
 
@@ -95,7 +95,9 @@ describe("ChutesHandler", () => {
 		const errorMessage = "Chutes API error"
 		mockCreate.mockRejectedValueOnce(new Error(errorMessage))
 
-		await expect(handler.completePrompt("test prompt")).rejects.toThrow(`Chutes AI completion error: ${errorMessage}`) // Updated error message prefix
+		await expect(handler.completePrompt("test prompt")).rejects.toThrow(
+			`Chutes AI completion error: ${errorMessage}`,
+		) // Updated error message prefix
 	})
 
 	test("createMessage should yield text content from stream", async () => {
@@ -141,7 +143,8 @@ describe("ChutesHandler", () => {
 					done: false,
 					value: {
 						choices: [{ delta: {} }], // Needs to have choices array to avoid error
-						usage: { // Assuming standard OpenAI usage fields
+						usage: {
+							// Assuming standard OpenAI usage fields
 							prompt_tokens: 10,
 							completion_tokens: 20,
 						},
@@ -158,7 +161,8 @@ describe("ChutesHandler", () => {
 
 		// Verify the usage data
 		expect(firstChunk.done).toBe(false)
-		expect(firstChunk.value).toEqual({ // Updated expected usage structure
+		expect(firstChunk.value).toEqual({
+			// Updated expected usage structure
 			type: "usage",
 			inputTokens: 10,
 			outputTokens: 20,
@@ -204,4 +208,4 @@ describe("ChutesHandler", () => {
 		}),
 	)
 })
-})
+})
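
For context on the usage assertion above: the handler is expected to translate the OpenAI-style usage block from the stream into the extension's own usage chunk. A rough sketch of that mapping, using only the field names visible in the test (the helper name is made up; the real handler does this inline while iterating the stream):

// Illustrative only: maps an OpenAI-style usage payload to the usage chunk the tests expect.
interface OpenAiUsage {
	prompt_tokens: number
	completion_tokens: number
}

interface UsageChunk {
	type: "usage"
	inputTokens: number
	outputTokens: number
}

// Hypothetical helper, not part of the provider's public API.
function toUsageChunk(usage: OpenAiUsage): UsageChunk {
	return {
		type: "usage",
		inputTokens: usage.prompt_tokens,
		outputTokens: usage.completion_tokens,
	}
}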

src/api/providers/__tests__/groq.test.ts

Lines changed: 7 additions & 5 deletions

@@ -64,7 +64,7 @@ describe("GroqHandler", () => {
 	})
 
 	test("should return specified model when valid model is provided", () => {
-		const testModelId: GroqModelId = "llama3-70b-8192" // Use a valid Groq model ID and type
+		const testModelId: GroqModelId = "llama-3.3-70b-versatile" // Use a valid Groq model ID and type
 		const handlerWithModel = new GroqHandler({ apiModelId: testModelId }) // Instantiate GroqHandler
 		const model = handlerWithModel.getModel()
 
@@ -143,7 +143,8 @@ describe("GroqHandler", () => {
 					done: false,
 					value: {
 						choices: [{ delta: {} }], // Needs to have choices array to avoid error
-						usage: { // Assuming standard OpenAI usage fields
+						usage: {
+							// Assuming standard OpenAI usage fields
 							prompt_tokens: 10,
 							completion_tokens: 20,
 						},
@@ -160,7 +161,8 @@ describe("GroqHandler", () => {
 
 		// Verify the usage data
 		expect(firstChunk.done).toBe(false)
-		expect(firstChunk.value).toEqual({ // Updated expected usage structure
+		expect(firstChunk.value).toEqual({
+			// Updated expected usage structure
 			type: "usage",
 			inputTokens: 10,
 			outputTokens: 20,
@@ -171,7 +173,7 @@ describe("GroqHandler", () => {
 
 	test("createMessage should pass correct parameters to Groq client", async () => {
 		// Setup a handler with specific model
-		const modelId: GroqModelId = "llama3-8b-8192" // Use a valid Groq model ID and type
+		const modelId: GroqModelId = "llama-3.1-8b-instant" // Use a valid Groq model ID and type
 		const modelInfo = groqModels[modelId] // Use groqModels
 		const handlerWithModel = new GroqHandler({ apiModelId: modelId }) // Instantiate GroqHandler
 
@@ -206,4 +208,4 @@ describe("GroqHandler", () => {
 		}),
 	)
 })
-})
+})
