Skip to content

Commit c3e22d5

Browse files
committed
Updates max tokens limit to 64,000 across test files
Increases the maximum token limit from 8,192 to 64,000 in test expectations for multiple AI providers:

- Anthropic
- Glama
- Requesty
- Vertex

This aligns test expectations with newer model capabilities that support larger token limits.
1 parent ed1ea26 commit c3e22d5

File tree

4 files changed

+6
-6
lines changed

4 files changed

+6
-6
lines changed

src/api/providers/__tests__/anthropic.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,7 @@ describe("AnthropicHandler", () => {
255255
})
256256

257257
const result = handler.getModel()
258-
expect(result.maxTokens).toBe(8192)
258+
expect(result.maxTokens).toBe(64_000)
259259
expect(result.thinking).toBeUndefined()
260260
expect(result.temperature).toBe(0)
261261
})

src/api/providers/__tests__/glama.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ describe("GlamaHandler", () => {
181181
model: mockOptions.apiModelId,
182182
messages: [{ role: "user", content: "Test prompt" }],
183183
temperature: 0,
184-
max_tokens: 8192,
184+
max_tokens: 64_000,
185185
}),
186186
)
187187
})
@@ -233,7 +233,7 @@ describe("GlamaHandler", () => {
233233
const modelInfo = handler.getModel()
234234
expect(modelInfo.id).toBe(mockOptions.apiModelId)
235235
expect(modelInfo.info).toBeDefined()
236-
expect(modelInfo.info.maxTokens).toBe(8192)
236+
expect(modelInfo.info.maxTokens).toBe(64_000)
237237
expect(modelInfo.info.contextWindow).toBe(200_000)
238238
})
239239
})

src/api/providers/__tests__/requesty.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ describe("RequestyHandler", () => {
1818
requestyApiKey: "test-key",
1919
requestyModelId: "test-model",
2020
requestyModelInfo: {
21-
maxTokens: 8192,
21+
maxTokens: 64_000,
2222
contextWindow: 200_000,
2323
supportsImages: true,
2424
supportsComputerUse: true,

src/api/providers/__tests__/vertex.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -309,7 +309,7 @@ describe("VertexHandler", () => {
309309
},
310310
],
311311
generationConfig: {
312-
maxOutputTokens: 8192,
312+
maxOutputTokens: 64_000,
313313
temperature: 0,
314314
},
315315
})
@@ -914,7 +914,7 @@ describe("VertexHandler", () => {
914914
})
915915

916916
const result = handler.getModel()
917-
expect(result.maxTokens).toBe(8192)
917+
expect(result.maxTokens).toBe(64_000)
918918
expect(result.thinking).toBeUndefined()
919919
expect(result.temperature).toBe(0)
920920
})

0 commit comments

Comments (0)