diff --git a/src/api/providers/__tests__/anthropic.test.ts b/src/api/providers/__tests__/anthropic.test.ts
index fe186e3d8f..cb893e1e62 100644
--- a/src/api/providers/__tests__/anthropic.test.ts
+++ b/src/api/providers/__tests__/anthropic.test.ts
@@ -255,7 +255,7 @@ describe("AnthropicHandler", () => {
 			})
 
 			const result = handler.getModel()
-			expect(result.maxTokens).toBe(8192)
+			expect(result.maxTokens).toBe(64_000)
 			expect(result.thinking).toBeUndefined()
 			expect(result.temperature).toBe(0)
 		})
diff --git a/src/api/providers/__tests__/glama.test.ts b/src/api/providers/__tests__/glama.test.ts
index 5e017ccd0a..a1666e21d3 100644
--- a/src/api/providers/__tests__/glama.test.ts
+++ b/src/api/providers/__tests__/glama.test.ts
@@ -181,7 +181,7 @@ describe("GlamaHandler", () => {
 					model: mockOptions.apiModelId,
 					messages: [{ role: "user", content: "Test prompt" }],
 					temperature: 0,
-					max_tokens: 8192,
+					max_tokens: 64_000,
 				}),
 			)
 		})
@@ -233,7 +233,7 @@ describe("GlamaHandler", () => {
 			const modelInfo = handler.getModel()
 			expect(modelInfo.id).toBe(mockOptions.apiModelId)
 			expect(modelInfo.info).toBeDefined()
-			expect(modelInfo.info.maxTokens).toBe(8192)
+			expect(modelInfo.info.maxTokens).toBe(64_000)
 			expect(modelInfo.info.contextWindow).toBe(200_000)
 		})
 	})
diff --git a/src/api/providers/__tests__/requesty.test.ts b/src/api/providers/__tests__/requesty.test.ts
index 2b3da4a7ad..3b857dcd3d 100644
--- a/src/api/providers/__tests__/requesty.test.ts
+++ b/src/api/providers/__tests__/requesty.test.ts
@@ -18,7 +18,7 @@ describe("RequestyHandler", () => {
 		requestyApiKey: "test-key",
 		requestyModelId: "test-model",
 		requestyModelInfo: {
-			maxTokens: 8192,
+			maxTokens: 64_000,
 			contextWindow: 200_000,
 			supportsImages: true,
 			supportsComputerUse: true,
diff --git a/src/api/providers/__tests__/vertex.test.ts b/src/api/providers/__tests__/vertex.test.ts
index 6c4e891d0b..a865cdb01c 100644
--- a/src/api/providers/__tests__/vertex.test.ts
+++ b/src/api/providers/__tests__/vertex.test.ts
@@ -309,7 +309,7 @@ describe("VertexHandler", () => {
 					},
 				],
 				generationConfig: {
-					maxOutputTokens: 8192,
+					maxOutputTokens: 64_000,
 					temperature: 0,
 				},
 			})
@@ -914,7 +914,7 @@ describe("VertexHandler", () => {
 			})
 
 			const result = handler.getModel()
-			expect(result.maxTokens).toBe(8192)
+			expect(result.maxTokens).toBe(64_000)
 			expect(result.thinking).toBeUndefined()
 			expect(result.temperature).toBe(0)
 		})
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 4d71d947ba..2c0482d003 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -27,7 +27,7 @@ export const anthropicModels = {
 		thinking: true,
 	},
 	"claude-3-7-sonnet-20250219": {
-		maxTokens: 8192,
+		maxTokens: 64_000,
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsComputerUse: true,
@@ -166,7 +166,7 @@ export const bedrockModels = {
 		cachableFields: ["system"],
 	},
 	"anthropic.claude-3-7-sonnet-20250219-v1:0": {
-		maxTokens: 8192,
+		maxTokens: 64_000,
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsComputerUse: true,
@@ -425,7 +425,7 @@ export const bedrockModels = {
 // https://glama.ai/models
 export const glamaDefaultModelId = "anthropic/claude-3-7-sonnet"
 export const glamaDefaultModelInfo: ModelInfo = {
-	maxTokens: 8192,
+	maxTokens: 64_000,
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsComputerUse: true,
@@ -442,7 +442,7 @@ export const glamaDefaultModelInfo: ModelInfo = {
 // https://requesty.ai/router-2
 export const requestyDefaultModelId = "anthropic/claude-3-7-sonnet-latest"
 export const requestyDefaultModelInfo: ModelInfo = {
-	maxTokens: 8192,
+	maxTokens: 64_000,
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsComputerUse: true,
@@ -459,7 +459,7 @@ export const requestyDefaultModelInfo: ModelInfo = {
 // https://openrouter.ai/models?order=newest&supported_parameters=tools
 export const openRouterDefaultModelId = "anthropic/claude-3.7-sonnet"
 export const openRouterDefaultModelInfo: ModelInfo = {
-	maxTokens: 8192,
+	maxTokens: 64_000,
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsComputerUse: true,
@@ -550,7 +550,7 @@ export const vertexModels = {
 		outputPrice: 5,
 	},
 	"claude-3-7-sonnet@20250219:thinking": {
-		maxTokens: 64_000,
+		maxTokens: 128_000,
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsComputerUse: true,
@@ -562,7 +562,7 @@ export const vertexModels = {
 		thinking: true,
 	},
 	"claude-3-7-sonnet@20250219": {
-		maxTokens: 8192,
+		maxTokens: 64_000,
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsComputerUse: true,