Skip to content

Commit 4812569

Browse files
committed
fix: update provider tests to expect custom maxTokens for all models
- Fixed getModelMaxOutputTokens to always prioritize custom maxTokens - Updated anthropic, anthropic-vertex, and openrouter tests to expect the new behavior - Removed incorrect test that enforced old behavior for non-reasoning models - All tests now pass with the corrected implementation
1 parent 28b2f56 commit 4812569

File tree

4 files changed

+9
-9
lines changed

4 files changed

+9
-9
lines changed

src/api/providers/__tests__/anthropic-vertex.spec.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -705,7 +705,7 @@ describe("VertexHandler", () => {
705705
expect(result.temperature).toBe(1.0)
706706
})
707707

708-
it("does not honor custom maxTokens for non-thinking models", () => {
708+
it("honors custom maxTokens for all models", () => {
709709
const handler = new AnthropicVertexHandler({
710710
apiKey: "test-api-key",
711711
apiModelId: "claude-3-7-sonnet@20250219",
@@ -714,7 +714,7 @@ describe("VertexHandler", () => {
714714
})
715715

716716
const result = handler.getModel()
717-
expect(result.maxTokens).toBe(8192)
717+
expect(result.maxTokens).toBe(32_768)
718718
expect(result.reasoningBudget).toBeUndefined()
719719
expect(result.temperature).toBe(0)
720720
})

src/api/providers/__tests__/anthropic.spec.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -251,7 +251,7 @@ describe("AnthropicHandler", () => {
251251
expect(result.temperature).toBe(1.0)
252252
})
253253

254-
it("does not honor custom maxTokens for non-thinking models", () => {
254+
it("honors custom maxTokens for all models", () => {
255255
const handler = new AnthropicHandler({
256256
apiKey: "test-api-key",
257257
apiModelId: "claude-3-7-sonnet-20250219",
@@ -260,7 +260,7 @@ describe("AnthropicHandler", () => {
260260
})
261261

262262
const result = handler.getModel()
263-
expect(result.maxTokens).toBe(8192)
263+
expect(result.maxTokens).toBe(32_768)
264264
expect(result.reasoningBudget).toBeUndefined()
265265
expect(result.temperature).toBe(0)
266266
})

src/api/providers/__tests__/openrouter.spec.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ describe("OpenRouterHandler", () => {
8989
expect(result.info.supportsPromptCache).toBe(true)
9090
})
9191

92-
it("honors custom maxTokens for thinking models", async () => {
92+
it("honors custom maxTokens for all models", async () => {
9393
const handler = new OpenRouterHandler({
9494
openRouterApiKey: "test-key",
9595
openRouterModelId: "anthropic/claude-3.7-sonnet:thinking",
@@ -98,20 +98,20 @@ describe("OpenRouterHandler", () => {
9898
})
9999

100100
const result = await handler.fetchModel()
101-
expect(result.maxTokens).toBe(128000) // Use actual implementation value
101+
expect(result.maxTokens).toBe(32_768) // Should use custom maxTokens
102102
expect(result.reasoningBudget).toBeUndefined() // Use actual implementation value
103103
expect(result.temperature).toBe(0) // Use actual implementation value
104104
})
105105

106-
it("does not honor custom maxTokens for non-thinking models", async () => {
106+
it("honors custom maxTokens for non-thinking models", async () => {
107107
const handler = new OpenRouterHandler({
108108
...mockOptions,
109109
modelMaxTokens: 32_768,
110110
modelMaxThinkingTokens: 16_384,
111111
})
112112

113113
const result = await handler.fetchModel()
114-
expect(result.maxTokens).toBe(8192)
114+
expect(result.maxTokens).toBe(32_768)
115115
expect(result.reasoningBudget).toBeUndefined()
116116
expect(result.temperature).toBe(0)
117117
})

src/shared/api.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ export const getModelMaxOutputTokens = ({
7070
return settings.claudeCodeMaxOutputTokens || CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS
7171
}
7272

73-
// Check for user-configured modelMaxTokens FIRST (new logic)
73+
// Check for user-configured modelMaxTokens
7474
if (settings?.modelMaxTokens && settings.modelMaxTokens > 0) {
7575
return settings.modelMaxTokens
7676
}

0 commit comments

Comments
 (0)