
Commit 13d4dc0

Fix tests

1 parent: 8639da0
4 files changed: +11 -12 lines changed


src/api/providers/__tests__/gemini.test.ts

Lines changed: 1 addition & 8 deletions
@@ -74,14 +74,7 @@ describe("GeminiHandler", () => {
 			expect(chunks.length).toBe(3)
 			expect(chunks[0]).toEqual({ type: "text", text: "Hello" })
 			expect(chunks[1]).toEqual({ type: "text", text: " world!" })
-			expect(chunks[2]).toEqual({
-				type: "usage",
-				inputTokens: 10,
-				outputTokens: 5,
-				cacheReadTokens: undefined,
-				cacheWriteTokens: undefined,
-				thinkingTokens: undefined,
-			})
+			expect(chunks[2]).toEqual({ type: "usage", inputTokens: 10, outputTokens: 5 })
 
 			// Verify the call to generateContentStream
 			expect(handler["client"].models.generateContentStream).toHaveBeenCalledWith(
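
Why the compact expectation is equivalent once the handler change below lands: Jest's toEqual performs a recursive equality check that ignores properties whose value is undefined, so the cache and thinking fields can simply be omitted. A minimal standalone sketch of that behavior (not part of the project's test suite):

    // toEqual ignores keys whose value is undefined; toStrictEqual does not.
    test("toEqual treats undefined-valued properties as absent", () => {
    	const chunk = { type: "usage", inputTokens: 10, outputTokens: 5, cacheWriteTokens: undefined }
    	expect(chunk).toEqual({ type: "usage", inputTokens: 10, outputTokens: 5 }) // passes
    	expect(chunk).not.toStrictEqual({ type: "usage", inputTokens: 10, outputTokens: 5 }) // strict check sees the key
    })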

src/api/providers/__tests__/openrouter.test.ts

Lines changed: 6 additions & 2 deletions
@@ -54,10 +54,14 @@ describe("OpenRouterHandler", () => {
 				id: mockOptions.openRouterModelId,
 				info: mockOptions.openRouterModelInfo,
 				maxTokens: 1000,
-				reasoning: undefined,
-				temperature: 0,
 				thinking: undefined,
+				temperature: 0,
+				reasoningEffort: undefined,
 				topP: undefined,
+				promptCache: {
+					supported: false,
+					optional: false,
+				},
 			})
 		})
 
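The new promptCache expectation presumably follows from the handler change in openrouter.ts below: the test's mock model ID is in neither caching set, so both flags resolve to false. A quick sanity check under that assumption (and assuming the sets are importable in the test file):

    // Assumes mockOptions.openRouterModelId appears in neither set.
    expect(PROMPT_CACHING_MODELS.has(mockOptions.openRouterModelId)).toBe(false)
    expect(OPTIONAL_PROMPT_CACHING_MODELS.has(mockOptions.openRouterModelId)).toBe(false)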

src/api/providers/gemini.ts

Lines changed: 1 addition & 1 deletion
@@ -148,7 +148,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 		if (lastUsageMetadata) {
 			const inputTokens = lastUsageMetadata.promptTokenCount ?? 0
 			const outputTokens = lastUsageMetadata.candidatesTokenCount ?? 0
-			const cacheWriteTokens = cacheWrite ? inputTokens : 0
+			const cacheWriteTokens = cacheWrite ? inputTokens : undefined
 			const cacheReadTokens = lastUsageMetadata.cachedContentTokenCount
 			const reasoningTokens = lastUsageMetadata.thoughtsTokenCount
 
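The distinction matters because undefined reads as "not reported" while 0 reads as "zero tokens written", and with toEqual in the test above an undefined field compares equal to an omitted one. A hypothetical consumer illustrates the difference (formatCacheWrites is an illustrative name, not project code):

    // undefined: the provider reported nothing; 0: it reported zero cache writes.
    function formatCacheWrites(tokens: number | undefined): string {
    	return tokens === undefined ? "n/a" : `${tokens} tokens`
    }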

src/api/providers/openrouter.ts

Lines changed: 3 additions & 1 deletion
@@ -7,6 +7,7 @@ import {
 	openRouterDefaultModelId,
 	openRouterDefaultModelInfo,
 	PROMPT_CACHING_MODELS,
+	OPTIONAL_PROMPT_CACHING_MODELS,
 } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStreamChunk } from "../transform/stream"
@@ -195,11 +196,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 		return {
 			id,
 			info,
+			// maxTokens, thinking, temperature, reasoningEffort
 			...getModelParams({ options: this.options, model: info, defaultTemperature }),
 			topP,
 			promptCache: {
 				supported: PROMPT_CACHING_MODELS.has(id),
-				optional: PROMPT_CACHING_MODELS.has(id),
+				optional: OPTIONAL_PROMPT_CACHING_MODELS.has(id),
 			},
 		}
 	}
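
Previously both flags were driven by PROMPT_CACHING_MODELS, so optional could never differ from supported; splitting the sets lets a model support caching without requiring an explicit opt-in. A sketch of how the two sets might relate (the member IDs are illustrative assumptions, not the real lists in shared/api):

    // Models where OpenRouter prompt caching is available at all.
    export const PROMPT_CACHING_MODELS = new Set<string>(["anthropic/claude-3.5-sonnet" /* , ... */])

    // The subset where caching must be explicitly enabled rather than applied automatically.
    export const OPTIONAL_PROMPT_CACHING_MODELS = new Set<string>(["anthropic/claude-3.5-sonnet" /* , ... */])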
