Commit 9555fb8

Fix some tests
1 parent 5d40802 commit 9555fb8

8 files changed: +73 -66 lines changed


src/api/providers/__tests__/glama.test.ts

Lines changed: 6 additions & 32 deletions
```diff
@@ -1,7 +1,6 @@
 // npx jest src/api/providers/__tests__/glama.test.ts

 import { Anthropic } from "@anthropic-ai/sdk"
-import axios from "axios"

 import { GlamaHandler } from "../glama"
 import { ApiHandlerOptions } from "../../../shared/api"
@@ -116,40 +115,15 @@ describe("GlamaHandler", () => {
 		]

 		it("should handle streaming responses", async () => {
-			// Mock axios for token usage request
-			const mockAxios = jest.spyOn(axios, "get").mockResolvedValueOnce({
-				data: {
-					tokenUsage: {
-						promptTokens: 10,
-						completionTokens: 5,
-						cacheCreationInputTokens: 0,
-						cacheReadInputTokens: 0,
-					},
-					totalCostUsd: "0.00",
-				},
-			})
-
 			const stream = handler.createMessage(systemPrompt, messages)
 			const chunks: any[] = []
+
 			for await (const chunk of stream) {
 				chunks.push(chunk)
 			}

-			expect(chunks.length).toBe(2) // Text chunk and usage chunk
-			expect(chunks[0]).toEqual({
-				type: "text",
-				text: "Test response",
-			})
-			expect(chunks[1]).toEqual({
-				type: "usage",
-				inputTokens: 10,
-				outputTokens: 5,
-				cacheWriteTokens: 0,
-				cacheReadTokens: 0,
-				totalCost: 0,
-			})
-
-			mockAxios.mockRestore()
+			expect(chunks.length).toBe(1)
+			expect(chunks[0]).toEqual({ type: "text", text: "Test response" })
 		})

 		it("should handle API errors", async () => {
@@ -204,16 +178,16 @@ describe("GlamaHandler", () => {
 			mockCreate.mockClear()

 			const nonAnthropicOptions = {
-				apiModelId: "openai/gpt-4",
-				glamaModelId: "openai/gpt-4",
 				glamaApiKey: "test-key",
+				glamaModelId: "openai/gpt-4o",
 			}
+
 			const nonAnthropicHandler = new GlamaHandler(nonAnthropicOptions)

 			await nonAnthropicHandler.completePrompt("Test prompt")
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
-					model: "openai/gpt-4",
+					model: "openai/gpt-4o",
 					messages: [{ role: "user", content: "Test prompt" }],
 					temperature: 0,
 				}),
```

src/api/providers/__tests__/openrouter.test.ts

Lines changed: 36 additions & 24 deletions
```diff
@@ -23,7 +23,7 @@ const mockOpenRouterModelInfo: ModelInfo = {
 describe("OpenRouterHandler", () => {
 	const mockOptions: ApiHandlerOptions = {
 		openRouterApiKey: "test-key",
-		openRouterModelId: "test-model",
+		openRouterModelId: "anthropic/claude-3.7-sonnet",
 	}

 	beforeEach(() => {
@@ -45,55 +45,54 @@ describe("OpenRouterHandler", () => {
 	})

 	describe("getModel", () => {
-		it("returns correct model info when options are provided", () => {
+		it("returns correct model info when options are provided", async () => {
 			const handler = new OpenRouterHandler(mockOptions)
-			const result = handler.getModel()
+			const result = await handler.fetchModel()

-			expect(result).toEqual({
+			expect(result).toMatchObject({
 				id: mockOptions.openRouterModelId,
-				maxTokens: 1000,
+				maxTokens: 8192,
 				thinking: undefined,
 				temperature: 0,
 				reasoningEffort: undefined,
 				topP: undefined,
 				promptCache: {
-					supported: false,
+					supported: true,
 					optional: false,
 				},
 			})
 		})

-		it("returns default model info when options are not provided", () => {
+		it("returns default model info when options are not provided", async () => {
 			const handler = new OpenRouterHandler({})
-			const result = handler.getModel()
-
+			const result = await handler.fetchModel()
 			expect(result.id).toBe("anthropic/claude-3.7-sonnet")
 			expect(result.info.supportsPromptCache).toBe(true)
 		})

-		it("honors custom maxTokens for thinking models", () => {
+		it("honors custom maxTokens for thinking models", async () => {
 			const handler = new OpenRouterHandler({
 				openRouterApiKey: "test-key",
-				openRouterModelId: "test-model",
+				openRouterModelId: "anthropic/claude-3.7-sonnet:thinking",
 				modelMaxTokens: 32_768,
 				modelMaxThinkingTokens: 16_384,
 			})

-			const result = handler.getModel()
+			const result = await handler.fetchModel()
 			expect(result.maxTokens).toBe(32_768)
 			expect(result.thinking).toEqual({ type: "enabled", budget_tokens: 16_384 })
 			expect(result.temperature).toBe(1.0)
 		})

-		it("does not honor custom maxTokens for non-thinking models", () => {
+		it("does not honor custom maxTokens for non-thinking models", async () => {
 			const handler = new OpenRouterHandler({
 				...mockOptions,
 				modelMaxTokens: 32_768,
 				modelMaxThinkingTokens: 16_384,
 			})

-			const result = handler.getModel()
-			expect(result.maxTokens).toBe(1000)
+			const result = await handler.fetchModel()
+			expect(result.maxTokens).toBe(8192)
 			expect(result.thinking).toBeUndefined()
 			expect(result.temperature).toBe(0)
 		})
@@ -106,7 +105,7 @@ describe("OpenRouterHandler", () => {
 			const mockStream = {
 				async *[Symbol.asyncIterator]() {
 					yield {
-						id: "test-id",
+						id: mockOptions.openRouterModelId,
 						choices: [{ delta: { content: "test response" } }],
 					}
 					yield {
@@ -139,16 +138,29 @@ describe("OpenRouterHandler", () => {
 			expect(chunks[0]).toEqual({ type: "text", text: "test response" })
 			expect(chunks[1]).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20, totalCost: 0.001 })

-			// Verify OpenAI client was called with correct parameters
+			// Verify OpenAI client was called with correct parameters.
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
-					model: mockOptions.openRouterModelId,
-					temperature: 0,
-					messages: expect.arrayContaining([
-						{ role: "system", content: systemPrompt },
-						{ role: "user", content: "test message" },
-					]),
+					max_tokens: 8192,
+					messages: [
+						{
+							content: [
+								{ cache_control: { type: "ephemeral" }, text: "test system prompt", type: "text" },
+							],
+							role: "system",
+						},
+						{
+							content: [{ cache_control: { type: "ephemeral" }, text: "test message", type: "text" }],
+							role: "user",
+						},
+					],
+					model: "anthropic/claude-3.7-sonnet",
 					stream: true,
+					stream_options: { include_usage: true },
+					temperature: 0,
+					thinking: undefined,
+					top_p: undefined,
+					transforms: ["middle-out"],
 				}),
 			)
 		})
@@ -255,7 +267,7 @@ describe("OpenRouterHandler", () => {

 			expect(mockCreate).toHaveBeenCalledWith({
 				model: mockOptions.openRouterModelId,
-				max_tokens: 1000,
+				max_tokens: 8192,
 				thinking: undefined,
 				temperature: 0,
 				messages: [{ role: "user", content: "test prompt" }],
```

src/api/providers/__tests__/unbound.test.ts

Lines changed: 6 additions & 2 deletions
```diff
@@ -1,7 +1,11 @@
-import { UnboundHandler } from "../unbound"
-import { ApiHandlerOptions } from "../../../shared/api"
+// npx jest src/api/providers/__tests__/unbound.test.ts
+
 import { Anthropic } from "@anthropic-ai/sdk"

+import { ApiHandlerOptions } from "../../../shared/api"
+
+import { UnboundHandler } from "../unbound"
+
 // Mock OpenAI client
 const mockCreate = jest.fn()
 const mockWithResponse = jest.fn()
```

src/api/providers/glama.ts

Lines changed: 2 additions & 1 deletion
```diff
@@ -21,7 +21,8 @@ export class GlamaHandler extends RouterProvider implements SingleCompletionHand
 			name: "unbound",
 			baseURL: "https://glama.ai/api/gateway/openai/v1",
 			apiKey: options.glamaApiKey,
-			modelId: options.glamaModelId ?? glamaDefaultModelId,
+			modelId: options.glamaModelId,
+			defaultModelId: glamaDefaultModelId,
 			defaultModelInfo: glamaDefaultModelInfo,
 		})
 	}
```

src/api/providers/openrouter.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -193,7 +193,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 		}
 	}

-	private async fetchModel() {
+	public async fetchModel() {
 		this.models = await getModels("openrouter")
 		return this.getModel()
 	}
```
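
The visibility change exists so the updated tests can drive the async path directly: `fetchModel()` refreshes the router's model cache via `getModels("openrouter")` and then resolves through the synchronous `getModel()`. A minimal sketch of the call pattern the tests above rely on (options abridged, Jest globals assumed):

```ts
// Minimal sketch mirroring the updated openrouter.test.ts: awaiting the now
// public fetchModel() both populates the model cache and returns the resolved
// model, so assertions no longer race an empty cache.
it("resolves the default model after fetching", async () => {
	const handler = new OpenRouterHandler({ openRouterApiKey: "test-key" })
	const result = await handler.fetchModel()
	expect(result.id).toBe("anthropic/claude-3.7-sonnet")
	expect(result.info.supportsPromptCache).toBe(true)
})
```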

src/api/providers/requesty.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -35,7 +35,7 @@ export class RequestyHandler extends OpenAiHandler {

 	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
 		this.models = await getModels("requesty")
-		return super.createMessage(systemPrompt, messages)
+		yield* super.createMessage(systemPrompt, messages)
 	}

 	override getModel(): { id: string; info: ModelInfo } {
```
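
This one-line fix is behavioral, not cosmetic: `createMessage` is an async generator (`async *`), and `return super.createMessage(...)` ends the generator immediately, handing the inner generator back as a return value that `for await` silently discards, so callers would see an empty stream. `yield*` delegates instead, forwarding every chunk. A self-contained sketch with hypothetical generators:

```ts
// Hypothetical stand-ins (not the project's classes) contrasting `return`
// and `yield*` inside an async generator.
async function* inner(): AsyncGenerator<string> {
	yield "chunk 1"
	yield "chunk 2"
}

async function* withReturn(): AsyncGenerator<string> {
	// The inner generator becomes the return value, which `for await` ignores.
	return inner()
}

async function* withYieldStar(): AsyncGenerator<string> {
	// Delegation: each chunk from `inner` is forwarded to the consumer.
	yield* inner()
}

async function main() {
	for await (const chunk of withReturn()) console.log("return:", chunk) // prints nothing
	for await (const chunk of withYieldStar()) console.log("yield*:", chunk) // prints both chunks
}

main()
```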

src/api/providers/router-provider.ts

Lines changed: 19 additions & 4 deletions
```diff
@@ -8,7 +8,8 @@ type RouterProviderOptions = {
 	name: RouterName
 	baseURL: string
 	apiKey?: string
-	modelId: string
+	modelId?: string
+	defaultModelId: string
 	defaultModelInfo: ModelInfo
 	options: ApiHandlerOptions
 }
@@ -17,16 +18,26 @@ export abstract class RouterProvider extends BaseProvider {
 	protected readonly options: ApiHandlerOptions
 	protected readonly name: RouterName
 	protected models: ModelRecord = {}
-	protected readonly modelId: string
+	protected readonly modelId?: string
+	protected readonly defaultModelId: string
 	protected readonly defaultModelInfo: ModelInfo
 	protected readonly client: OpenAI

-	constructor({ options, name, baseURL, apiKey = "not-provided", modelId, defaultModelInfo }: RouterProviderOptions) {
+	constructor({
+		options,
+		name,
+		baseURL,
+		apiKey = "not-provided",
+		modelId,
+		defaultModelId,
+		defaultModelInfo,
+	}: RouterProviderOptions) {
 		super()

 		this.options = options
 		this.name = name
 		this.modelId = modelId
+		this.defaultModelId = defaultModelId
 		this.defaultModelInfo = defaultModelInfo

 		this.client = new OpenAI({ baseURL, apiKey })
@@ -38,7 +49,11 @@ export abstract class RouterProvider extends BaseProvider {
 	}

 	override getModel(): { id: string; info: ModelInfo } {
-		return { id: this.modelId, info: this.models[this.modelId] ?? this.defaultModelInfo }
+		const id = this.modelId ?? this.defaultModelId
+
+		return this.models[id]
+			? { id, info: this.models[id] }
+			: { id: this.defaultModelId, info: this.defaultModelInfo }
 	}

 	protected supportsTemperature(modelId: string): boolean {
```
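
Net effect of the `getModel` rework: a configured model id is honored only when it appears in the fetched model list, and an unknown id now falls back to the default id and info together, instead of keeping the unknown id paired with default info. A standalone restatement of the rule (simplified `ModelInfo`, illustrative ids):

```ts
// Standalone sketch of the new resolution rule; ModelInfo is reduced to one
// field for illustration.
type ModelInfo = { maxTokens: number }

function resolveModel(
	models: Record<string, ModelInfo>,
	modelId: string | undefined,
	defaultModelId: string,
	defaultModelInfo: ModelInfo,
): { id: string; info: ModelInfo } {
	const id = modelId ?? defaultModelId
	return models[id] ? { id, info: models[id] } : { id: defaultModelId, info: defaultModelInfo }
}

const models = { "anthropic/claude-3.7-sonnet": { maxTokens: 8192 } }
const fallbackInfo = { maxTokens: 4096 }

// Configured id found in the fetched list: honored as-is.
resolveModel(models, "anthropic/claude-3.7-sonnet", "anthropic/claude-3.7-sonnet", fallbackInfo)

// Configured id missing from the list: id AND info fall back to the defaults
// (previously the unknown id was kept and only the info fell back).
resolveModel(models, "no/such-model", "anthropic/claude-3.7-sonnet", fallbackInfo)

// No id configured: the default id resolves against the fetched list first.
resolveModel(models, undefined, "anthropic/claude-3.7-sonnet", fallbackInfo)
```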

src/api/providers/unbound.ts

Lines changed: 2 additions & 1 deletion
```diff
@@ -23,7 +23,8 @@ export class UnboundHandler extends RouterProvider implements SingleCompletionHa
 			name: "unbound",
 			baseURL: "https://api.getunbound.ai/v1",
 			apiKey: options.unboundApiKey,
-			modelId: options.unboundModelId ?? unboundDefaultModelId,
+			modelId: options.unboundModelId,
+			defaultModelId: unboundDefaultModelId,
 			defaultModelInfo: unboundDefaultModelInfo,
 		})
 	}
```
