
Commit 344b2e8

Fix tests

1 parent 795179a commit 344b2e8

5 files changed: +45 -59 lines changed

src/api/providers/__tests__/deepseek.test.ts

Lines changed: 2 additions & 6 deletions

@@ -140,12 +140,8 @@ describe("DeepSeekHandler", () => {

 	it("should set includeMaxTokens to true", () => {
 		// Create a new handler and verify OpenAI client was called with includeMaxTokens
-		new DeepSeekHandler(mockOptions)
-		expect(OpenAI).toHaveBeenCalledWith(
-			expect.objectContaining({
-				apiKey: mockOptions.deepSeekApiKey,
-			}),
-		)
+		const _handler = new DeepSeekHandler(mockOptions)
+		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: mockOptions.deepSeekApiKey }))
 	})
 })
src/api/providers/__tests__/requesty.test.ts

Lines changed: 6 additions & 4 deletions

@@ -7,7 +7,9 @@ import { RequestyHandler } from "../requesty"
 import { ApiHandlerOptions } from "../../../shared/api"

 jest.mock("openai")
+
 jest.mock("delay", () => jest.fn(() => Promise.resolve()))
+
 jest.mock("../fetchers/modelCache", () => ({
 	getModels: jest.fn().mockImplementation(() => {
 		return Promise.resolve({
@@ -150,7 +152,7 @@ describe("RequestyHandler", () => {
 			// Verify OpenAI client was called with correct parameters
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
-					max_tokens: undefined,
+					max_tokens: 8192,
 					messages: [
 						{
 							role: "system",
@@ -164,7 +166,7 @@ describe("RequestyHandler", () => {
 					model: "coding/claude-4-sonnet",
 					stream: true,
 					stream_options: { include_usage: true },
-					temperature: undefined,
+					temperature: 0,
 				}),
 			)
 		})
@@ -198,9 +200,9 @@ describe("RequestyHandler", () => {

 			expect(mockCreate).toHaveBeenCalledWith({
 				model: mockOptions.requestyModelId,
-				max_tokens: undefined,
+				max_tokens: 8192,
 				messages: [{ role: "system", content: "test prompt" }],
-				temperature: undefined,
+				temperature: 0,
 			})
 		})
src/api/providers/mistral.ts

Lines changed: 31 additions & 37 deletions

@@ -18,60 +18,50 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand

 	constructor(options: ApiHandlerOptions) {
 		super()
+
 		if (!options.mistralApiKey) {
 			throw new Error("Mistral API key is required")
 		}

-		// Set default model ID if not provided
-		this.options = {
-			...options,
-			apiModelId: options.apiModelId || mistralDefaultModelId,
-		}
+		// Set default model ID if not provided.
+		const apiModelId = options.apiModelId || mistralDefaultModelId
+		this.options = { ...options, apiModelId }

-		const baseUrl = this.getBaseUrl()
-		console.debug(`[Roo Code] MistralHandler using baseUrl: ${baseUrl}`)
 		this.client = new Mistral({
-			serverURL: baseUrl,
+			serverURL: apiModelId.startsWith("codestral-")
+				? this.options.mistralCodestralUrl || "https://codestral.mistral.ai"
+				: "https://api.mistral.ai",
 			apiKey: this.options.mistralApiKey,
 		})
 	}

-	private getBaseUrl(): string {
-		const modelId = this.options.apiModelId ?? mistralDefaultModelId
-		console.debug(`[Roo Code] MistralHandler using modelId: ${modelId}`)
-		if (modelId?.startsWith("codestral-")) {
-			return this.options.mistralCodestralUrl || "https://codestral.mistral.ai"
-		}
-		return "https://api.mistral.ai"
-	}
-
 	override async *createMessage(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
-		const { id: model } = this.getModel()
+		const { id: model, maxTokens, temperature } = this.getModel()

 		const response = await this.client.chat.stream({
-			model: this.options.apiModelId || mistralDefaultModelId,
+			model,
 			messages: [{ role: "system", content: systemPrompt }, ...convertToMistralMessages(messages)],
-			maxTokens: this.options.includeMaxTokens ? this.getModel().info.maxTokens : undefined,
-			temperature: this.options.modelTemperature ?? MISTRAL_DEFAULT_TEMPERATURE,
+			maxTokens,
+			temperature,
 		})

 		for await (const chunk of response) {
 			const delta = chunk.data.choices[0]?.delta
+
 			if (delta?.content) {
 				let content: string = ""
+
 				if (typeof delta.content === "string") {
 					content = delta.content
 				} else if (Array.isArray(delta.content)) {
 					content = delta.content.map((c) => (c.type === "text" ? c.text : "")).join("")
 				}
-				yield {
-					type: "text",
-					text: content,
-				}
+
+				yield { type: "text", text: content }
 			}

 			if (chunk.data.usage) {
@@ -84,35 +74,39 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 		}
 	}

-	override getModel(): { id: MistralModelId; info: ModelInfo } {
-		const modelId = this.options.apiModelId
-		if (modelId && modelId in mistralModels) {
-			const id = modelId as MistralModelId
-			return { id, info: mistralModels[id] }
-		}
-		return {
-			id: mistralDefaultModelId,
-			info: mistralModels[mistralDefaultModelId],
-		}
+	override getModel() {
+		const id = this.options.apiModelId ?? mistralDefaultModelId
+		const info = mistralModels[id as MistralModelId] ?? mistralModels[mistralDefaultModelId]
+
+		// @TODO: Move this to the `getModelParams` function.
+		const maxTokens = this.options.includeMaxTokens ? info.maxTokens : undefined
+		const temperature = this.options.modelTemperature ?? MISTRAL_DEFAULT_TEMPERATURE
+
+		return { id, info, maxTokens, temperature }
 	}

 	async completePrompt(prompt: string): Promise<string> {
 		try {
+			const { id: model, temperature } = this.getModel()
+
 			const response = await this.client.chat.complete({
-				model: this.options.apiModelId || mistralDefaultModelId,
+				model,
 				messages: [{ role: "user", content: prompt }],
-				temperature: this.options.modelTemperature ?? MISTRAL_DEFAULT_TEMPERATURE,
+				temperature,
 			})

 			const content = response.choices?.[0]?.message.content
+
 			if (Array.isArray(content)) {
 				return content.map((c) => (c.type === "text" ? c.text : "")).join("")
 			}
+
 			return content || ""
 		} catch (error) {
 			if (error instanceof Error) {
 				throw new Error(`Mistral completion error: ${error.message}`)
 			}
+
 			throw error
 		}
 	}

src/api/providers/openai.ts

Lines changed: 1 addition & 0 deletions

@@ -154,6 +154,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			...(reasoning && reasoning),
 		}

+		// @TODO: Move this to the `getModelParams` function.
 		if (this.options.includeMaxTokens) {
 			requestOptions.max_tokens = modelInfo.maxTokens
 		}
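The @TODO added here and in mistral.ts points at a shared `getModelParams` helper whose actual signature is not shown in this commit. A purely hypothetical sketch of the kind of consolidation the comment suggests; the helper name, parameter types, and shape are assumptions, not the real getModelParams:

// Hypothetical helper only; not the project's actual getModelParams.
interface MaxTokensTemperatureOptions {
	includeMaxTokens?: boolean
	modelTemperature?: number | null
}

function resolveMaxTokensAndTemperature(
	options: MaxTokensTemperatureOptions,
	info: { maxTokens?: number },
	defaultTemperature: number,
): { maxTokens: number | undefined; temperature: number } {
	return {
		// Only cap output when the caller opted in via includeMaxTokens.
		maxTokens: options.includeMaxTokens ? info.maxTokens : undefined,
		// Fall back to the provider default when no explicit temperature is set.
		temperature: options.modelTemperature ?? defaultTemperature,
	}
}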

src/api/providers/requesty.ts

Lines changed: 5 additions & 12 deletions

@@ -116,8 +116,8 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 			model,
 			max_tokens,
 			temperature,
-			reasoning_effort,
-			thinking,
+			...(reasoning_effort && { reasoning_effort }),
+			...(thinking && { thinking }),
 			stream: true,
 			stream_options: { include_usage: true },
 			requesty: { trace_id: metadata?.taskId, extra: { mode: metadata?.mode } },
@@ -148,20 +148,13 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 	}

 	async completePrompt(prompt: string): Promise<string> {
-		const model = await this.fetchModel()
+		const { id: model, maxTokens: max_tokens, temperature } = await this.fetchModel()

 		let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [{ role: "system", content: prompt }]

-		let maxTokens = undefined
-		if (this.options.includeMaxTokens) {
-			maxTokens = model.info.maxTokens
-		}
-
-		const temperature = this.options.modelTemperature
-
 		const completionParams: RequestyChatCompletionParams = {
-			model: model.id,
-			max_tokens: maxTokens,
+			model,
+			max_tokens,
 			messages: openAiMessages,
 			temperature: temperature,
 		}
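The `...(value && { value })` spreads above drop the keys entirely when the value is undefined, instead of sending explicit `reasoning_effort: undefined` / `thinking: undefined` fields. A standalone sketch of the pattern, with placeholder values:

// Placeholder values for illustration only.
const reasoning_effort: string | undefined = undefined
const thinking: { type: string; budget_tokens: number } | undefined = { type: "enabled", budget_tokens: 1024 }

const body = {
	model: "example/model", // placeholder model ID
	// Spreading `undefined` is a no-op, so the key is simply omitted when the value is falsy.
	...(reasoning_effort && { reasoning_effort }),
	...(thinking && { thinking }),
}

console.log(Object.keys(body)) // ["model", "thinking"] — reasoning_effort is absent, not present-but-undefined

Note that `&&` also drops legitimate falsy values such as 0 or an empty string; that is harmless here because both fields are either populated or undefined.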
