Commit b433d1f

roomote[bot] and daniel-lxs authored
fix: handle Mistral thinking content as reasoning chunks (#7106)
* fix: handle Mistral thinking content as reasoning chunks

  - Add TypeScript interfaces for Mistral content types (text and thinking)
  - Update createMessage to yield reasoning chunks for thinking content
  - Update completePrompt to filter out thinking content in non-streaming mode
  - Add comprehensive tests for reasoning content handling
  - Follow the pattern used by other providers (Anthropic, OpenAI, Gemini, etc.)

  Fixes #6842

* fix: resolve TypeScript type issue in completePrompt method

* fix: handle Mistral thinking content chunks in streaming responses

  - Added ContentChunkWithThinking type helper to handle thinking chunks
  - Properly converts thinking content to reasoning chunks in streaming
  - Filters out thinking content in non-streaming completePrompt responses
  - Confirmed that the Mistral API does send thinking chunks with type "thinking"
  - Works with Mistral SDK v1.9.18

Co-authored-by: Roo Code <[email protected]>
Co-authored-by: daniel-lxs <[email protected]>
1 parent 8e4c0ae commit b433d1f
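What the change handles, in brief: Mistral's streaming deltas can now carry an array of content chunks in which some entries have type "thinking" (reasoning tokens) alongside ordinary "text" entries. Below is a minimal illustrative sketch of that mapping, distilled from the diff that follows; the chunk shapes here are assumptions based on this PR's ContentChunkWithThinking helper, not the SDK's exact exported types.

// Sketch only: simplified chunk shapes and the thinking -> reasoning mapping.
type ThinkingPart = { type: "text"; text: string }
type DeltaChunk =
	| { type: "text"; text: string }
	| { type: "thinking"; thinking: ThinkingPart[] }

function* mapDeltaContent(content: DeltaChunk[]) {
	for (const chunk of content) {
		if (chunk.type === "thinking") {
			// Reasoning tokens are surfaced as separate "reasoning" stream chunks
			for (const part of chunk.thinking) {
				if (part.text) yield { type: "reasoning" as const, text: part.text }
			}
		} else if (chunk.text) {
			// Ordinary text passes through unchanged
			yield { type: "text" as const, text: chunk.text }
		}
	}
}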

File tree

4 files changed (+186, -20 lines)

pnpm-lock.yaml

Lines changed: 6 additions & 6 deletions
Some generated files are not rendered by default.

src/api/providers/__tests__/mistral.spec.ts

Lines changed: 143 additions & 1 deletion
@@ -1,5 +1,6 @@
 // Mock Mistral client - must come before other imports
 const mockCreate = vi.fn()
+const mockComplete = vi.fn()
 vi.mock("@mistralai/mistralai", () => {
 	return {
 		Mistral: vi.fn().mockImplementation(() => ({
@@ -21,6 +22,17 @@ vi.mock("@mistralai/mistralai", () => {
 				}
 				return stream
 			}),
+			complete: mockComplete.mockImplementation(async (_options) => {
+				return {
+					choices: [
+						{
+							message: {
+								content: "Test response",
+							},
+						},
+					],
+				}
+			}),
 		},
 	})),
 }
@@ -29,7 +41,7 @@ vi.mock("@mistralai/mistralai", () => {
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { MistralHandler } from "../mistral"
 import type { ApiHandlerOptions } from "../../../shared/api"
-import type { ApiStreamTextChunk } from "../../transform/stream"
+import type { ApiStreamTextChunk, ApiStreamReasoningChunk } from "../../transform/stream"

 describe("MistralHandler", () => {
 	let handler: MistralHandler
@@ -44,6 +56,7 @@ describe("MistralHandler", () => {
 		}
 		handler = new MistralHandler(mockOptions)
 		mockCreate.mockClear()
+		mockComplete.mockClear()
 	})

 	describe("constructor", () => {
@@ -122,5 +135,134 @@ describe("MistralHandler", () => {
 			mockCreate.mockRejectedValueOnce(new Error("API Error"))
 			await expect(handler.createMessage(systemPrompt, messages).next()).rejects.toThrow("API Error")
 		})
+
+		it("should handle thinking content as reasoning chunks", async () => {
+			// Mock stream with thinking content matching new SDK structure
+			mockCreate.mockImplementationOnce(async (_options) => {
+				const stream = {
+					[Symbol.asyncIterator]: async function* () {
+						yield {
+							data: {
+								choices: [
+									{
+										delta: {
+											content: [
+												{
+													type: "thinking",
+													thinking: [{ type: "text", text: "Let me think about this..." }],
+												},
+												{ type: "text", text: "Here's the answer" },
+											],
+										},
+										index: 0,
+									},
+								],
+							},
+						}
+					},
+				}
+				return stream
+			})
+
+			const iterator = handler.createMessage(systemPrompt, messages)
+			const results: (ApiStreamTextChunk | ApiStreamReasoningChunk)[] = []
+
+			for await (const chunk of iterator) {
+				if ("text" in chunk) {
+					results.push(chunk as ApiStreamTextChunk | ApiStreamReasoningChunk)
+				}
+			}
+
+			expect(results).toHaveLength(2)
+			expect(results[0]).toEqual({ type: "reasoning", text: "Let me think about this..." })
+			expect(results[1]).toEqual({ type: "text", text: "Here's the answer" })
+		})
+
+		it("should handle mixed content arrays correctly", async () => {
+			// Mock stream with mixed content matching new SDK structure
+			mockCreate.mockImplementationOnce(async (_options) => {
+				const stream = {
+					[Symbol.asyncIterator]: async function* () {
+						yield {
+							data: {
+								choices: [
+									{
+										delta: {
+											content: [
+												{ type: "text", text: "First text" },
+												{
+													type: "thinking",
+													thinking: [{ type: "text", text: "Some reasoning" }],
+												},
+												{ type: "text", text: "Second text" },
+											],
+										},
+										index: 0,
+									},
+								],
+							},
+						}
+					},
+				}
+				return stream
+			})
+
+			const iterator = handler.createMessage(systemPrompt, messages)
+			const results: (ApiStreamTextChunk | ApiStreamReasoningChunk)[] = []
+
+			for await (const chunk of iterator) {
+				if ("text" in chunk) {
+					results.push(chunk as ApiStreamTextChunk | ApiStreamReasoningChunk)
+				}
+			}
+
+			expect(results).toHaveLength(3)
+			expect(results[0]).toEqual({ type: "text", text: "First text" })
+			expect(results[1]).toEqual({ type: "reasoning", text: "Some reasoning" })
+			expect(results[2]).toEqual({ type: "text", text: "Second text" })
+		})
+	})
+
+	describe("completePrompt", () => {
+		it("should complete prompt successfully", async () => {
+			const prompt = "Test prompt"
+			const result = await handler.completePrompt(prompt)
+
+			expect(mockComplete).toHaveBeenCalledWith({
+				model: mockOptions.apiModelId,
+				messages: [{ role: "user", content: prompt }],
+				temperature: 0,
+			})
+
+			expect(result).toBe("Test response")
+		})
+
+		it("should filter out thinking content in completePrompt", async () => {
+			mockComplete.mockImplementationOnce(async (_options) => {
+				return {
+					choices: [
+						{
+							message: {
+								content: [
+									{ type: "thinking", text: "Let me think..." },
+									{ type: "text", text: "Answer part 1" },
+									{ type: "text", text: "Answer part 2" },
+								],
+							},
+						},
+					],
+				}
+			})

+			const prompt = "Test prompt"
+			const result = await handler.completePrompt(prompt)
+
+			expect(result).toBe("Answer part 1Answer part 2")
+		})
+
+		it("should handle errors in completePrompt", async () => {
+			mockComplete.mockRejectedValueOnce(new Error("API Error"))
+			await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Mistral completion error: API Error")
+		})
 	})
 })
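The stream mocks above all follow the same pattern: any object exposing a Symbol.asyncIterator generator satisfies the for await loop in createMessage. A hypothetical helper (not part of this PR) could factor out the repeated envelope:

// Hypothetical test helper, not in this PR: wraps content arrays in the
// event envelope the handler iterates over ({ data: { choices: [{ delta }] } }).
function fakeStream(...contents: unknown[][]) {
	return {
		[Symbol.asyncIterator]: async function* () {
			for (const content of contents) {
				yield { data: { choices: [{ delta: { content }, index: 0 }] } }
			}
		},
	}
}

// e.g. mockCreate.mockImplementationOnce(async () => fakeStream([{ type: "text", text: "Hi" }]))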

src/api/providers/mistral.ts

Lines changed: 36 additions & 12 deletions
@@ -11,6 +11,14 @@ import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"

+// Type helper to handle thinking chunks from Mistral API
+// The SDK includes ThinkChunk but TypeScript has trouble with the discriminated union
+type ContentChunkWithThinking = {
+	type: string
+	text?: string
+	thinking?: Array<{ type: string; text?: string }>
+}
+
 export class MistralHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: Mistral
@@ -48,26 +56,38 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 			temperature,
 		})

-		for await (const chunk of response) {
-			const delta = chunk.data.choices[0]?.delta
+		for await (const event of response) {
+			const delta = event.data.choices[0]?.delta

 			if (delta?.content) {
-				let content: string = ""
-
 				if (typeof delta.content === "string") {
-					content = delta.content
+					// Handle string content as text
+					yield { type: "text", text: delta.content }
 				} else if (Array.isArray(delta.content)) {
-					content = delta.content.map((c) => (c.type === "text" ? c.text : "")).join("")
+					// Handle array of content chunks
+					// The SDK v1.9.18 supports ThinkChunk with type "thinking"
+					for (const chunk of delta.content as ContentChunkWithThinking[]) {
+						if (chunk.type === "thinking" && chunk.thinking) {
+							// Handle thinking content as reasoning chunks
+							// ThinkChunk has a 'thinking' property that contains an array of text/reference chunks
+							for (const thinkingPart of chunk.thinking) {
+								if (thinkingPart.type === "text" && thinkingPart.text) {
+									yield { type: "reasoning", text: thinkingPart.text }
+								}
+							}
+						} else if (chunk.type === "text" && chunk.text) {
+							// Handle text content normally
+							yield { type: "text", text: chunk.text }
+						}
+					}
 				}
-
-				yield { type: "text", text: content }
 			}

-			if (chunk.data.usage) {
+			if (event.data.usage) {
 				yield {
 					type: "usage",
-					inputTokens: chunk.data.usage.promptTokens || 0,
-					outputTokens: chunk.data.usage.completionTokens || 0,
+					inputTokens: event.data.usage.promptTokens || 0,
+					outputTokens: event.data.usage.completionTokens || 0,
 				}
 			}
 		}
@@ -97,7 +117,11 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 		const content = response.choices?.[0]?.message.content

 		if (Array.isArray(content)) {
-			return content.map((c) => (c.type === "text" ? c.text : "")).join("")
+			// Only return text content, filter out thinking content for non-streaming
+			return (content as ContentChunkWithThinking[])
+				.filter((c) => c.type === "text" && c.text)
+				.map((c) => c.text || "")
+				.join("")
 		}

 		return content || ""
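For callers, the practical effect is that reasoning now arrives as its own chunk type instead of being flattened into text. A hedged usage sketch follows (the option names mirror the tests above; the model id and key are placeholders, and mistralApiKey is an assumed option name):

// Sketch: separating reasoning from answer text when consuming the stream.
const handler = new MistralHandler({
	apiModelId: "magistral-medium-latest", // placeholder model id
	mistralApiKey: "<your-key>", // assumed option name, placeholder value
})

let reasoning = ""
let answer = ""
for await (const chunk of handler.createMessage("You are helpful.", [{ role: "user", content: "Hi" }])) {
	if (chunk.type === "reasoning") reasoning += chunk.text
	else if (chunk.type === "text") answer += chunk.text
	// "usage" chunks carry token counts and are ignored here
}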

src/package.json

Lines changed: 1 addition & 1 deletion
@@ -429,7 +429,7 @@
 		"@aws-sdk/credential-providers": "^3.848.0",
 		"@google/genai": "^1.0.0",
 		"@lmstudio/sdk": "^1.1.1",
-		"@mistralai/mistralai": "^1.3.6",
+		"@mistralai/mistralai": "^1.9.18",
 		"@modelcontextprotocol/sdk": "^1.9.0",
 		"@qdrant/js-client-rest": "^1.14.0",
 		"@roo-code/cloud": "^0.19.0",
