Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -384,6 +384,7 @@ export type ZaiApiLine = z.infer<typeof zaiApiLineSchema>
// Z AI provider settings: API key, regional API entrypoint ("line"), and an
// optional opt-in toggle for thinking/reasoning mode on models that support it.
const zaiSchema = apiModelIdProviderModelSchema.extend({
	zaiApiKey: z.string().optional(),
	zaiApiLine: zaiApiLineSchema.optional(),
	zaiEnableThinking: z.boolean().optional(),
})

const fireworksSchema = apiModelIdProviderModelSchema.extend({
Expand Down
92 changes: 92 additions & 0 deletions src/api/providers/__tests__/zai.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -262,5 +262,97 @@ describe("ZAiHandler", () => {
undefined,
)
})

it("should include enable_thinking parameter when zaiEnableThinking is true", async () => {
	const handlerWithThinking = new ZAiHandler({
		zaiApiKey: "test-zai-api-key",
		zaiApiLine: "international",
		zaiEnableThinking: true,
	})

	// Stub the SDK call with an empty async-iterable stream; we only care
	// about the request parameters, not the streamed chunks.
	mockCreate.mockImplementationOnce(() => ({
		async *[Symbol.asyncIterator]() {},
	}))

	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }]
	const stream = handlerWithThinking.createMessage("Test system prompt", messages)
	await stream.next()

	// The vendor-specific flag must be forwarded in the completion request.
	expect(mockCreate).toHaveBeenCalledWith(
		expect.objectContaining({
			enable_thinking: true,
		}),
		undefined,
	)
})

it("should not include enable_thinking parameter when zaiEnableThinking is false", async () => {
	const handlerWithoutThinking = new ZAiHandler({
		zaiApiKey: "test-zai-api-key",
		zaiApiLine: "international",
		zaiEnableThinking: false,
	})

	// Stub the SDK call with an empty async-iterable stream; only the
	// request parameters matter here.
	mockCreate.mockImplementationOnce(() => {
		return {
			[Symbol.asyncIterator]: () => ({
				async next() {
					return { done: true }
				},
			}),
		}
	})

	const systemPrompt = "Test system prompt"
	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }]

	const messageGenerator = handlerWithoutThinking.createMessage(systemPrompt, messages)
	await messageGenerator.next()

	expect(mockCreate).toHaveBeenCalledWith(
		expect.not.objectContaining({
			enable_thinking: true,
		}),
		undefined,
	)
	// `not.objectContaining({ enable_thinking: true })` would also pass if the
	// key were present with a non-true value; assert the key is entirely absent.
	const lastCallParams = mockCreate.mock.calls.at(-1)?.[0]
	expect(lastCallParams).not.toHaveProperty("enable_thinking")
})

it("should not include enable_thinking parameter when zaiEnableThinking is undefined", async () => {
	const handlerDefault = new ZAiHandler({
		zaiApiKey: "test-zai-api-key",
		zaiApiLine: "international",
	})

	// Stub the SDK call with an empty async-iterable stream; only the
	// request parameters matter here.
	mockCreate.mockImplementationOnce(() => {
		return {
			[Symbol.asyncIterator]: () => ({
				async next() {
					return { done: true }
				},
			}),
		}
	})

	const systemPrompt = "Test system prompt"
	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }]

	const messageGenerator = handlerDefault.createMessage(systemPrompt, messages)
	await messageGenerator.next()

	expect(mockCreate).toHaveBeenCalledWith(
		expect.not.objectContaining({
			enable_thinking: true,
		}),
		undefined,
	)
	// `not.objectContaining({ enable_thinking: true })` would also pass if the
	// key were present with a non-true value; assert the key is entirely absent.
	const lastCallParams = mockCreate.mock.calls.at(-1)?.[0]
	expect(lastCallParams).not.toHaveProperty("enable_thinking")
})
})
})
43 changes: 43 additions & 0 deletions src/api/providers/zai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,12 @@ import {
ZAI_DEFAULT_TEMPERATURE,
zaiApiLineConfigs,
} from "@roo-code/types"
import type { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import type { ApiHandlerOptions } from "../../shared/api"
import type { ApiHandlerCreateMessageMetadata } from "../index"
import { convertToOpenAiMessages } from "../transform/openai-format"

import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"

Expand All @@ -29,4 +33,43 @@ export class ZAiHandler extends BaseOpenAiCompatibleProvider<InternationalZAiMod
defaultTemperature: ZAI_DEFAULT_TEMPERATURE,
})
}

protected override createStream(
systemPrompt: string,
messages: Anthropic.Messages.MessageParam[],
metadata?: ApiHandlerCreateMessageMetadata,
requestOptions?: OpenAI.RequestOptions,
) {
const {
id: model,
info: { maxTokens: max_tokens },
} = this.getModel()

const temperature = this.options.modelTemperature ?? ZAI_DEFAULT_TEMPERATURE

// Build base parameters
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
model,
max_tokens,
temperature,
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
stream: true,
stream_options: { include_usage: true },
}

// Add thinking parameter for models that support it (GLM-4.6, etc.)
// Only add if explicitly enabled via the zaiEnableThinking setting
if (this.options.zaiEnableThinking === true) {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[P2] Scope 'enable_thinking' to supported models. As written, any model will receive the flag when the setting is on, which can trigger 400s for models that don't implement thinking mode. Consider gating by model id (e.g., /^glm-4.6/) or a capability map so unsupported models are unaffected.

// Z AI uses a custom parameter for thinking mode
// This follows the pattern used by other providers with thinking support
;(params as any).enable_thinking = true
}

try {
return this.client.chat.completions.create(params, requestOptions)
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
throw new Error(`Z AI completion error: ${errorMessage}`)
}
}
}
19 changes: 18 additions & 1 deletion webview-ui/src/components/settings/providers/ZAi.tsx
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import { useCallback } from "react"
import { VSCodeTextField, VSCodeDropdown, VSCodeOption } from "@vscode/webview-ui-toolkit/react"
import { VSCodeTextField, VSCodeDropdown, VSCodeOption, VSCodeCheckbox } from "@vscode/webview-ui-toolkit/react"

import { zaiApiLineConfigs, zaiApiLineSchema, type ProviderSettings } from "@roo-code/types"

Expand Down Expand Up @@ -28,6 +28,13 @@ export const ZAi = ({ apiConfiguration, setApiConfigurationField }: ZAiProps) =>
[setApiConfigurationField],
)

const handleCheckboxChange = useCallback(
(field: keyof ProviderSettings) => () => {
setApiConfigurationField(field, !apiConfiguration?.[field])
},
[setApiConfigurationField, apiConfiguration],
)

return (
<>
<div>
Expand Down Expand Up @@ -73,6 +80,16 @@ export const ZAi = ({ apiConfiguration, setApiConfigurationField }: ZAiProps) =>
</VSCodeButtonLink>
)}
</div>
<div>
<VSCodeCheckbox
checked={apiConfiguration?.zaiEnableThinking || false}
onChange={handleCheckboxChange("zaiEnableThinking")}>
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[P3] Event-driven checkbox: Flipping based on current state can race with queued updates in React. Prefer using the event's checked value to set an explicit boolean, e.g. onChange={(e) => setApiConfigurationField('zaiEnableThinking', (e.target as HTMLInputElement).checked)}.

<label className="font-medium">{t("settings:providers.zaiEnableThinking")}</label>
</VSCodeCheckbox>
<div className="text-xs text-vscode-descriptionForeground mt-1 ml-6">
{t("settings:providers.zaiEnableThinkingDescription")}
</div>
</div>
</>
)
}
2 changes: 2 additions & 0 deletions webview-ui/src/i18n/locales/en/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -302,6 +302,8 @@
"getZaiApiKey": "Get Z AI API Key",
"zaiEntrypoint": "Z AI Entrypoint",
"zaiEntrypointDescription": "Please select the appropriate API entrypoint based on your location. If you are in China, choose open.bigmodel.cn. Otherwise, choose api.z.ai.",
"zaiEnableThinking": "Enable Thinking Mode",
"zaiEnableThinkingDescription": "Enable thinking/reasoning mode for supported Z AI models (e.g., GLM-4.6). This allows models to show their reasoning process before providing answers.",
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[P2] Provider-specific copy: This toggle is under the Z AI provider. Mentioning DeepSeek here can confuse users. Suggest: "Enable thinking/reasoning mode for supported Z AI models (e.g., GLM-4.6). This allows models to show their reasoning process before providing answers."

"geminiApiKey": "Gemini API Key",
"getGroqApiKey": "Get Groq API Key",
"groqApiKey": "Groq API Key",
Expand Down
Loading