Skip to content
Closed
Show file tree
Hide file tree
Changes from 10 commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
4dce299
feat: add toggle functionality for enabling/disabling all global MCP …
seedlord May 20, 2025
6a015ab
Merge branch 'RooVetGit:main' into feat/3676-mcp-reload-on-enable
seedlord May 20, 2025
74384e2
Update src/services/mcp/McpHub.ts
seedlord May 20, 2025
783cbc6
Update McpHub.test.ts
seedlord May 20, 2025
ac26ff6
revert mcp changes
seedlord May 21, 2025
03d96ab
Merge branch 'main' into feat/3676-mcp-reload-on-enable
seedlord May 21, 2025
a7a96fb
Add commands to reload and toggle MCP servers; update UI and localiza…
seedlord May 21, 2025
e8f99e2
Add tests for toggleAllServersDisabled and restartAllMcpServers metho…
seedlord May 21, 2025
7d5dd40
Merge branch 'main' into feat/3676-mcp-reload
seedlord May 21, 2025
cc278a6
Add new commands for reloading and toggling MCP server states
seedlord May 21, 2025
0123cad
Merge branch 'RooCodeInc:main' into feat/3676-mcp-reload-on-enable
seedlord May 22, 2025
8856dab
refactor: update command identifiers to include extension prefix
seedlord May 22, 2025
8d937c3
refactor: optimize server configuration updates using Promise.all
seedlord May 22, 2025
fe0a70e
revert Promise.all for toggleAllServerDisabled
seedlord May 22, 2025
999d27b
fixed Merge branch 'main' of https://github.com/RooVetGit/Roo-Code in…
seedlord May 22, 2025
442ad40
Merge branch 'main' of https://github.com/RooVetGit/Roo-Code into fea…
seedlord May 22, 2025
fe34086
Merge branch 'feat/3676-mcp-reload-on-enable' of https://github.com/s…
seedlord May 22, 2025
f0596ac
revert eslint changes
seedlord May 22, 2025
77e9ae1
Merge branch 'feat/3676-mcp-reload-on-enable' of https://github.com/s…
seedlord May 22, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,16 @@
"command": "roo.acceptInput",
"title": "%command.acceptInput.title%",
"category": "%configuration.title%"
},
{
"command": "extension.reloadAllMcpServers",
"title": "Reload All MCP Servers",
"icon": "$(sync)"
},
{
"command": "extension.toggleAllMcpServersDisabled",
"title": "Toggle All MCP Servers Enabled/Disabled",
"icon": "$(power)"
}
],
"menus": {
Expand Down
29 changes: 29 additions & 0 deletions src/activate/registerCommands.ts
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,35 @@ const getCommandsMap = ({ context, outputChannel, provider }: RegisterCommandOpt

visibleProvider.postMessageToWebview({ type: "acceptInput" })
},
"extension.reloadAllMcpServers": async () => {
	// Restart every configured MCP server for the currently visible provider.
	const activeProvider = getVisibleProviderOrLog(outputChannel)
	if (!activeProvider) {
		return
	}

	try {
		const hub = activeProvider.getMcpHub()
		if (hub) {
			await hub.restartAllMcpServers()
		}
	} catch (error) {
		const message = `Failed to reload all MCP servers: ${error}`
		outputChannel.appendLine(message)
		vscode.window.showErrorMessage(message)
	}
},
"extension.toggleAllMcpServersDisabled": async () => {
	// Flip the disabled state of every MCP server: if at least one server is
	// currently enabled, disable them all; otherwise enable them all.
	const activeProvider = getVisibleProviderOrLog(outputChannel)
	if (!activeProvider) {
		return
	}

	try {
		const hub = activeProvider.getMcpHub()
		if (!hub) {
			return
		}
		const shouldDisable = hub.getAllServers().some((server) => !server.disabled)
		await hub.toggleAllServersDisabled(shouldDisable)
	} catch (error) {
		const message = `Failed to toggle all MCP servers: ${error}`
		outputChannel.appendLine(message)
		vscode.window.showErrorMessage(message)
	}
},
})

export const openClineInNewTab = async ({ context, outputChannel }: Omit<RegisterCommandOptions, "provider">) => {
Expand Down
161 changes: 78 additions & 83 deletions src/api/providers/lmstudio.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,108 +25,103 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
}

override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
	// NOTE(review): the source span was a diff rendering with the removed and
	// added sides interleaved (every section appeared twice); this is the
	// reconstructed single copy — the two sides differed only in formatting.
	const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
		{ role: "system", content: systemPrompt },
		...convertToOpenAiMessages(messages),
	]

	// -------------------------
	// Track token usage
	// -------------------------
	// Flatten Anthropic-style messages (or a bare string) into text-only
	// content blocks so they can be passed to countTokens.
	const toContentBlocks = (
		blocks: Anthropic.Messages.MessageParam[] | string,
	): Anthropic.Messages.ContentBlockParam[] => {
		if (typeof blocks === "string") {
			return [{ type: "text", text: blocks }]
		}

		const result: Anthropic.Messages.ContentBlockParam[] = []
		for (const msg of blocks) {
			if (typeof msg.content === "string") {
				result.push({ type: "text", text: msg.content })
			} else if (Array.isArray(msg.content)) {
				for (const part of msg.content) {
					// Non-text parts (e.g. images) are skipped for counting.
					if (part.type === "text") {
						result.push({ type: "text", text: part.text })
					}
				}
			}
		}
		return result
	}

	// Token counting is best-effort: a failure is logged and reported as 0
	// rather than aborting the request.
	let inputTokens = 0
	try {
		inputTokens = await this.countTokens([{ type: "text", text: systemPrompt }, ...toContentBlocks(messages)])
	} catch (err) {
		console.error("[LmStudio] Failed to count input tokens:", err)
		inputTokens = 0
	}

	let assistantText = ""

	try {
		const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & { draft_model?: string } = {
			model: this.getModel().id,
			messages: openAiMessages,
			temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
			stream: true,
		}

		// Speculative decoding is an LM Studio extension to the OpenAI API.
		if (this.options.lmStudioSpeculativeDecodingEnabled && this.options.lmStudioDraftModelId) {
			params.draft_model = this.options.lmStudioDraftModelId
		}

		const results = await this.client.chat.completions.create(params)

		// Split <think>…</think> spans out of the stream as "reasoning" chunks;
		// everything else passes through as "text".
		const matcher = new XmlMatcher(
			"think",
			(chunk) =>
				({
					type: chunk.matched ? "reasoning" : "text",
					text: chunk.data,
				}) as const,
		)

		for await (const chunk of results) {
			const delta = chunk.choices[0]?.delta

			if (delta?.content) {
				assistantText += delta.content
				for (const processedChunk of matcher.update(delta.content)) {
					yield processedChunk
				}
			}
		}

		// Flush any buffered tail held by the matcher.
		for (const processedChunk of matcher.final()) {
			yield processedChunk
		}

		let outputTokens = 0
		try {
			outputTokens = await this.countTokens([{ type: "text", text: assistantText }])
		} catch (err) {
			console.error("[LmStudio] Failed to count output tokens:", err)
			outputTokens = 0
		}

		yield {
			type: "usage",
			inputTokens,
			outputTokens,
		} as const
	} catch (error) {
		// NOTE(review): the original error is intentionally replaced with a
		// user-facing hint; the underlying cause is not propagated or logged here.
		throw new Error(
			"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
		)
	}
}


override getModel(): { id: string; info: ModelInfo } {
return {
Expand Down
11 changes: 11 additions & 0 deletions src/core/webview/webviewMessageHandler.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1255,5 +1255,16 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We
await provider.postStateToWebview()
break
}
case "executeVSCodeCommand": {
	// Bridge from the webview: execute an arbitrary VS Code command by id.
	// A missing command id is silently ignored.
	if (message.command) {
		try {
			await vscode.commands.executeCommand(message.command)
		} catch (error) {
			// Log and surface the failure; no response is sent back to the webview.
			provider.log(`Failed to execute VS Code command ${message.command}: ${error}`)
			vscode.window.showErrorMessage(`Failed to execute command: ${message.command}`)
		}
	}
	break
}
}
}
2 changes: 2 additions & 0 deletions src/schemas/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ const commandIds = [
"setCustomStoragePath",
"focusInput",
"acceptInput",
"extension.reloadAllMcpServers",
"extension.toggleAllMcpServersDisabled",
] as const

export type CommandId = (typeof commandIds)[number]
Expand Down
48 changes: 48 additions & 0 deletions src/services/mcp/McpHub.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1306,4 +1306,52 @@ export class McpHub {
}
this.disposables.forEach((d) => d.dispose())
}

/**
 * Enables or disables all global MCP servers at once.
 * When servers are re-enabled, the configuration is reloaded.
 * @param disabled true = disable all, false = enable all
 */
public async toggleAllServersDisabled(disabled: boolean): Promise<void> {
	// Collect all server entries (global and project).
	const allServers = this.getAllServers()

	// Persist the disabled flag and mirror it onto any live connection.
	// NOTE(review): updates are intentionally sequential — a Promise.all
	// version was reverted; confirm concurrent config-file writes are safe
	// before parallelizing this loop again.
	for (const server of allServers) {
		await this.updateServerConfig(server.name, { disabled }, server.source)
		const conn = this.findConnection(server.name, server.source)
		if (conn) {
			conn.server.disabled = disabled
		}
	}

	// If enabling, reload the configuration so servers reconnect.
	if (!disabled) {
		// Re-initialize all servers, both global and project
		await this.initializeMcpServers("global")
		await this.initializeMcpServers("project")
	}

	await this.notifyWebviewOfServerChanges()
}

/**
 * Restarts all currently active MCP servers in parallel.
 * This will trigger a popup for each server being restarted; a failure on
 * one server is reported individually and does not abort the others.
 */
public async restartAllMcpServers(): Promise<void> {
	// Only servers that are not disabled are restarted.
	const enabledServers = this.getAllServers().filter((server) => !server.disabled)

	await Promise.all(
		enabledServers.map((server) =>
			this.restartConnection(server.name, server.source).catch((error) => {
				this.showErrorMessage(`Failed to restart MCP server ${server.name}`, error)
			}),
		),
	)

	await this.notifyWebviewOfServerChanges()
}
}
Loading
Loading