fix: reduce CPU load from file watchers and improve performance #7596
```diff
@@ -37,7 +37,7 @@ export class FileWatcher implements IFileWatcher {
     private ignoreController: RooIgnoreController
     private accumulatedEvents: Map<string, { uri: vscode.Uri; type: "create" | "change" | "delete" }> = new Map()
     private batchProcessDebounceTimer?: NodeJS.Timeout
-    private readonly BATCH_DEBOUNCE_DELAY_MS = 500
+    private readonly BATCH_DEBOUNCE_DELAY_MS = 1000 // Increased from 500ms to reduce CPU load
```
> **Contributor (Author):** Consider extracting these timing constants (500ms, 1000ms) to named constants at the top of the file. This would make them easier to find and modify, and the names could document their purpose (e.g., …)
```diff
     private readonly FILE_PROCESSING_CONCURRENCY_LIMIT = 10
     private readonly batchSegmentThreshold: number
```
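As a rough sketch of the reviewer's suggestion above (the constant and class names are illustrative, not taken from the PR), the timing values could live as named module-level constants so that their purpose is documented in one place rather than as bare literals:

```typescript
// Sketch only - names are hypothetical, not part of the actual change.

/** Delay before a batch of accumulated file-watcher events is flushed. */
export const BATCH_DEBOUNCE_DELAY_MS = 1_000 // raised from 500 ms to reduce CPU load

/** Maximum number of files processed concurrently per batch. */
export const FILE_PROCESSING_CONCURRENCY_LIMIT = 10

export class FileWatcherTimings {
	private batchTimer?: NodeJS.Timeout

	/** Debounce batch processing using the shared constant above. */
	scheduleBatch(processBatch: () => void): void {
		if (this.batchTimer) {
			clearTimeout(this.batchTimer)
		}
		this.batchTimer = setTimeout(processBatch, BATCH_DEBOUNCE_DELAY_MS)
	}
}
```

Whether such values stay as class fields or move to module scope is a style call; the main gain is that 500/1000 no longer appear as unexplained literals scattered across files.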
```diff
@@ -626,52 +626,70 @@ async function execRipgrep(rgPath: string, args: string[], limit: number): Promi
     const rgProcess = childProcess.spawn(rgPath, args)
     let output = ""
     let results: string[] = []
+    let processKilled = false

-    // Set timeout to avoid hanging
+    // Set timeout to avoid hanging - increased timeout for slower systems
     const timeoutId = setTimeout(() => {
-        rgProcess.kill()
-        console.warn("ripgrep timed out, returning partial results")
-        resolve(results.slice(0, limit))
-    }, 10_000)
+        if (!processKilled) {
+            processKilled = true
+            rgProcess.kill("SIGTERM")
+            console.warn("ripgrep timed out, returning partial results")
+            resolve(results.slice(0, limit))
+        }
+    }, 15_000) // Increased timeout to 15 seconds

     // Process stdout data as it comes in
     rgProcess.stdout.on("data", (data) => {
+        // Early exit if we've already killed the process
+        if (processKilled) return
+
         output += data.toString()
         processRipgrepOutput()

         // Kill the process if we've reached the limit
-        if (results.length >= limit) {
-            rgProcess.kill()
-            clearTimeout(timeoutId) // Clear the timeout when we kill the process due to reaching the limit
+        if (results.length >= limit && !processKilled) {
```
> **Contributor (Author):** Is this race condition handling intentional? The …
```diff
+            processKilled = true
+            clearTimeout(timeoutId)
+            rgProcess.kill("SIGTERM")
+            // Resolve immediately when limit is reached
+            resolve(results.slice(0, limit))
         }
     })

     // Process stderr but don't fail on non-zero exit codes
     rgProcess.stderr.on("data", (data) => {
-        console.error(`ripgrep stderr: ${data}`)
+        // Only log errors if not killed intentionally
+        if (!processKilled) {
+            console.error(`ripgrep stderr: ${data}`)
+        }
     })

     // Handle process completion
     rgProcess.on("close", (code) => {
         // Clear the timeout to avoid memory leaks
         clearTimeout(timeoutId)

-        // Process any remaining output
-        processRipgrepOutput(true)
+        // Only process if not already resolved
+        if (!processKilled) {
+            // Process any remaining output
+            processRipgrepOutput(true)

-        // Log non-zero exit codes but don't fail
-        if (code !== 0 && code !== null && code !== 143 /* SIGTERM */) {
-            console.warn(`ripgrep process exited with code ${code}, returning partial results`)
-        }
+            // Log non-zero exit codes but don't fail
+            if (code !== 0 && code !== null && code !== 143 /* SIGTERM */) {
+                console.warn(`ripgrep process exited with code ${code}, returning partial results`)
+            }

-        resolve(results.slice(0, limit))
+            resolve(results.slice(0, limit))
+        }
     })

     // Handle process errors
     rgProcess.on("error", (error) => {
         // Clear the timeout to avoid memory leaks
         clearTimeout(timeoutId)
-        reject(new Error(`ripgrep process error: ${error.message}`))
+        if (!processKilled) {
+            reject(new Error(`ripgrep process error: ${error.message}`))
+        }
     })

     // Helper function to process output buffer
```
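For readers skimming the diff, the change above boils down to a "settle once" guard around a spawned process: a single boolean decides whether the timeout, the result limit, the close event, or an error is the first and only path allowed to resolve or reject. A minimal standalone sketch of that pattern, assuming a Node environment (the function and its arguments are hypothetical, not the project's `execRipgrep`):

```typescript
import * as childProcess from "child_process"

// Guard-flag pattern: the promise settles exactly once, whichever of the
// timeout, the close event, or an error fires first.
export function runWithTimeout(command: string, args: string[], timeoutMs: number): Promise<string> {
	return new Promise((resolve, reject) => {
		const proc = childProcess.spawn(command, args)
		let output = ""
		let settled = false

		const settleOnce = (settle: () => void) => {
			if (settled) return
			settled = true
			clearTimeout(timeoutId)
			settle()
		}

		const timeoutId = setTimeout(() => {
			proc.kill("SIGTERM")
			settleOnce(() => resolve(output)) // return partial output on timeout
		}, timeoutMs)

		proc.stdout.on("data", (data) => {
			if (settled) return // ignore output arriving after we settled
			output += data.toString()
		})

		proc.on("close", () => settleOnce(() => resolve(output)))
		proc.on("error", (error) => settleOnce(() => reject(error)))
	})
}
```

This is also why the race-condition question above matters: a second `resolve` call is harmless for a Promise, but without the flag the late `close` handler would still re-run the post-processing and logging.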
```diff
@@ -286,11 +286,11 @@ export class McpHub {
         clearTimeout(existingTimer)
     }

-    // Set new timer
+    // Set new timer with longer debounce to reduce CPU usage
     const timer = setTimeout(async () => {
         this.configChangeDebounceTimers.delete(key)
         await this.handleConfigFileChange(filePath, source)
-    }, 500) // 500ms debounce
+    }, 1000) // Increased to 1000ms debounce to reduce CPU load

     this.configChangeDebounceTimers.set(key, timer)
 }

@@ -1033,15 +1033,14 @@ export class McpHub {
     if (manageConnectingState) {
         this.isConnecting = true
     }
-    this.removeAllFileWatchers()
     // Filter connections by source
     const currentConnections = this.connections.filter(
         (conn) => conn.server.source === source || (!conn.server.source && source === "global"),
     )
     const currentNames = new Set(currentConnections.map((conn) => conn.server.name))
     const newNames = new Set(Object.keys(newServers))

-    // Delete removed servers
+    // Delete removed servers (this will also clean up their file watchers)
```
> **Contributor (Author):** I notice the …
```diff
     for (const name of currentNames) {
         if (!newNames.has(name)) {
             await this.deleteConnection(name, source)
```
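The inline comment above is cut off, but the surrounding change makes the intent visible: the blanket `removeAllFileWatchers()` call is gone, and watcher cleanup is expected to happen per server when `deleteConnection` runs. A hypothetical sketch of that shape (not the actual `McpHub` implementation):

```typescript
import * as vscode from "vscode"

// Hypothetical registry shape: watchers are tracked per server so that
// removing one server disposes only that server's watchers.
export class ServerWatcherRegistry {
	private fileWatchers = new Map<string, vscode.FileSystemWatcher[]>()

	register(serverName: string, watcher: vscode.FileSystemWatcher): void {
		const existing = this.fileWatchers.get(serverName) ?? []
		existing.push(watcher)
		this.fileWatchers.set(serverName, existing)
	}

	/** Called from connection teardown: dispose only this server's watchers. */
	removeForServer(serverName: string): void {
		for (const watcher of this.fileWatchers.get(serverName) ?? []) {
			watcher.dispose()
		}
		this.fileWatchers.delete(serverName)
	}
}
```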
```diff
@@ -1065,22 +1064,17 @@ export class McpHub {
         if (!currentConnection) {
             // New server
             try {
-                // Only setup file watcher for enabled servers
-                if (!validatedConfig.disabled) {
-                    this.setupFileWatcher(name, validatedConfig, source)
-                }
+                // connectToServer will handle file watcher setup for enabled servers
                 await this.connectToServer(name, validatedConfig, source)
             } catch (error) {
                 this.showErrorMessage(`Failed to connect to new MCP server ${name}`, error)
             }
         } else if (!deepEqual(JSON.parse(currentConnection.server.config), config)) {
             // Existing server with changed config
             try {
-                // Only setup file watcher for enabled servers
-                if (!validatedConfig.disabled) {
-                    this.setupFileWatcher(name, validatedConfig, source)
-                }
+                // Delete connection first (this cleans up old file watchers)
+                await this.deleteConnection(name, source)
+                // connectToServer will handle file watcher setup for enabled servers
                 await this.connectToServer(name, validatedConfig, source)
             } catch (error) {
                 this.showErrorMessage(`Failed to reconnect MCP server ${name}`, error)
```
> **Review comment:** The debounce values here (500ms) differ from other components (1000ms in file-watcher and McpHub). Could we consider making these configurable through VS Code settings? This would allow users to tune the delays based on their system performance: slower systems might benefit from longer delays, while faster systems could use shorter ones.