From b8a8cad1a7550f585b7ccaf8fa32dcdfe45a5cd8 Mon Sep 17 00:00:00 2001
From: Josh Arnold
Date: Sun, 14 Sep 2025 14:23:51 -0400
Subject: [PATCH] Support custom preparationBatchSize defined via SourceKit's options

---
 Documentation/Configuration File.md              |  1 +
 Sources/SKOptions/SourceKitLSPOptions.swift      | 11 +++++++++--
 .../PreparationTaskDescription.swift             | 14 ++++++++------
 Sources/SemanticIndex/SemanticIndexManager.swift | 16 +++++++++++++---
 Sources/SourceKitLSP/Workspace.swift             |  1 +
 config.schema.json                               |  5 +++++
 6 files changed, 37 insertions(+), 11 deletions(-)

diff --git a/Documentation/Configuration File.md b/Documentation/Configuration File.md
index 27e31a060..4be996715 100644
--- a/Documentation/Configuration File.md
+++ b/Documentation/Configuration File.md
@@ -59,3 +59,4 @@ The structure of the file is currently not guaranteed to be stable. Options may
 - `workDoneProgressDebounceDuration: number`: When a task is started that should be displayed to the client as a work done progress, how many milliseconds to wait before actually starting the work done progress. This prevents flickering of the work done progress in the client for short-lived index tasks which end within this duration.
 - `sourcekitdRequestTimeout: number`: The maximum duration that a sourcekitd request should be allowed to execute before being declared as timed out. In general, editors should cancel requests that they are no longer interested in, but in case editors don't cancel requests, this ensures that a long-running non-cancelled request is not blocking sourcekitd and thus most semantic functionality. In particular, VS Code does not cancel the semantic tokens request, which can cause a long-running AST build that blocks sourcekitd.
 - `semanticServiceRestartTimeout: number`: If a request to sourcekitd or clangd exceeds this timeout, we assume that the semantic service provider is hanging for some reason and won't recover. To restore semantic functionality, we terminate and restart it.
+- `preparationBatchSize: integer`: The number of targets to prepare in parallel. If nil, SourceKit-LSP will choose a batch size.
diff --git a/Sources/SKOptions/SourceKitLSPOptions.swift b/Sources/SKOptions/SourceKitLSPOptions.swift
index 026f0fb23..a049f9e97 100644
--- a/Sources/SKOptions/SourceKitLSPOptions.swift
+++ b/Sources/SKOptions/SourceKitLSPOptions.swift
@@ -433,6 +433,10 @@ public struct SourceKitLSPOptions: Sendable, Codable, Equatable {
     return .seconds(300)
   }
 
+  /// The number of targets to prepare in parallel.
+  /// If nil, SourceKit-LSP will choose a batch size.
+  public var preparationBatchSize: Int? = nil
+
   public init(
     swiftPM: SwiftPMOptions? = .init(),
     fallbackBuildSystem: FallbackBuildSystemOptions? = .init(),
@@ -451,7 +455,8 @@ public struct SourceKitLSPOptions: Sendable, Codable, Equatable {
     swiftPublishDiagnosticsDebounceDuration: Double? = nil,
     workDoneProgressDebounceDuration: Double? = nil,
     sourcekitdRequestTimeout: Double? = nil,
-    semanticServiceRestartTimeout: Double? = nil
+    semanticServiceRestartTimeout: Double? = nil,
+    preparationBatchSize: Int? = nil
   ) {
     self.swiftPM = swiftPM
     self.fallbackBuildSystem = fallbackBuildSystem
@@ -471,6 +476,7 @@ public struct SourceKitLSPOptions: Sendable, Codable, Equatable {
     self.workDoneProgressDebounceDuration = workDoneProgressDebounceDuration
     self.sourcekitdRequestTimeout = sourcekitdRequestTimeout
     self.semanticServiceRestartTimeout = semanticServiceRestartTimeout
+    self.preparationBatchSize = preparationBatchSize
   }
 
   public init?(fromLSPAny lspAny: LSPAny?) throws {
@@ -535,7 +541,8 @@ public struct SourceKitLSPOptions: Sendable, Codable, Equatable {
       workDoneProgressDebounceDuration: override?.workDoneProgressDebounceDuration ?? base.workDoneProgressDebounceDuration,
       sourcekitdRequestTimeout: override?.sourcekitdRequestTimeout ?? base.sourcekitdRequestTimeout,
-      semanticServiceRestartTimeout: override?.semanticServiceRestartTimeout ?? base.semanticServiceRestartTimeout
+      semanticServiceRestartTimeout: override?.semanticServiceRestartTimeout ?? base.semanticServiceRestartTimeout,
+      preparationBatchSize: override?.preparationBatchSize ?? base.preparationBatchSize
     )
   }
diff --git a/Sources/SemanticIndex/PreparationTaskDescription.swift b/Sources/SemanticIndex/PreparationTaskDescription.swift
index 5e5d932b1..5def2b7d1 100644
--- a/Sources/SemanticIndex/PreparationTaskDescription.swift
+++ b/Sources/SemanticIndex/PreparationTaskDescription.swift
@@ -48,6 +48,8 @@ package struct PreparationTaskDescription: IndexTaskDescription {
   /// Hooks that should be called when the preparation task finishes.
   private let hooks: IndexHooks
 
+  private let purpose: TargetPreparationPurpose
+
   /// The task is idempotent because preparing the same target twice produces the same result as preparing it once.
   package var isIdempotent: Bool { true }
 
@@ -69,13 +71,15 @@ package struct PreparationTaskDescription: IndexTaskDescription {
       @escaping @Sendable (
         _ message: String, _ type: WindowMessageType, _ structure: LanguageServerProtocol.StructuredLogKind
       ) -> Void,
-    hooks: IndexHooks
+    hooks: IndexHooks,
+    purpose: TargetPreparationPurpose
   ) {
     self.targetsToPrepare = targetsToPrepare
     self.buildServerManager = buildServerManager
     self.preparationUpToDateTracker = preparationUpToDateTracker
     self.logMessageToIndexLog = logMessageToIndexLog
     self.hooks = hooks
+    self.purpose = purpose
   }
 
   package func execute() async {
@@ -121,11 +125,9 @@ package struct PreparationTaskDescription: IndexTaskDescription {
     to currentlyExecutingTasks: [PreparationTaskDescription]
   ) -> [TaskDependencyAction] {
     return currentlyExecutingTasks.compactMap { (other) -> TaskDependencyAction? in
-      if other.targetsToPrepare.count > self.targetsToPrepare.count {
-        // If there is an prepare operation with more targets already running, suspend it.
-        // The most common use case for this is if we prepare all targets simultaneously during the initial preparation
-        // when a project is opened and need a single target indexed for user interaction. We should suspend the
-        // workspace-wide preparation and just prepare the currently needed target.
+      if other.purpose == .forIndexing && self.purpose == .forEditorFunctionality {
+        // If we're running a background indexing operation but need a target indexed for user interaction,
+        // we should prioritize the latter.
         return .cancelAndRescheduleDependency(other)
       }
       return .waitAndElevatePriorityOfDependency(other)
diff --git a/Sources/SemanticIndex/SemanticIndexManager.swift b/Sources/SemanticIndex/SemanticIndexManager.swift
index be73209ee..84c6f8231 100644
--- a/Sources/SemanticIndex/SemanticIndexManager.swift
+++ b/Sources/SemanticIndex/SemanticIndexManager.swift
@@ -154,7 +154,7 @@ private struct InProgressPrepareForEditorTask {
 }
 
 /// The reason why a target is being prepared. This is used to determine the `IndexProgressStatus`.
-private enum TargetPreparationPurpose: Comparable {
+package enum TargetPreparationPurpose: Comparable {
   /// We are preparing the target so we can index files in it.
   case forIndexing
 
@@ -232,6 +232,9 @@ package final actor SemanticIndexManager {
   /// The parameter is the number of files that were scheduled to be indexed.
   private let indexTasksWereScheduled: @Sendable (_ numberOfFileScheduled: Int) -> Void
 
+  /// The size of the batches in which the `SemanticIndexManager` should dispatch preparation tasks.
+  private let preparationBatchSize: Int?
+
   /// Callback that is called when `progressStatus` might have changed.
   private let indexProgressStatusDidChange: @Sendable () -> Void
 
@@ -271,6 +274,7 @@ package final actor SemanticIndexManager {
     updateIndexStoreTimeout: Duration,
     hooks: IndexHooks,
     indexTaskScheduler: TaskScheduler,
+    preparationBatchSize: Int?,
     logMessageToIndexLog:
       @escaping @Sendable (
         _ message: String, _ type: WindowMessageType, _ structure: LanguageServerProtocol.StructuredLogKind
@@ -283,6 +287,7 @@ package final actor SemanticIndexManager {
     self.updateIndexStoreTimeout = updateIndexStoreTimeout
     self.hooks = hooks
     self.indexTaskScheduler = indexTaskScheduler
+    self.preparationBatchSize = preparationBatchSize
     self.logMessageToIndexLog = logMessageToIndexLog
     self.indexTasksWereScheduled = indexTasksWereScheduled
     self.indexProgressStatusDidChange = indexProgressStatusDidChange
@@ -672,7 +677,8 @@ package final actor SemanticIndexManager {
         buildServerManager: self.buildServerManager,
         preparationUpToDateTracker: preparationUpToDateTracker,
         logMessageToIndexLog: logMessageToIndexLog,
-        hooks: hooks
+        hooks: hooks,
+        purpose: purpose
       )
     )
     if Task.isCancelled {
@@ -926,7 +932,11 @@ package final actor SemanticIndexManager {
       // TODO: When we can index multiple targets concurrently in SwiftPM, increase the batch size to half the
       // processor count, so we can get parallelism during preparation.
      // (https://github.com/swiftlang/sourcekit-lsp/issues/1262)
-      for targetsBatch in sortedTargets.partition(intoBatchesOfSize: 1) {
+      let defaultBatchSize = 1
+      let batchSize = max(preparationBatchSize ?? defaultBatchSize, 1)
+      let partitionedTargets = sortedTargets.partition(intoBatchesOfSize: batchSize)
+
+      for targetsBatch in partitionedTargets {
         let preparationTaskID = UUID()
         let filesToIndex = targetsBatch.flatMap({ filesByTarget[$0]! })
 
diff --git a/Sources/SourceKitLSP/Workspace.swift b/Sources/SourceKitLSP/Workspace.swift
index 2292c12c2..6900bb737 100644
--- a/Sources/SourceKitLSP/Workspace.swift
+++ b/Sources/SourceKitLSP/Workspace.swift
@@ -157,6 +157,7 @@ package final class Workspace: Sendable, BuildServerManagerDelegate {
       updateIndexStoreTimeout: options.indexOrDefault.updateIndexStoreTimeoutOrDefault,
       hooks: hooks.indexHooks,
       indexTaskScheduler: indexTaskScheduler,
+      preparationBatchSize: options.preparationBatchSize,
       logMessageToIndexLog: { [weak sourceKitLSPServer] in
        sourceKitLSPServer?.logMessageToIndexLog(message: $0, type: $1, structure: $2)
       },
diff --git a/config.schema.json b/config.schema.json
index d22dfee11..e9897debb 100644
--- a/config.schema.json
+++ b/config.schema.json
@@ -181,6 +181,11 @@
       },
       "type" : "object"
     },
+    "preparationBatchSize" : {
+      "description" : "The number of targets to prepare in parallel. If nil, SourceKit-LSP will choose a batch size.",
+      "markdownDescription" : "The number of targets to prepare in parallel. If nil, SourceKit-LSP will choose a batch size.",
+      "type" : "integer"
+    },
     "semanticServiceRestartTimeout" : {
       "description" : "If a request to sourcekitd or clangd exceeds this timeout, we assume that the semantic service provider is hanging for some reason and won't recover. To restore semantic functionality, we terminate and restart it.",
      "markdownDescription" : "If a request to sourcekitd or clangd exceeds this timeout, we assume that the semantic service provider is hanging for some reason and won't recover. To restore semantic functionality, we terminate and restart it.",
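
For reference, a minimal usage sketch of the new option as a user would set it. It assumes the key is placed at the top level of a SourceKit-LSP configuration file (for example a `.sourcekit-lsp/config.json` in the workspace, per `Documentation/Configuration File.md`); the value `4` is purely illustrative.

```json
{
  "preparationBatchSize": 4
}
```

Per the merge logic added to `SourceKitLSPOptions`, a value from a more specific configuration takes precedence over the base one (`override?.preparationBatchSize ?? base.preparationBatchSize`), and `SemanticIndexManager` clamps the effective batch size to at least 1 via `max(preparationBatchSize ?? defaultBatchSize, 1)`.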