@@ -26,6 +26,7 @@ import (
2626 "os"
2727 "slices"
2828 "strings"
29+ "time"
2930
3031 "github.com/bufbuild/buf/private/buf/bufworkspace"
3132 "github.com/bufbuild/buf/private/bufpkg/bufanalysis"
@@ -43,7 +44,10 @@ import (
4344 "go.lsp.dev/protocol"
4445)
4546
46- const descriptorPath = "google/protobuf/descriptor.proto"
47+ const (
48+ descriptorPath = "google/protobuf/descriptor.proto"
49+ refreshCheckStagger = 5 * time.Millisecond
50+ )
4751
4852// file is a file that has been opened by the client.
4953//
@@ -285,9 +289,28 @@ func (f *file) Refresh(ctx context.Context) {
285289 f.FindModule(ctx)
286290
287291 progress.Report(ctx, "Running Checks", 4.0/6)
288- f.BuildImages(ctx)
289- f.RunLints(ctx)
290- f.RunBreaking(ctx)
292+ // Since checks are a more expensive operation, we do not want to run a check on every
293+ // Refresh call. Instead, we can stagger the checks and only run them periodically by
294+ // spinning them off into a go routine. Then we attempt to lock using the top-level LSP
295+ // lock. It is safe to use because if another LSP call is made, we allow checks to finish
296+ // before resolving a subsequent LSP request.
297+ go func() {
298+ // We stagger the check operation by refreshCheckStagger and run it for the latest Refresh state.
299+ time.Sleep(refreshCheckStagger)
300+ // Call TryLock; if unsuccessful, then another thread holds the lock, so we provide a
301+ // debug log and move on.
302+ if !f.lsp.lock.TryLock() {
303+ f.lsp.logger.Debug(
304+ fmt.Sprintf("another thread holds the LSP lock, no new checks started for %v", f.uri),
305+ )
306+ return
307+ }
308+ // We have successfully obtained the lock, we can now run the checks.
309+ defer f.lsp.lock.Unlock()
310+ f.BuildImages(ctx)
311+ f.RunLints(ctx)
312+ f.RunBreaking(ctx)
313+ }()
291314
292315 progress.Report(ctx, "Indexing Symbols", 5.0/6)
293316 f.IndexSymbols(ctx)
0 commit comments