@@ -295,6 +295,15 @@ impl GlobalState {
295
295
LspNotification :: DidCloseTextDocument ( params) => {
296
296
state_handlers:: did_close ( params, & mut self . lsp_state , & mut self . world ) ?;
297
297
} ,
298
+ LspNotification :: DidCreateFiles ( params) => {
299
+ state_handlers:: did_create_files ( params, & self . world ) ?;
300
+ } ,
301
+ LspNotification :: DidDeleteFiles ( params) => {
302
+ state_handlers:: did_delete_files ( params, & mut self . world ) ?;
303
+ } ,
304
+ LspNotification :: DidRenameFiles ( params) => {
305
+ state_handlers:: did_rename_files ( params, & mut self . world ) ?;
306
+ } ,
298
307
}
299
308
} ,
300
309
@@ -686,8 +695,10 @@ pub(crate) enum IndexerQueueTask {
686
695
687
696
/// A unit of work for the background indexer queue.
///
/// Tasks are queued by the `index_*` functions below and consumed in batches
/// by `process_indexer_batch`, which dispatches each variant to the matching
/// `indexer::` operation (`create`, `delete`, `rename`, `update`).
#[derive(Debug)]
pub enum IndexerTask {
    /// Index the file at `uri` (handled via `indexer::create`).
    Create { uri: Url },
    /// Drop index entries for the file at `uri` (handled via `indexer::delete`).
    Delete { uri: Url },
    /// Move index entries from `uri` to `new` (handled via `indexer::rename`).
    Rename { uri: Url, new: Url },
    /// Re-index `uri` from the in-memory `document` contents
    /// (handled via `indexer::update`).
    Update { uri: Url, document: Document },
}
692
703
693
704
#[ derive( Debug ) ]
@@ -703,6 +714,27 @@ struct RefreshDiagnosticsResult {
703
714
version : Option < i32 > ,
704
715
}
705
716
717
+ fn summarize_indexer_task ( batch : & [ IndexerTask ] ) -> String {
718
+ let mut counts = std:: collections:: HashMap :: new ( ) ;
719
+ for task in batch {
720
+ let type_name = match task {
721
+ IndexerTask :: Create { .. } => "Create" ,
722
+ IndexerTask :: Delete { .. } => "Delete" ,
723
+ IndexerTask :: Rename { .. } => "Rename" ,
724
+ IndexerTask :: Update { .. } => "Update" ,
725
+ } ;
726
+ * counts. entry ( type_name) . or_insert ( 0 ) += 1 ;
727
+ }
728
+
729
+ let mut summary = String :: new ( ) ;
730
+ for ( task_type, count) in counts. iter ( ) {
731
+ use std:: fmt:: Write ;
732
+ let _ = write ! ( summary, "{task_type}: {count} " ) ;
733
+ }
734
+
735
+ summary. trim_end ( ) . to_string ( )
736
+ }
737
+
706
738
static INDEXER_QUEUE : LazyLock < tokio:: sync:: mpsc:: UnboundedSender < IndexerQueueTask > > =
707
739
LazyLock :: new ( || {
708
740
let ( tx, rx) = tokio:: sync:: mpsc:: unbounded_channel ( ) ;
@@ -770,44 +802,60 @@ async fn process_indexer_queue(mut rx: mpsc::UnboundedReceiver<IndexerQueueTask>
770
802
}
771
803
772
804
async fn process_indexer_batch ( batch : Vec < IndexerTask > ) {
773
- // Deduplicate tasks by key. We use a `HashMap` so only the last insertion
774
- // is retained. `Update` tasks use URI as key, `Start` tasks use None (we
775
- // only expect one though). This is effectively a way of cancelling `Update`
776
- // tasks for outdated documents.
777
- let batch: std:: collections:: HashMap < _ , _ > = batch
778
- . into_iter ( )
779
- . map ( |task| match & task {
780
- IndexerTask :: Update { uri, .. } => ( Some ( uri. clone ( ) ) , task) ,
781
- IndexerTask :: Start { .. } => ( None , task) ,
782
- } )
783
- . collect ( ) ;
805
+ tracing:: trace!(
806
+ "Processing {n} indexer tasks ({summary})" ,
807
+ n = batch. len( ) ,
808
+ summary = summarize_indexer_task( & batch)
809
+ ) ;
810
+
811
+ let to_path_buf = |uri : & url:: Url | {
812
+ uri. to_file_path ( )
813
+ . map_err ( |_| anyhow ! ( "Failed to convert URI '{uri}' to file path" ) )
814
+ } ;
784
815
785
- let mut handles = Vec :: new ( ) ;
816
+ for task in batch {
817
+ let result: anyhow:: Result < ( ) > = ( || async {
818
+ match & task {
819
+ IndexerTask :: Create { uri } => {
820
+ let path = to_path_buf ( uri) ?;
821
+ indexer:: create ( & path) ?;
822
+ } ,
786
823
787
- for ( _, task) in batch {
788
- handles. push ( tokio:: task:: spawn_blocking ( move || match task {
789
- IndexerTask :: Start { folders } => {
790
- indexer:: start ( folders) ;
791
- } ,
792
- IndexerTask :: Update { document, uri } => {
793
- let result = if let Ok ( path) = uri. to_file_path ( ) {
794
- indexer:: update ( & document, & path)
795
- } else {
796
- Err ( anyhow ! ( "Failed to convert URI to file path: {uri}" ) )
797
- } ;
798
- if let Err ( err) = result {
799
- log:: error!( "Indexer update failed: {err}" ) ;
800
- }
801
- } ,
802
- } ) ) ;
803
- }
824
+ IndexerTask :: Update { uri, document } => {
825
+ let path = to_path_buf ( uri) ?;
826
+ indexer:: update ( & document, & path) ?;
827
+ } ,
828
+
829
+ IndexerTask :: Delete { uri } => {
830
+ let path = to_path_buf ( uri) ?;
831
+ indexer:: delete ( & path) ?;
832
+ } ,
833
+
834
+ IndexerTask :: Rename {
835
+ uri : old_uri,
836
+ new : new_uri,
837
+ } => {
838
+ let old_path = to_path_buf ( old_uri) ?;
839
+ let new_path = to_path_buf ( new_uri) ?;
840
+
841
+ indexer:: rename ( & old_path, & new_path) ?;
842
+ } ,
843
+ }
804
844
805
- for handle in handles {
806
- let _ = handle. await ;
845
+ Ok ( ( ) )
846
+ } ) ( )
847
+ . await ;
848
+
849
+ if let Err ( err) = result {
850
+ tracing:: warn!( "Can't process indexer task: {err}" ) ;
851
+ continue ;
852
+ }
807
853
}
808
854
}
809
855
810
856
async fn process_diagnostics_batch ( batch : Vec < RefreshDiagnosticsTask > ) {
857
+ tracing:: trace!( "Processing {n} diagnostic tasks" , n = batch. len( ) ) ;
858
+
811
859
// Deduplicate tasks by keeping only the last one for each URI. We use a
812
860
// `HashMap` so only the last insertion is retained. This is effectively a
813
861
// way of cancelling diagnostics tasks for outdated documents.
@@ -850,27 +898,110 @@ async fn process_diagnostics_batch(batch: Vec<RefreshDiagnosticsTask>) {
850
898
}
851
899
852
900
pub ( crate ) fn index_start ( folders : Vec < String > , state : WorldState ) {
853
- INDEXER_QUEUE
854
- . send ( IndexerQueueTask :: Indexer ( IndexerTask :: Start { folders } ) )
855
- . unwrap_or_else ( |err| lsp:: log_error!( "Failed to queue initial indexing: {err}" ) ) ;
901
+ lsp:: log_info!( "Initial indexing started" ) ;
902
+
903
+ let uris: Vec < Url > = folders
904
+ . into_iter ( )
905
+ . flat_map ( |folder| {
906
+ walkdir:: WalkDir :: new ( folder)
907
+ . into_iter ( )
908
+ . filter_entry ( |e| indexer:: filter_entry ( e) )
909
+ . filter_map ( |entry| {
910
+ let entry = match entry {
911
+ Ok ( e) => e,
912
+ Err ( _) => return None ,
913
+ } ;
914
+
915
+ if !entry. file_type ( ) . is_file ( ) {
916
+ return None ;
917
+ }
918
+ let path = entry. path ( ) ;
856
919
920
+ // Only index R files
921
+ let ext = path. extension ( ) . unwrap_or_default ( ) ;
922
+ if ext != "r" && ext != "R" {
923
+ return None ;
924
+ }
925
+
926
+ if let Ok ( uri) = url:: Url :: from_file_path ( path) {
927
+ Some ( uri)
928
+ } else {
929
+ tracing:: warn!( "Can't convert path to URI: {:?}" , path) ;
930
+ None
931
+ }
932
+ } )
933
+ } )
934
+ . collect ( ) ;
935
+
936
+ index_create ( uris, state) ;
937
+ }
938
+
939
+ pub ( crate ) fn index_create ( uris : Vec < Url > , state : WorldState ) {
940
+ for uri in uris {
941
+ INDEXER_QUEUE
942
+ . send ( IndexerQueueTask :: Indexer ( IndexerTask :: Create { uri } ) )
943
+ . unwrap_or_else ( |err| crate :: lsp:: log_error!( "Failed to queue index create: {err}" ) ) ;
944
+ }
945
+
946
+ diagnostics_refresh_all ( state) ;
947
+ }
948
+
949
+ pub ( crate ) fn index_update ( uris : Vec < Url > , state : WorldState ) {
950
+ for uri in uris {
951
+ let document = match state. get_document ( & uri) {
952
+ Ok ( doc) => doc. clone ( ) ,
953
+ Err ( err) => {
954
+ tracing:: warn!( "Can't get document '{uri}' for indexing: {err:?}" ) ;
955
+ continue ;
956
+ } ,
957
+ } ;
958
+
959
+ INDEXER_QUEUE
960
+ . send ( IndexerQueueTask :: Indexer ( IndexerTask :: Update {
961
+ document,
962
+ uri,
963
+ } ) )
964
+ . unwrap_or_else ( |err| lsp:: log_error!( "Failed to queue index update: {err}" ) ) ;
965
+ }
966
+
967
+ // Refresh all diagnostics since the indexer results for one file may affect
968
+ // other files
969
+ diagnostics_refresh_all ( state) ;
970
+ }
971
+
972
+ pub ( crate ) fn index_delete ( uris : Vec < Url > , state : WorldState ) {
973
+ for uri in uris {
974
+ INDEXER_QUEUE
975
+ . send ( IndexerQueueTask :: Indexer ( IndexerTask :: Delete { uri } ) )
976
+ . unwrap_or_else ( |err| lsp:: log_error!( "Failed to queue index update: {err}" ) ) ;
977
+ }
978
+
979
+ // Refresh all diagnostics since the indexer results for one file may affect
980
+ // other files
857
981
diagnostics_refresh_all ( state) ;
858
982
}
859
983
860
- pub ( crate ) fn index_update ( uri : Url , document : Document , state : WorldState ) {
861
- INDEXER_QUEUE
862
- . send ( IndexerQueueTask :: Indexer ( IndexerTask :: Update {
863
- document,
864
- uri : uri. clone ( ) ,
865
- } ) )
866
- . unwrap_or_else ( |err| lsp:: log_error!( "Failed to queue index update: {err}" ) ) ;
984
+ pub ( crate ) fn index_rename ( uris : Vec < ( Url , Url ) > , state : WorldState ) {
985
+ for ( old, new) in uris {
986
+ INDEXER_QUEUE
987
+ . send ( IndexerQueueTask :: Indexer ( IndexerTask :: Rename {
988
+ uri : old,
989
+ new,
990
+ } ) )
991
+ . unwrap_or_else ( |err| lsp:: log_error!( "Failed to queue index update: {err}" ) ) ;
992
+ }
867
993
868
994
// Refresh all diagnostics since the indexer results for one file may affect
869
995
// other files
870
996
diagnostics_refresh_all ( state) ;
871
997
}
872
998
873
999
pub ( crate ) fn diagnostics_refresh_all ( state : WorldState ) {
1000
+ tracing:: trace!(
1001
+ "Refreshing diagnostics for {n} documents" ,
1002
+ n = state. documents. len( )
1003
+ ) ;
1004
+
874
1005
for ( uri, _document) in state. documents . iter ( ) {
875
1006
INDEXER_QUEUE
876
1007
. send ( IndexerQueueTask :: Diagnostics ( RefreshDiagnosticsTask {
0 commit comments