Commit 05faeb5

bors[bot] and Jonas Schievink authored
Merge #6153

6153: Improve prime_caches and display its progress r=matklad a=jonas-schievink

It now computes the `CrateDefMap` of all crates, which is generally a reasonable approximation for "IDE features ready". There is still some delay after this finishes (I suspect mostly due to impl collection, which takes a while), but this should already be an improvement. For more accurate progress reports, all crates are sorted topologically before the operation starts.

~~Because that is also the ordering in which parallelization makes sense (previously attempted in #3529), I decided to throw that into the mix as well. It still doesn't provide *that* much of a performance boost, but it does scale beyond the current single-core architecture, and adding it was very easy.~~

~~Unfortunately, as written, this will not tell the user which crate is actually causing slowdowns, since the displayed crate is the last one that was *started*, not the one we are currently *blocked* on; that seems fairly difficult to implement unless I'm missing something.~~

(I have removed rayon for now, since it does not work correctly with cancellation.)

Co-authored-by: Jonas Schievink <[email protected]>
2 parents fac59f4 + cde7392 commit 05faeb5

5 files changed: 119 additions, 13 deletions

crates/base_db/src/input.rs (28 additions, 0 deletions)

@@ -221,6 +221,34 @@ impl CrateGraph {
         deps.into_iter()
     }
 
+    /// Returns all crates in the graph, sorted in topological order (ie. dependencies of a crate
+    /// come before the crate itself).
+    pub fn crates_in_topological_order(&self) -> Vec<CrateId> {
+        let mut res = Vec::new();
+        let mut visited = FxHashSet::default();
+
+        for krate in self.arena.keys().copied() {
+            go(self, &mut visited, &mut res, krate);
+        }
+
+        return res;
+
+        fn go(
+            graph: &CrateGraph,
+            visited: &mut FxHashSet<CrateId>,
+            res: &mut Vec<CrateId>,
+            source: CrateId,
+        ) {
+            if !visited.insert(source) {
+                return;
+            }
+            for dep in graph[source].dependencies.iter() {
+                go(graph, visited, res, dep.crate_id)
+            }
+            res.push(source)
+        }
+    }
+
     // FIXME: this only finds one crate with the given root; we could have multiple
     pub fn crate_id_for_crate_root(&self, file_id: FileId) -> Option<CrateId> {
         let (&crate_id, _) =
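
The key property of `crates_in_topological_order` is that every dependency of a crate appears at an earlier index than the crate itself. A minimal sanity-check sketch of that invariant, assuming a `graph: &CrateGraph` is available (the helper function itself is hypothetical and not part of this commit):

```rust
// Hypothetical check: in the returned order, each crate's dependencies
// must already have been emitted at an earlier index.
fn assert_topological(graph: &CrateGraph) {
    let order = graph.crates_in_topological_order();
    for (i, krate) in order.iter().enumerate() {
        for dep in graph[*krate].dependencies.iter() {
            assert!(order[..i].contains(&dep.crate_id));
        }
    }
}
```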

crates/ide/src/lib.rs (6 additions, 2 deletions)

@@ -77,6 +77,7 @@ pub use crate::{
     hover::{HoverAction, HoverConfig, HoverGotoTypeData, HoverResult},
     inlay_hints::{InlayHint, InlayHintsConfig, InlayKind},
     markup::Markup,
+    prime_caches::PrimeCachesProgress,
     references::{
         Declaration, Reference, ReferenceAccess, ReferenceKind, ReferenceSearchResult, RenameError,
     },
@@ -223,8 +224,11 @@ impl Analysis {
         self.with_db(|db| status::status(&*db, file_id))
     }
 
-    pub fn prime_caches(&self, files: Vec<FileId>) -> Cancelable<()> {
-        self.with_db(|db| prime_caches::prime_caches(db, files))
+    pub fn prime_caches<F>(&self, cb: F) -> Cancelable<()>
+    where
+        F: Fn(PrimeCachesProgress) + Sync + std::panic::UnwindSafe,
+    {
+        self.with_db(move |db| prime_caches::prime_caches(db, &cb))
     }
 
     /// Gets the text of the source file.
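
Callers now pass a progress callback instead of a file list. The callback has to be `Sync` and unwind-safe, matching the `&(dyn Fn(...) + Sync)` taken by `prime_caches` and the panic-based cancellation used by `with_db`. A minimal sketch of driving the new API, assuming an `analysis: &ide::Analysis` is already in scope (the wrapper function is hypothetical):

```rust
use ide::{Analysis, Canceled, PrimeCachesProgress};

// Hypothetical caller: log every progress event and swallow cancellation,
// similar to what the LSP main loop below does with its progress bar.
fn warm_caches(analysis: &Analysis) {
    analysis
        .prime_caches(|progress: PrimeCachesProgress| eprintln!("{:?}", progress))
        .unwrap_or_else(|_: Canceled| {
            // A pending change invalidated the snapshot; the caller can retry later.
        });
}
```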

crates/ide/src/prime_caches.rs (39 additions, 4 deletions)

@@ -3,10 +3,45 @@
 //! request takes longer to compute. This modules implemented prepopulating of
 //! various caches, it's not really advanced at the moment.
 
-use crate::{FileId, RootDatabase};
+use base_db::SourceDatabase;
+use hir::db::DefDatabase;
 
-pub(crate) fn prime_caches(db: &RootDatabase, files: Vec<FileId>) {
-    for file in files {
-        let _ = crate::syntax_highlighting::highlight(db, file, None, false);
+use crate::RootDatabase;
+
+#[derive(Debug)]
+pub enum PrimeCachesProgress {
+    Started,
+    /// We started indexing a crate.
+    StartedOnCrate {
+        on_crate: String,
+        n_done: usize,
+        n_total: usize,
+    },
+    /// We finished indexing all crates.
+    Finished,
+}
+
+pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) + Sync)) {
+    let _p = profile::span("prime_caches");
+    let graph = db.crate_graph();
+    let topo = &graph.crates_in_topological_order();
+
+    cb(PrimeCachesProgress::Started);
+
+    // FIXME: This would be easy to parallelize, since it's in the ideal ordering for that.
+    // Unfortunately rayon prevents panics from propagation out of a `scope`, which breaks
+    // cancellation, so we cannot use rayon.
+    for (i, krate) in topo.iter().enumerate() {
+        let crate_name =
+            graph[*krate].declaration_name.as_ref().map(ToString::to_string).unwrap_or_default();
+
+        cb(PrimeCachesProgress::StartedOnCrate {
+            on_crate: crate_name,
+            n_done: i,
+            n_total: topo.len(),
+        });
+        db.crate_def_map(*krate);
     }
+
+    cb(PrimeCachesProgress::Finished);
 }
crates/rust-analyzer/src/main_loop.rs (35 additions, 7 deletions)

@@ -7,6 +7,7 @@ use std::{
 
 use base_db::VfsPath;
 use crossbeam_channel::{select, Receiver};
+use ide::PrimeCachesProgress;
 use ide::{Canceled, FileId};
 use lsp_server::{Connection, Notification, Request, Response};
 use lsp_types::notification::Notification as _;
@@ -61,7 +62,7 @@ pub(crate) enum Task {
     Response(Response),
     Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
     Workspaces(Vec<anyhow::Result<ProjectWorkspace>>),
-    Unit,
+    PrimeCaches(PrimeCachesProgress),
 }
 
 impl fmt::Debug for Event {
@@ -197,7 +198,28 @@ impl GlobalState {
                     }
                 }
                 Task::Workspaces(workspaces) => self.switch_workspaces(workspaces),
-                Task::Unit => (),
+                Task::PrimeCaches(progress) => {
+                    let (state, message, fraction);
+                    match progress {
+                        PrimeCachesProgress::Started => {
+                            state = Progress::Begin;
+                            message = None;
+                            fraction = 0.0;
+                        }
+                        PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+                            state = Progress::Report;
+                            message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
+                            fraction = Progress::fraction(n_done, n_total);
+                        }
+                        PrimeCachesProgress::Finished => {
+                            state = Progress::End;
+                            message = None;
+                            fraction = 1.0;
+                        }
+                    };
+
+                    self.report_progress("indexing", state, message, Some(fraction));
+                }
             },
             Event::Vfs(mut task) => {
                 let _p = profile::span("GlobalState::handle_event/vfs");
@@ -573,12 +595,18 @@ impl GlobalState {
                 Task::Diagnostics(diagnostics)
             })
         }
-        self.task_pool.handle.spawn({
-            let subs = subscriptions;
+        self.task_pool.handle.spawn_with_sender({
             let snap = self.snapshot();
-            move || {
-                snap.analysis.prime_caches(subs).unwrap_or_else(|_: Canceled| ());
-                Task::Unit
+            move |sender| {
+                snap.analysis
+                    .prime_caches(|progress| {
+                        sender.send(Task::PrimeCaches(progress)).unwrap();
+                    })
+                    .unwrap_or_else(|_: Canceled| {
+                        // Pretend that we're done, so that the progress bar is removed. Otherwise
+                        // the editor may complain about it already existing.
+                        sender.send(Task::PrimeCaches(PrimeCachesProgress::Finished)).unwrap()
+                    });
             }
         });
     }
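
`Progress` and `Progress::fraction` are pre-existing helpers elsewhere in rust-analyzer and are not part of this diff. To make the snippet above read on its own, here is a plausible sketch of their shape; the actual definitions may differ:

```rust
// Plausible sketch only: Begin/Report/End mirror the three phases of an
// LSP work-done progress report, and fraction() maps "done out of total"
// to a value in [0.0, 1.0].
pub(crate) enum Progress {
    Begin,
    Report,
    End,
}

impl Progress {
    pub(crate) fn fraction(done: usize, total: usize) -> f64 {
        assert!(done <= total);
        done as f64 / total.max(1) as f64
    }
}
```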

crates/rust-analyzer/src/thread_pool.rs (11 additions, 0 deletions)

@@ -23,6 +23,17 @@ impl<T> TaskPool<T> {
         })
     }
 
+    pub(crate) fn spawn_with_sender<F>(&mut self, task: F)
+    where
+        F: FnOnce(Sender<T>) + Send + 'static,
+        T: Send + 'static,
+    {
+        self.inner.execute({
+            let sender = self.sender.clone();
+            move || task(sender)
+        })
+    }
+
     pub(crate) fn len(&self) -> usize {
         self.inner.queued_count()
     }
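
Unlike `spawn`, which forwards a task's single return value to the pool's channel once the task completes, `spawn_with_sender` hands the task a clone of the pool's sender so it can emit any number of messages while it is still running. A toy usage sketch, assuming the pool's `Sender` is `crossbeam_channel::Sender` and that a `TaskPool<String>` with a drained receiving end exists elsewhere (names are hypothetical):

```rust
use crossbeam_channel::Sender;

// Hypothetical job: stream several status strings back to the event loop
// while the work is still in flight.
fn spawn_noisy_job(pool: &mut TaskPool<String>) {
    pool.spawn_with_sender(|sender: Sender<String>| {
        for step in ["collecting input", "crunching numbers", "done"].iter() {
            sender.send(step.to_string()).unwrap();
        }
    });
}
```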
