@@ -6,12 +6,13 @@ mod topologic_sort;
 
 use std::time::Duration;
 
-use hir::db::DefDatabase;
+use hir::{db::DefDatabase, Symbol};
+use itertools::Itertools;
 
 use crate::{
     base_db::{
         ra_salsa::{Database, ParallelDatabase, Snapshot},
-        Cancelled, CrateId, SourceDatabase, SourceRootDatabase,
+        Cancelled, CrateId, SourceDatabase,
     },
     symbol_index::SymbolsDatabase,
     FxIndexMap, RootDatabase,
@@ -21,11 +22,12 @@ use crate::{
 #[derive(Debug)]
 pub struct ParallelPrimeCachesProgress {
     /// the crates that we are currently priming.
-    pub crates_currently_indexing: Vec<String>,
+    pub crates_currently_indexing: Vec<Symbol>,
     /// the total number of crates we want to prime.
     pub crates_total: usize,
     /// the total number of crates that have finished priming
     pub crates_done: usize,
+    pub work_type: &'static str,
 }
 
 pub fn parallel_prime_caches(
@@ -47,41 +49,32 @@ pub fn parallel_prime_caches(
     };
 
     enum ParallelPrimeCacheWorkerProgress {
-        BeginCrate { crate_id: CrateId, crate_name: String },
+        BeginCrate { crate_id: CrateId, crate_name: Symbol },
         EndCrate { crate_id: CrateId },
     }
 
+    // We split off def map computation from other work,
+    // as the def map is the relevant one. Once the defmaps are computed
+    // the project is ready to go, the other indices are just nice to have for some IDE features.
+    #[derive(PartialOrd, Ord, PartialEq, Eq, Copy, Clone)]
+    enum PrimingPhase {
+        DefMap,
+        ImportMap,
+        CrateSymbols,
+    }
+
     let (work_sender, progress_receiver) = {
         let (progress_sender, progress_receiver) = crossbeam_channel::unbounded();
         let (work_sender, work_receiver) = crossbeam_channel::unbounded();
-        let graph = graph.clone();
-        let local_roots = db.local_roots();
        let prime_caches_worker = move |db: Snapshot<RootDatabase>| {
-            while let Ok((crate_id, crate_name)) = work_receiver.recv() {
+            while let Ok((crate_id, crate_name, kind)) = work_receiver.recv() {
                 progress_sender
                     .send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;
 
-                // Compute the DefMap and possibly ImportMap
-                let file_id = graph[crate_id].root_file_id;
-                let root_id = db.file_source_root(file_id);
-                if db.source_root(root_id).is_library {
-                    db.crate_def_map(crate_id);
-                } else {
-                    // This also computes the DefMap
-                    db.import_map(crate_id);
-                }
-
-                // Compute the symbol search index.
-                // This primes the cache for `ide_db::symbol_index::world_symbols()`.
-                //
-                // We do this for workspace crates only (members of local_roots), because doing it
-                // for all dependencies could be *very* unnecessarily slow in a large project.
-                //
-                // FIXME: We should do it unconditionally if the configuration is set to default to
-                // searching dependencies (rust-analyzer.workspace.symbol.search.scope), but we
-                // would need to pipe that configuration information down here.
-                if local_roots.contains(&root_id) {
-                    db.crate_symbols(crate_id.into());
+                match kind {
+                    PrimingPhase::DefMap => _ = db.crate_def_map(crate_id),
+                    PrimingPhase::ImportMap => _ = db.import_map(crate_id),
+                    PrimingPhase::CrateSymbols => _ = db.crate_symbols(crate_id.into()),
                 }
 
                 progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
@@ -112,16 +105,34 @@ pub fn parallel_prime_caches(
     let mut crates_currently_indexing =
         FxIndexMap::with_capacity_and_hasher(num_worker_threads, Default::default());
 
+    let mut additional_phases = vec![];
+
     while crates_done < crates_total {
         db.unwind_if_cancelled();
 
         for crate_id in &mut crates_to_prime {
-            work_sender
-                .send((
-                    crate_id,
-                    graph[crate_id].display_name.as_deref().unwrap_or_default().to_owned(),
-                ))
-                .ok();
+            let krate = &graph[crate_id];
+            let name = krate
+                .display_name
+                .as_deref()
+                .cloned()
+                .unwrap_or_else(|| Symbol::integer(crate_id.into_raw().into_u32() as usize));
+            if krate.origin.is_lang() {
+                additional_phases.push((crate_id, name.clone(), PrimingPhase::ImportMap));
+            } else if krate.origin.is_local() {
+                // Compute the symbol search index.
+                // This primes the cache for `ide_db::symbol_index::world_symbols()`.
+                //
+                // We do this for workspace crates only (members of local_roots), because doing it
+                // for all dependencies could be *very* unnecessarily slow in a large project.
+                //
+                // FIXME: We should do it unconditionally if the configuration is set to default to
+                // searching dependencies (rust-analyzer.workspace.symbol.search.scope), but we
+                // would need to pipe that configuration information down here.
+                additional_phases.push((crate_id, name.clone(), PrimingPhase::CrateSymbols));
+            }
+
+            work_sender.send((crate_id, name, PrimingPhase::DefMap)).ok();
         }
 
         // recv_timeout is somewhat a hack, we need a way to from this thread check to see if the current salsa revision
@@ -153,6 +164,50 @@ pub fn parallel_prime_caches(
             crates_currently_indexing: crates_currently_indexing.values().cloned().collect(),
             crates_done,
             crates_total,
+            work_type: "Indexing",
+        };
+
+        cb(progress);
+    }
+
+    let mut crates_done = 0;
+    let crates_total = additional_phases.len();
+    for w in additional_phases.into_iter().sorted_by_key(|&(_, _, phase)| phase) {
+        work_sender.send(w).ok();
+    }
+
+    while crates_done < crates_total {
+        db.unwind_if_cancelled();
+
+        // recv_timeout is somewhat a hack, we need a way to from this thread check to see if the current salsa revision
+        // is cancelled on a regular basis. workers will only exit if they are processing a task that is cancelled, or
+        // if this thread exits, and closes the work channel.
+        let worker_progress = match progress_receiver.recv_timeout(Duration::from_millis(10)) {
+            Ok(p) => p,
+            Err(crossbeam_channel::RecvTimeoutError::Timeout) => {
+                continue;
+            }
+            Err(crossbeam_channel::RecvTimeoutError::Disconnected) => {
+                // our workers may have died from a cancelled task, so we'll check and re-raise here.
+                db.unwind_if_cancelled();
+                break;
+            }
+        };
+        match worker_progress {
+            ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name } => {
+                crates_currently_indexing.insert(crate_id, crate_name);
+            }
+            ParallelPrimeCacheWorkerProgress::EndCrate { crate_id } => {
+                crates_currently_indexing.swap_remove(&crate_id);
+                crates_done += 1;
+            }
+        };
+
+        let progress = ParallelPrimeCachesProgress {
+            crates_currently_indexing: crates_currently_indexing.values().cloned().collect(),
+            crates_done,
+            crates_total,
+            work_type: "Populating symbols",
 
         };
 
         cb(progress);
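
A note on the deferred-phase ordering in the second loop: `sorted_by_key(|&(_, _, phase)| phase)` relies on the derived `Ord` for `PrimingPhase`, which compares variants in declaration order (`DefMap < ImportMap < CrateSymbols`), so all import-map work is handed to the workers before any symbol-index work. The snippet below is a minimal, self-contained sketch of that behaviour, not part of the patch; the crate ids and names in it are invented for illustration.

```rust
// Standalone sketch: derive(Ord) on an enum orders variants by declaration order,
// so sorting the deferred work by its phase queues ImportMap jobs before CrateSymbols jobs.
use itertools::Itertools;

#[derive(PartialOrd, Ord, PartialEq, Eq, Copy, Clone, Debug)]
enum PrimingPhase {
    DefMap,
    ImportMap,
    CrateSymbols,
}

fn main() {
    // Stand-ins for the (crate, name, phase) tuples collected in `additional_phases`.
    let additional_phases =
        vec![(1u32, "local_crate", PrimingPhase::CrateSymbols), (2, "std", PrimingPhase::ImportMap)];

    let ordered: Vec<_> =
        additional_phases.into_iter().sorted_by_key(|&(_, _, phase)| phase).collect();

    // ImportMap work comes first, CrateSymbols work last.
    assert!(matches!(ordered[0].2, PrimingPhase::ImportMap));
    assert!(matches!(ordered[1].2, PrimingPhase::CrateSymbols));
}
```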
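The `recv_timeout` loops exist so the coordinating thread wakes up regularly to call `db.unwind_if_cancelled()`, even when no worker has reported progress, and so it notices when all workers have exited and closed the channel. Below is a minimal, standalone sketch of that polling pattern with `crossbeam_channel`; the message type, timings, and printed text are placeholders, and the cancellation check is reduced to a comment.

```rust
// Sketch of the recv_timeout polling pattern (assumptions: plain u32 messages and
// arbitrary timings; the real code checks salsa cancellation where noted below).
use std::time::Duration;

fn main() {
    let (progress_sender, progress_receiver) = crossbeam_channel::unbounded::<u32>();

    std::thread::spawn(move || {
        for i in 0..3 {
            std::thread::sleep(Duration::from_millis(25));
            progress_sender.send(i).unwrap();
        }
        // Dropping the sender closes the channel; the receiver then sees Disconnected.
    });

    loop {
        // The real loop calls db.unwind_if_cancelled() here on every wake-up.
        match progress_receiver.recv_timeout(Duration::from_millis(10)) {
            Ok(done) => println!("crates done: {}", done + 1),
            Err(crossbeam_channel::RecvTimeoutError::Timeout) => continue,
            Err(crossbeam_channel::RecvTimeoutError::Disconnected) => break,
        }
    }
}
```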