
Commit e8b8ddd

remove provisional cache
1 parent de68911 commit e8b8ddd

File tree

6 files changed: +142 -188 lines changed


compiler/rustc_trait_selection/src/solve/search_graph/cache.rs (-102)

This file was deleted.

compiler/rustc_trait_selection/src/solve/search_graph/mod.rs (+64 -84)
```diff
@@ -1,10 +1,7 @@
-mod cache;
-
-use self::cache::ProvisionalEntry;
 use super::inspect;
 use super::inspect::ProofTreeBuilder;
 use super::SolverMode;
-use cache::ProvisionalCache;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_index::Idx;
 use rustc_index::IndexVec;
```
```diff
@@ -27,8 +24,14 @@ struct StackEntry<'tcx> {
     // The maximum depth reached by this stack entry, only up-to date
     // for the top of the stack and lazily updated for the rest.
     reached_depth: StackDepth,
+    // In case of a cycle, the depth of the root.
+    cycle_root_depth: StackDepth,
+
     encountered_overflow: bool,
     has_been_used: bool,
+    /// Starts out as `None` and gets set when rerunning this
+    /// goal in case we encounter a cycle.
+    provisional_result: Option<QueryResult<'tcx>>,
 
     /// We put only the root goal of a coinductive cycle into the global cache.
     ///
```
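For orientation, here is a simplified model of what the reworked `StackEntry` tracks after this change. The field types are illustrative stand-ins, not the rustc definitions; the point is that the bookkeeping which previously lived in the separate provisional cache now sits directly on each stack entry:

```rust
// Simplified sketch of the reworked stack entry (illustrative types only).
struct StackEntrySketch {
    /// Depth of the root of the cycle this entry participates in.
    /// Stays equal to the entry's own depth until a cycle is found.
    cycle_root_depth: usize,
    /// Set once some nested goal uses this entry as a cycle head.
    has_been_used: bool,
    /// `None` during the first iteration; the previous iteration's result
    /// once the cycle head is rerun towards a fixpoint. Stands in for
    /// `Option<QueryResult<'tcx>>`.
    provisional_result: Option<bool>,
}
```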
```diff
@@ -47,7 +50,7 @@ pub(super) struct SearchGraph<'tcx> {
     ///
     /// An element is *deeper* in the stack if its index is *lower*.
     stack: IndexVec<StackDepth, StackEntry<'tcx>>,
-    provisional_cache: ProvisionalCache<'tcx>,
+    stack_entries: FxHashMap<CanonicalInput<'tcx>, StackDepth>,
 }
 
 impl<'tcx> SearchGraph<'tcx> {
```
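The `ProvisionalCache` lookup table is replaced by `stack_entries`, a plain map from goal to stack depth that is kept in lockstep with the stack. A minimal self-contained sketch of that invariant, using a toy `Goal` type and `std` collections rather than the rustc API:

```rust
use std::collections::HashMap;

type Goal = &'static str;

/// Toy search graph: the stack and a goal-to-depth map kept in lockstep,
/// replacing the provisional cache's lookup table for cycle detection.
struct Graph {
    stack: Vec<Goal>,
    stack_entries: HashMap<Goal, usize>,
}

impl Graph {
    fn push(&mut self, goal: Goal) {
        let depth = self.stack.len();
        self.stack.push(goal);
        // Mirrors `v.insert(depth)` in the `Entry::Vacant` arm below.
        assert!(self.stack_entries.insert(goal, depth).is_none());
    }

    fn pop(&mut self) -> Goal {
        let goal = self.stack.pop().unwrap();
        // Mirrors the new assertion added to `pop_stack`.
        assert!(self.stack_entries.remove(goal).is_some());
        goal
    }

    /// A goal closes a cycle iff it is already on the stack; the map
    /// directly yields the depth of the cycle head.
    fn cycle_with(&self, goal: Goal) -> Option<usize> {
        self.stack_entries.get(goal).copied()
    }
}

fn main() {
    let mut graph = Graph { stack: Vec::new(), stack_entries: HashMap::new() };
    graph.push("A");
    graph.push("B");
    assert_eq!(graph.cycle_with("A"), Some(0)); // B -> A closes a cycle
    assert_eq!(graph.cycle_with("C"), None); // C is not on the stack
    graph.pop();
    graph.pop();
}
```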
```diff
@@ -56,7 +59,7 @@ impl<'tcx> SearchGraph<'tcx> {
             mode,
             local_overflow_limit: tcx.recursion_limit().0.checked_ilog2().unwrap_or(0) as usize,
             stack: Default::default(),
-            provisional_cache: ProvisionalCache::empty(),
+            stack_entries: Default::default(),
         }
     }
 
```
```diff
@@ -85,6 +88,7 @@ impl<'tcx> SearchGraph<'tcx> {
     /// would cause us to not track overflow and recursion depth correctly.
     fn pop_stack(&mut self) -> StackEntry<'tcx> {
         let elem = self.stack.pop().unwrap();
+        assert!(self.stack_entries.remove(&elem.input).is_some());
         if let Some(last) = self.stack.raw.last_mut() {
             last.reached_depth = last.reached_depth.max(elem.reached_depth);
             last.encountered_overflow |= elem.encountered_overflow;
```
```diff
@@ -104,22 +108,17 @@ impl<'tcx> SearchGraph<'tcx> {
     }
 
     pub(super) fn is_empty(&self) -> bool {
-        self.stack.is_empty() && self.provisional_cache.is_empty()
+        self.stack.is_empty()
     }
 
     /// Whether we're currently in a cycle. This should only be used
     /// for debug assertions.
     pub(super) fn in_cycle(&self) -> bool {
         if let Some(stack_depth) = self.stack.last_index() {
-            // Either the current goal on the stack is the root of a cycle...
-            if self.stack[stack_depth].has_been_used {
-                return true;
-            }
-
-            // ...or it depends on a goal with a lower depth.
-            let current_goal = self.stack[stack_depth].input;
-            let entry_index = self.provisional_cache.lookup_table[&current_goal];
-            self.provisional_cache.entries[entry_index].depth != stack_depth
+            // Either the current goal on the stack is the root of a cycle
+            // or it depends on a goal with a lower depth.
+            self.stack[stack_depth].has_been_used
+                || self.stack[stack_depth].cycle_root_depth != stack_depth
         } else {
            false
         }
```
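With the cache gone, both conditions of `in_cycle` read straight off the top stack entry. A hedged standalone sketch, with an illustrative `Frame` type in place of `StackEntry<'tcx>`:

```rust
/// Illustrative frame; mirrors the two fields `in_cycle` now inspects.
struct Frame {
    has_been_used: bool,
    cycle_root_depth: usize,
}

fn in_cycle(stack: &[Frame]) -> bool {
    match stack.last() {
        // Either the top goal is itself a cycle head (`has_been_used`), or
        // its root depth was lowered because it depends on a deeper goal.
        Some(top) => top.has_been_used || top.cycle_root_depth != stack.len() - 1,
        None => false,
    }
}
```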
```diff
@@ -211,24 +210,23 @@ impl<'tcx> SearchGraph<'tcx> {
             }
         }
 
-        // Look at the provisional cache to detect cycles.
-        let cache = &mut self.provisional_cache;
-        match cache.lookup_table.entry(input) {
+        // Check whether we're in a cycle.
+        match self.stack_entries.entry(input) {
             // No entry, we push this goal on the stack and try to prove it.
             Entry::Vacant(v) => {
                 let depth = self.stack.next_index();
                 let entry = StackEntry {
                     input,
                     available_depth,
                     reached_depth: depth,
+                    cycle_root_depth: depth,
                     encountered_overflow: false,
                     has_been_used: false,
+                    provisional_result: None,
                     cycle_participants: Default::default(),
                 };
                 assert_eq!(self.stack.push(entry), depth);
-                let entry_index =
-                    cache.entries.push(ProvisionalEntry { response: None, depth, input });
-                v.insert(entry_index);
+                v.insert(depth);
             }
             // We have a nested goal which relies on a goal `root` deeper in the stack.
             //
```
```diff
@@ -239,41 +237,50 @@ impl<'tcx> SearchGraph<'tcx> {
             //
             // Finally we can return either the provisional response for that goal if we have a
             // coinductive cycle or an ambiguous result if the cycle is inductive.
-            Entry::Occupied(entry_index) => {
+            Entry::Occupied(entry) => {
                 inspect.goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::CacheHit(
                     CacheHit::Provisional,
                 ));
 
-                let entry_index = *entry_index.get();
-                let stack_depth = cache.depth(entry_index);
+                let stack_depth = *entry.get();
                 debug!("encountered cycle with depth {stack_depth:?}");
-
-                cache.add_dependency_of_leaf_on(entry_index);
-                let mut iter = self.stack.iter_mut();
-                let root = iter.nth(stack_depth.as_usize()).unwrap();
-                for e in iter {
-                    root.cycle_participants.insert(e.input);
+                // We start by updating the root depth of all cycle participants, and
+                // add all cycle participants to the root.
+                let root_depth = self.stack[stack_depth].cycle_root_depth;
+                let (prev, participants) = self.stack.raw.split_at_mut(stack_depth.as_usize() + 1);
+                let root = &mut prev[root_depth.as_usize()];
+                for entry in participants {
+                    debug_assert!(entry.cycle_root_depth >= root_depth);
+                    entry.cycle_root_depth = root_depth;
+                    root.cycle_participants.insert(entry.input);
+                    // FIXME(@lcnr): I believe that this line is needed as we could
+                    // otherwise access a cache entry for the root of a cycle while
+                    // computing the result for a cycle participant. This can result
+                    // in unstable results due to incompleteness.
+                    //
+                    // However, a test for this would be an even more complex version of
+                    // tests/ui/traits/new-solver/coinduction/incompleteness-unstable-result.rs.
+                    // I did not bother to write such a test and we have no regression test
+                    // for this. It would be good to have such a test :)
+                    #[allow(rustc::potential_query_instability)]
+                    root.cycle_participants.extend(entry.cycle_participants.drain());
                 }
 
-                // If we're in a cycle, we have to retry proving the current goal
-                // until we reach a fixpoint.
+                // If we're in a cycle, we have to retry proving the cycle head
+                // until we reach a fixpoint. It is not enough to simply retry the
+                // `root` goal of this cycle.
+                //
+                // See tests/ui/traits/new-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
+                // for an example.
                 self.stack[stack_depth].has_been_used = true;
-                return if let Some(result) = cache.provisional_result(entry_index) {
+                return if let Some(result) = self.stack[stack_depth].provisional_result {
                     result
                 } else {
-                    // If we don't have a provisional result yet, the goal has to
-                    // still be on the stack.
-                    let mut goal_on_stack = false;
-                    let mut is_coinductive = true;
-                    for entry in self.stack.raw[stack_depth.index()..]
+                    // If we don't have a provisional result yet we're in the first iteration,
+                    // so we start with no constraints.
+                    let is_coinductive = self.stack.raw[stack_depth.index()..]
                         .iter()
-                        .skip_while(|entry| entry.input != input)
-                    {
-                        goal_on_stack = true;
-                        is_coinductive &= entry.input.value.goal.predicate.is_coinductive(tcx);
-                    }
-                    debug_assert!(goal_on_stack);
-
+                        .all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx));
                    if is_coinductive {
                        Self::response_no_constraints(tcx, input, Certainty::Yes)
                    } else {
```
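The root-update loop in this hunk relies on `split_at_mut` to mutably borrow the cycle root and the participants above it at the same time. A self-contained sketch of that borrow-splitting pattern, with an illustrative `Entry` type and a `Vec` standing in for `StackEntry<'tcx>` and its `FxHashSet`:

```rust
/// Illustrative stack entry for demonstrating the disjoint borrow.
struct Entry {
    name: &'static str,
    cycle_root_depth: usize,
    cycle_participants: Vec<&'static str>,
}

/// Update the cycle root of everything above `head` and collect the
/// participants into the root. Assumes `stack[head].cycle_root_depth <= head`,
/// as the real code asserts.
fn update_cycle_roots(stack: &mut [Entry], head: usize) {
    let root_depth = stack[head].cycle_root_depth;
    // `split_at_mut` yields two disjoint mutable slices, so the root (inside
    // `prev`) and the participants above the head can be mutated in one pass
    // without fighting the borrow checker.
    let (prev, participants) = stack.split_at_mut(head + 1);
    let root = &mut prev[root_depth];
    for entry in participants {
        debug_assert!(entry.cycle_root_depth >= root_depth);
        entry.cycle_root_depth = root_depth;
        root.cycle_participants.push(entry.name);
        // Like the real code, also move the participant's own recorded
        // participants into the root.
        root.cycle_participants.append(&mut entry.cycle_participants);
    }
}
```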
```diff
@@ -294,40 +301,25 @@ impl<'tcx> SearchGraph<'tcx> {
         // of the previous iteration is equal to the final result, at which
         // point we are done.
         for _ in 0..self.local_overflow_limit() {
-            let response = prove_goal(self, inspect);
+            let result = prove_goal(self, inspect);
 
             // Check whether the current goal is the root of a cycle and whether
             // we have to rerun because its provisional result differed from the
             // final result.
-            //
-            // Also update the response for this goal stored in the provisional
-            // cache.
             let stack_entry = self.pop_stack();
             debug_assert_eq!(stack_entry.input, input);
-            let cache = &mut self.provisional_cache;
-            let provisional_entry_index =
-                *cache.lookup_table.get(&stack_entry.input).unwrap();
-            let provisional_entry = &mut cache.entries[provisional_entry_index];
             if stack_entry.has_been_used
-                && provisional_entry.response.map_or(true, |r| r != response)
+                && stack_entry.provisional_result.map_or(true, |r| r != result)
             {
-                // If so, update the provisional result for this goal and remove
-                // all entries whose result depends on this goal from the provisional
-                // cache...
-                //
-                // That's not completely correct, as a nested goal can also only
-                // depend on a goal which is lower in the stack so it doesn't
-                // actually depend on the current goal. This should be fairly
-                // rare and is hopefully not relevant for performance.
-                provisional_entry.response = Some(response);
-                #[allow(rustc::potential_query_instability)]
-                cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
-                cache.entries.truncate(provisional_entry_index.index() + 1);
-
-                // ...and finally push our goal back on the stack and reevaluate it.
-                self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
+                // If so, update its provisional result and reevaluate it.
+                let depth = self.stack.push(StackEntry {
+                    has_been_used: false,
+                    provisional_result: Some(result),
+                    ..stack_entry
+                });
+                assert_eq!(self.stack_entries.insert(input, depth), None);
             } else {
-                return (stack_entry, response);
+                return (stack_entry, result);
             }
         }
 
```
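This hunk is the heart of the fixpoint iteration: the cycle head is rerun with the previous iteration's result as its `provisional_result` until the result stops changing or the local overflow limit is hit. A minimal model of that loop; `Res`, `evaluate`, and the overflow fallback are illustrative stand-ins, not the rustc API:

```rust
/// Possible outcomes in this toy model; stands in for `QueryResult<'tcx>`.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Res {
    Ambiguous,
    Yes,
}

/// Minimal model of the loop above. `evaluate` mirrors `prove_goal` and
/// reports whether the goal was used as a cycle head (`has_been_used`)
/// alongside this iteration's result.
fn fixpoint(limit: usize, mut evaluate: impl FnMut(Option<Res>) -> (bool, Res)) -> Res {
    let mut provisional = None;
    for _ in 0..limit {
        let (has_been_used, result) = evaluate(provisional);
        // Rerun only if this goal is a cycle head and its provisional
        // result differs from the result of the current iteration.
        if has_been_used && provisional.map_or(true, |r| r != result) {
            provisional = Some(result);
        } else {
            return result;
        }
    }
    // Hitting the local overflow limit: give up on the cycle. The real
    // code handles this case outside the part of the loop shown here.
    Res::Ambiguous
}
```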
```diff
@@ -343,17 +335,7 @@ impl<'tcx> SearchGraph<'tcx> {
         //
         // It is not possible for any nested goal to depend on something deeper on the
         // stack, as this would have also updated the depth of the current goal.
-        let cache = &mut self.provisional_cache;
-        let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
-        let provisional_entry = &mut cache.entries[provisional_entry_index];
-        let depth = provisional_entry.depth;
-        if depth == self.stack.next_index() {
-            for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
-                let actual_index = cache.lookup_table.remove(&entry.input);
-                debug_assert_eq!(Some(i), actual_index);
-                debug_assert!(entry.depth == depth);
-            }
-
+        if final_entry.cycle_root_depth == self.stack.next_index() {
             // When encountering a cycle, both inductive and coinductive, we only
             // move the root into the global cache. We also store all other cycle
             // participants involved.
```
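With the provisional cache gone, the root check reduces to comparing the popped entry's `cycle_root_depth` against the next stack index. A hypothetical sketch of the resulting root-only caching rule; `GlobalCache`, `Goal`, and `Response` are stand-ins, not rustc types:

```rust
use std::collections::{HashMap, HashSet};

type Goal = &'static str;
type Response = bool; // stands in for `QueryResult<'tcx>`

/// Hypothetical stand-in for the global cache: result plus cycle participants.
struct GlobalCache {
    entries: HashMap<Goal, (Response, HashSet<Goal>)>,
}

/// Mirrors `final_entry.cycle_root_depth == self.stack.next_index()`: after
/// the final pop, only a goal whose recorded cycle root is the slot it
/// itself occupied is a cycle root and may enter the global cache, together
/// with all of its cycle participants.
fn try_cache(
    cache: &mut GlobalCache,
    goal: Goal,
    cycle_root_depth: usize,
    next_stack_index: usize,
    cycle_participants: HashSet<Goal>,
    result: Response,
) {
    if cycle_root_depth == next_stack_index {
        cache.entries.insert(goal, (result, cycle_participants));
    }
    // Otherwise the goal participated in a cycle rooted deeper in the
    // stack, so nothing is stored for it on its own.
}
```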
```diff
@@ -371,8 +353,6 @@ impl<'tcx> SearchGraph<'tcx> {
                 dep_node,
                 result,
             )
-        } else {
-            provisional_entry.response = Some(result);
         }
 
         result
```
