
Commit f4f80a7

committed
cleanup stack handling, be completely lazy
1 parent 578e15d commit f4f80a7

File tree

2 files changed: +90 −71 lines

2 files changed

+90
-71
lines changed
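
In short: `CycleHeads` now stores a `CycleHead` (the paths to the head plus its `UsageKind`) instead of a bare `PathsToNested`, `update_parent_goal` takes the heads as an iterator and folds them into the parent entry, and cycles on the stack no longer mutate entries below the top. The sketch below illustrates the merge semantics of the new `CycleHeads::insert` with simplified stand-in types; `Paths`, `Usage`, and plain `usize` depths are assumptions made for this illustration, not the actual rustc definitions.

use std::collections::{BTreeMap, btree_map};

/// Simplified stand-in for `PathsToNested`: a small bitset of path kinds.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Paths(u8);

impl std::ops::BitOrAssign for Paths {
    fn bitor_assign(&mut self, rhs: Paths) {
        self.0 |= rhs.0;
    }
}

/// Simplified stand-in for `UsageKind`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Usage {
    Coinductive,
    Inductive,
    Mixed,
}

impl Usage {
    fn merge(self, other: Usage) -> Usage {
        if self == other { self } else { Usage::Mixed }
    }
}

/// Per-head data, mirroring the new `CycleHead`.
#[derive(Clone, Copy, Debug)]
struct Head {
    paths_to_head: Paths,
    usage_kind: Usage,
}

#[derive(Default)]
struct Heads {
    heads: BTreeMap<usize, Head>,
}

impl Heads {
    /// Insert a head, merging with an existing entry the way the new
    /// `CycleHeads::insert` does: paths are OR-ed, usage kinds are merged.
    fn insert(&mut self, depth: usize, paths: Paths, usage_kind: Usage) {
        match self.heads.entry(depth) {
            btree_map::Entry::Vacant(entry) => {
                entry.insert(Head { paths_to_head: paths, usage_kind });
            }
            btree_map::Entry::Occupied(entry) => {
                let head = entry.into_mut();
                head.paths_to_head |= paths;
                head.usage_kind = head.usage_kind.merge(usage_kind);
            }
        }
    }
}

fn main() {
    let mut heads = Heads::default();
    heads.insert(0, Paths(0b01), Usage::Inductive);
    heads.insert(0, Paths(0b10), Usage::Coinductive);
    // Both insertions were merged into a single entry for depth 0.
    assert_eq!(heads.heads[&0].paths_to_head, Paths(0b11));
    assert_eq!(heads.heads[&0].usage_kind, Usage::Mixed);
}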

compiler/rustc_type_ir/src/search_graph/mod.rs

Lines changed: 79 additions & 60 deletions
@@ -12,10 +12,11 @@
 //! The global cache has to be completely unobservable, while the per-cycle cache may impact
 //! behavior as long as the resulting behavior is still correct.
 use std::cmp::Ordering;
-use std::collections::BTreeMap;
 use std::collections::hash_map::Entry;
+use std::collections::{BTreeMap, btree_map};
 use std::fmt::Debug;
 use std::hash::Hash;
+use std::iter;
 use std::marker::PhantomData;
 
 use derive_where::derive_where;
@@ -230,13 +231,19 @@ impl AvailableDepth {
     }
 }
 
+#[derive(Clone, Copy, Debug)]
+struct CycleHead {
+    paths_to_head: PathsToNested,
+    usage_kind: UsageKind,
+}
+
 /// All cycle heads a given goal depends on, ordered by their stack depth.
 ///
 /// We also track all paths from this goal to that head. This is necessary
 /// when rebasing provisional cache results.
 #[derive(Clone, Debug, Default)]
 struct CycleHeads {
-    heads: BTreeMap<StackDepth, PathsToNested>,
+    heads: BTreeMap<StackDepth, CycleHead>,
 }
 
 impl CycleHeads {
@@ -256,32 +263,32 @@ impl CycleHeads {
         self.heads.first_key_value().map(|(k, _)| *k)
     }
 
-    fn remove_highest_cycle_head(&mut self) -> PathsToNested {
+    fn remove_highest_cycle_head(&mut self) -> CycleHead {
         let last = self.heads.pop_last();
         last.unwrap().1
     }
 
-    fn insert(&mut self, head: StackDepth, path_from_entry: impl Into<PathsToNested> + Copy) {
-        *self.heads.entry(head).or_insert(path_from_entry.into()) |= path_from_entry.into();
+    fn insert(
+        &mut self,
+        head_index: StackDepth,
+        path_from_entry: impl Into<PathsToNested> + Copy,
+        usage_kind: UsageKind,
+    ) {
+        match self.heads.entry(head_index) {
+            btree_map::Entry::Vacant(entry) => {
+                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usage_kind });
+            }
+            btree_map::Entry::Occupied(entry) => {
+                let head = entry.into_mut();
+                head.paths_to_head |= path_from_entry.into();
+                head.usage_kind = head.usage_kind.merge(usage_kind);
+            }
+        }
     }
 
-    fn iter(&self) -> impl Iterator<Item = (StackDepth, PathsToNested)> + '_ {
+    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
         self.heads.iter().map(|(k, v)| (*k, *v))
     }
-
-    /// Update the cycle heads of a goal at depth `this` given the cycle heads
-    /// of a nested goal. This merges the heads after filtering the parent goal
-    /// itself.
-    fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
-        for (&head, &path_from_entry) in child.heads.iter() {
-            match head.cmp(&this) {
-                Ordering::Less => {}
-                Ordering::Equal => continue,
-                Ordering::Greater => unreachable!(),
-            }
-            self.insert(head, path_from_entry.extend_with(step_kind));
-        }
-    }
 }
 
 bitflags::bitflags! {
@@ -487,9 +494,6 @@ impl<X: Cx> EvaluationResult<X> {
 
 pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     root_depth: AvailableDepth,
-    /// The stack of goals currently being computed.
-    ///
-    /// An element is *deeper* in the stack if its index is *lower*.
     stack: Stack<X>,
     /// The provisional cache contains entries for already computed goals which
     /// still depend on goals higher-up in the stack. We don't move them to the
@@ -511,7 +515,7 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
 /// cache entry.
 enum UpdateParentGoalCtxt<'a, X: Cx> {
     Ordinary(&'a NestedGoals<X>),
-    ProvisionalCacheHit,
+    ProvisionalCacheHitOrCycleOnStack,
 }
 
 impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
@@ -532,22 +536,36 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         stack: &mut Stack<X>,
         step_kind_from_parent: PathKind,
         required_depth_for_nested: usize,
-        heads: &CycleHeads,
+        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
         encountered_overflow: bool,
         context: UpdateParentGoalCtxt<'_, X>,
     ) {
-        if let Some(parent_index) = stack.last_index() {
-            let parent = &mut stack[parent_index];
+        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
             parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
             parent.encountered_overflow |= encountered_overflow;
 
-            parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
+            for (head_index, head) in heads {
+                match head_index.cmp(&parent_index) {
+                    Ordering::Less => parent.heads.insert(
+                        head_index,
+                        head.paths_to_head.extend_with(step_kind_from_parent),
+                        head.usage_kind,
+                    ),
+                    Ordering::Equal => {
+                        let usage_kind = parent
+                            .has_been_used
+                            .map_or(head.usage_kind, |prev| prev.merge(head.usage_kind));
+                        parent.has_been_used = Some(usage_kind);
+                    }
+                    Ordering::Greater => unreachable!(),
+                }
+            }
             let parent_depends_on_cycle = match context {
                 UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                     parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                     !nested_goals.is_empty()
                 }
-                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
+                UpdateParentGoalCtxt::ProvisionalCacheHitOrCycleOnStack => true,
            };
             // Once we've got goals which encountered overflow or a cycle,
             // we track all goals whose behavior may depend on these
@@ -674,7 +692,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             &mut self.stack,
             step_kind_from_parent,
             evaluation_result.required_depth,
-            &evaluation_result.heads,
+            evaluation_result.heads.iter(),
             evaluation_result.encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
         );
@@ -772,7 +790,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         stack_entry: &StackEntry<X>,
         mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
     ) {
-        let popped_head = self.stack.next_index();
+        let popped_head_index = self.stack.next_index();
         #[allow(rustc::potential_query_instability)]
         self.provisional_cache.retain(|&input, entries| {
             entries.retain_mut(|entry| {
@@ -782,7 +800,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     path_from_head,
                     result,
                 } = entry;
-                let ep = if heads.highest_cycle_head() == popped_head {
+                let popped_head = if heads.highest_cycle_head() == popped_head_index {
                     heads.remove_highest_cycle_head()
                 } else {
                     return true;
@@ -796,9 +814,14 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 // After rebasing the cycles `hph` will go through `e`. We need
                 // to make sure that for all possible paths, `hep` and `heph`
                 // are equal to `hph`.
-                for (h, ph) in stack_entry.heads.iter() {
-                    let hp =
-                        Self::cycle_path_kind(&self.stack, stack_entry.step_kind_from_parent, h);
+                let ep = popped_head.paths_to_head;
+                for (head_index, head) in stack_entry.heads.iter() {
+                    let ph = head.paths_to_head;
+                    let hp = Self::cycle_path_kind(
+                        &self.stack,
+                        stack_entry.step_kind_from_parent,
+                        head_index,
+                    );
 
                     // We first validate that all cycles while computing `p` would stay
                     // the same if we were to recompute it as a nested goal of `e`.
@@ -818,7 +841,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     // the heads of `e` to make sure that rebasing `e` again also considers
                     // them.
                     let eph = ep.extend_with_paths(ph);
-                    heads.insert(h, eph);
+                    heads.insert(head_index, eph, head.usage_kind);
                 }
 
                 let Some(head) = heads.opt_highest_cycle_head() else {
@@ -878,11 +901,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     &mut self.stack,
                     step_kind_from_parent,
                     0,
-                    heads,
+                    heads.iter(),
                     encountered_overflow,
-                    UpdateParentGoalCtxt::ProvisionalCacheHit,
+                    UpdateParentGoalCtxt::ProvisionalCacheHitOrCycleOnStack,
                 );
-                debug_assert!(self.stack[head].has_been_used.is_some());
                 debug!(?head, ?path_from_head, "provisional cache hit");
                 return Some(result);
             }
@@ -994,12 +1016,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
 
             // We don't move cycle participants to the global cache, so the
             // cycle heads are always empty.
-            let heads = Default::default();
+            let heads = iter::empty();
             Self::update_parent_goal(
                 &mut self.stack,
                 step_kind_from_parent,
                 required_depth,
-                &heads,
+                heads,
                 encountered_overflow,
                 UpdateParentGoalCtxt::Ordinary(nested_goals),
             );
@@ -1015,34 +1037,31 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         input: X::Input,
         step_kind_from_parent: PathKind,
     ) -> Option<X::Result> {
-        let head = self.stack.find(input)?;
+        let head_index = self.stack.find(input)?;
         // We have a nested goal which directly relies on a goal deeper in the stack.
         //
         // We start by tagging all cycle participants, as that's necessary for caching.
         //
         // Finally we can return either the provisional response or the initial response
         // in case we're in the first fixpoint iteration for this goal.
-        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
-        debug!(?path_kind, "encountered cycle with depth {head:?}");
-        let usage_kind = UsageKind::Single(path_kind);
-        self.stack[head].has_been_used =
-            Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
-
-        // Subtle: when encountering a cyclic goal, we still first checked for overflow,
-        // so we have to update the reached depth.
-        let last_index = self.stack.last_index().unwrap();
-        let last = &mut self.stack[last_index];
-        last.required_depth = last.required_depth.max(1);
-
-        last.nested_goals.insert(input, step_kind_from_parent.into());
-        last.nested_goals.insert(last.input, PathsToNested::EMPTY);
-        if last_index != head {
-            last.heads.insert(head, step_kind_from_parent);
-        }
+        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
+        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
+        let head = CycleHead {
+            paths_to_head: step_kind_from_parent.into(),
+            usage_kind: UsageKind::Single(path_kind),
+        };
+        Self::update_parent_goal(
+            &mut self.stack,
+            step_kind_from_parent,
+            0,
+            iter::once((head_index, head)),
+            false,
+            UpdateParentGoalCtxt::ProvisionalCacheHitOrCycleOnStack,
+        );
 
         // Return the provisional result or, if we're in the first iteration,
         // start with no constraints.
-        if let Some(result) = self.stack[head].provisional_result {
+        if let Some(result) = self.stack[head_index].provisional_result {
             Some(result)
         } else {
             Some(D::initial_provisional_result(cx, path_kind, input))
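
With this change, a cycle on the stack is reported to the parent through `update_parent_goal` with `iter::once((head_index, head))` rather than by writing to `self.stack[head]` directly, so the usage information only reaches the head lazily as each frame is popped. Below is a minimal sketch of that folding step; the `Usage` and `Entry` types and the plain `Vec`-based stack are assumptions for illustration, not the actual `SearchGraph` API.

use std::cmp::Ordering;
use std::collections::BTreeMap;

/// Simplified stand-in for `UsageKind`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Usage {
    Inductive,
    Coinductive,
    Mixed,
}

impl Usage {
    fn merge(self, other: Usage) -> Usage {
        if self == other { self } else { Usage::Mixed }
    }
}

/// Simplified stand-in for a stack entry: only the data touched here.
#[derive(Default)]
struct Entry {
    /// Heads this goal depends on, keyed by their stack depth.
    heads: BTreeMap<usize, Usage>,
    /// Set once this goal has been used as a cycle head itself.
    has_been_used: Option<Usage>,
}

/// Fold the heads reported by a nested goal into the parent, mutating only
/// the last stack entry; a head equal to the parent updates `has_been_used`.
fn update_parent_goal(stack: &mut Vec<Entry>, heads: impl Iterator<Item = (usize, Usage)>) {
    let Some(parent_index) = stack.len().checked_sub(1) else { return };
    let parent = &mut stack[parent_index];
    for (head_index, usage) in heads {
        match head_index.cmp(&parent_index) {
            // A head deeper in the stack remains a head of the parent.
            Ordering::Less => {
                parent
                    .heads
                    .entry(head_index)
                    .and_modify(|prev| *prev = prev.merge(usage))
                    .or_insert(usage);
            }
            // The parent itself is the head: record that it was used in a cycle.
            Ordering::Equal => {
                parent.has_been_used =
                    Some(parent.has_been_used.map_or(usage, |prev| prev.merge(usage)));
            }
            Ordering::Greater => unreachable!("a head cannot be above its parent"),
        }
    }
}

fn main() {
    // Stack with two goals; index 1 is the current parent.
    let mut stack = vec![Entry::default(), Entry::default()];
    // A nested goal reported a cycle with goal 0 and with the parent itself.
    update_parent_goal(&mut stack, [(0, Usage::Inductive), (1, Usage::Coinductive)].into_iter());
    assert_eq!(stack[1].heads.get(&0), Some(&Usage::Inductive));
    assert_eq!(stack[1].has_been_used, Some(Usage::Coinductive));
}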

compiler/rustc_type_ir/src/search_graph/stack.rs

Lines changed: 11 additions & 11 deletions
@@ -1,4 +1,4 @@
-use std::ops::{Index, IndexMut};
+use std::ops::Index;
 
 use derive_where::derive_where;
 use rustc_index::IndexVec;
@@ -48,6 +48,12 @@ pub(super) struct StackEntry<X: Cx> {
     pub nested_goals: NestedGoals<X>,
 }
 
+/// The stack of goals currently being computed.
+///
+/// An element is *deeper* in the stack if its index is *lower*.
+///
+/// Only the last entry of the stack is mutable. All other entries get
+/// lazily updated in `update_parent_goal`.
 #[derive_where(Default; X: Cx)]
 pub(super) struct Stack<X: Cx> {
     entries: IndexVec<StackDepth, StackEntry<X>>,
@@ -62,10 +68,6 @@ impl<X: Cx> Stack<X> {
         self.entries.len()
     }
 
-    pub(super) fn last_index(&self) -> Option<StackDepth> {
-        self.entries.last_index()
-    }
-
     pub(super) fn last(&self) -> Option<&StackEntry<X>> {
         self.entries.raw.last()
     }
@@ -74,6 +76,10 @@
         self.entries.raw.last_mut()
     }
 
+    pub(super) fn last_mut_with_index(&mut self) -> Option<(StackDepth, &mut StackEntry<X>)> {
+        self.entries.last_index().map(|idx| (idx, &mut self.entries[idx]))
+    }
+
     pub(super) fn next_index(&self) -> StackDepth {
         self.entries.next_index()
     }
@@ -108,9 +114,3 @@ impl<X: Cx> Index<StackDepth> for Stack<X> {
         &self.entries[index]
     }
 }
-
-impl<X: Cx> IndexMut<StackDepth> for Stack<X> {
-    fn index_mut(&mut self, index: StackDepth) -> &mut Self::Output {
-        &mut self.entries[index]
-    }
-}
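
`last_mut_with_index` replaces the removed `last_index` accessor together with the `IndexMut` impl, so the only way to mutate the stack from outside is through its top entry, matching the new doc comment on `Stack`. A rough stand-alone version of the same pattern over a plain `Vec` (the real type wraps `rustc_index::IndexVec` keyed by `StackDepth`; this simplified version is only for illustration):

/// Minimal stack wrapper; the real `Stack` wraps `rustc_index::IndexVec`
/// keyed by `StackDepth`, this stand-alone sketch uses `Vec` and `usize`.
struct Stack<T> {
    entries: Vec<T>,
}

impl<T> Stack<T> {
    /// Return the index of the last entry together with a mutable reference
    /// to it; `None` if the stack is empty.
    fn last_mut_with_index(&mut self) -> Option<(usize, &mut T)> {
        let idx = self.entries.len().checked_sub(1)?;
        Some((idx, &mut self.entries[idx]))
    }
}

fn main() {
    let mut stack = Stack { entries: vec![10, 20, 30] };
    if let Some((idx, last)) = stack.last_mut_with_index() {
        assert_eq!(idx, 2);
        *last += 1;
    }
    assert_eq!(stack.entries, vec![10, 20, 31]);
}

Dropping `IndexMut` means code outside `stack.rs` can still read any entry through `Index`, but writes can only go through the top of the stack, which is what lets `update_parent_goal` remain the single place where parent state changes.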
