From 578e15d9a243c969148b8cbe4bfc3e7422838338 Mon Sep 17 00:00:00 2001
From: lcnr
Date: Thu, 26 Jun 2025 14:51:16 +0200
Subject: [PATCH 1/2] significantly improve provisional cache rebasing

---
 .../src/solve/search_graph.rs               |   4 +-
 .../rustc_type_ir/src/search_graph/mod.rs   | 183 +++++++++----------
 .../rustc_type_ir/src/search_graph/stack.rs |   5 +-
 3 files changed, 101 insertions(+), 91 deletions(-)

diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs
index 12cbc7e8f91e8..84f8eda4f8da7 100644
--- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs
@@ -48,7 +48,9 @@ where
     ) -> QueryResult<I> {
         match kind {
             PathKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes),
-            PathKind::Unknown => response_no_constraints(cx, input, Certainty::overflow(false)),
+            PathKind::Unknown | PathKind::ForcedAmbiguity => {
+                response_no_constraints(cx, input, Certainty::overflow(false))
+            }
             // Even though we know these cycles to be unproductive, we still return
             // overflow during coherence. This is both as we are not 100% confident in
             // the implementation yet and any incorrect errors would be unsound there.
diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs
index 7433c215e6f36..257a623429d8e 100644
--- a/compiler/rustc_type_ir/src/search_graph/mod.rs
+++ b/compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -21,10 +21,9 @@ use std::marker::PhantomData;
 use derive_where::derive_where;
 #[cfg(feature = "nightly")]
 use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
+use rustc_type_ir::data_structures::HashMap;
 use tracing::{debug, instrument};
 
-use crate::data_structures::HashMap;
-
 mod stack;
 use stack::{Stack, StackDepth, StackEntry};
 mod global_cache;
@@ -137,6 +136,12 @@ pub enum PathKind {
     Unknown,
     /// A path with at least one coinductive step. Such cycles hold.
     Coinductive,
+    /// A path which is treated as ambiguous. Once a path has this kind,
+    /// extending it with further segments does not change it.
+    ///
+    /// This is currently only used when fuzzing to support negative reasoning.
+    /// For more details, see #143054.
+    ForcedAmbiguity,
 }
 
 impl PathKind {
@@ -149,6 +154,9 @@ impl PathKind {
     /// to `max(self, rest)`.
     fn extend(self, rest: PathKind) -> PathKind {
         match (self, rest) {
+            (PathKind::ForcedAmbiguity, _) | (_, PathKind::ForcedAmbiguity) => {
+                PathKind::ForcedAmbiguity
+            }
             (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
             (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
             (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
@@ -187,41 +195,6 @@ impl UsageKind {
     }
 }
 
-/// For each goal we track whether the paths from this goal
-/// to its cycle heads are coinductive.
-///
-/// This is a necessary condition to rebase provisional cache
-/// entries.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum AllPathsToHeadCoinductive {
-    Yes,
-    No,
-}
-impl From<PathKind> for AllPathsToHeadCoinductive {
-    fn from(path: PathKind) -> AllPathsToHeadCoinductive {
-        match path {
-            PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
-            _ => AllPathsToHeadCoinductive::No,
-        }
-    }
-}
-impl AllPathsToHeadCoinductive {
-    #[must_use]
-    fn merge(self, other: impl Into<Self>) -> Self {
-        match (self, other.into()) {
-            (AllPathsToHeadCoinductive::Yes, AllPathsToHeadCoinductive::Yes) => {
-                AllPathsToHeadCoinductive::Yes
-            }
-            (AllPathsToHeadCoinductive::No, _) | (_, AllPathsToHeadCoinductive::No) => {
-                AllPathsToHeadCoinductive::No
-            }
-        }
-    }
-    fn and_merge(&mut self, other: impl Into<Self>) {
-        *self = self.merge(other);
-    }
-}
-
 #[derive(Debug, Clone, Copy)]
 struct AvailableDepth(usize);
 impl AvailableDepth {
@@ -261,9 +234,9 @@ impl AvailableDepth {
 ///
 /// We also track all paths from this goal to that head. This is necessary
 /// when rebasing provisional cache results.
-#[derive(Clone, Debug, PartialEq, Eq, Default)]
+#[derive(Clone, Debug, Default)]
 struct CycleHeads {
-    heads: BTreeMap<StackDepth, AllPathsToHeadCoinductive>,
+    heads: BTreeMap<StackDepth, PathsToNested>,
 }
 
 impl CycleHeads {
@@ -283,27 +256,16 @@ impl CycleHeads {
         self.heads.first_key_value().map(|(k, _)| *k)
     }
 
-    fn remove_highest_cycle_head(&mut self) {
+    fn remove_highest_cycle_head(&mut self) -> PathsToNested {
         let last = self.heads.pop_last();
-        debug_assert_ne!(last, None);
-    }
-
-    fn insert(
-        &mut self,
-        head: StackDepth,
-        path_from_entry: impl Into<AllPathsToHeadCoinductive> + Copy,
-    ) {
-        self.heads.entry(head).or_insert(path_from_entry.into()).and_merge(path_from_entry);
+        last.unwrap().1
     }
 
-    fn merge(&mut self, heads: &CycleHeads) {
-        for (&head, &path_from_entry) in heads.heads.iter() {
-            self.insert(head, path_from_entry);
-            debug_assert!(matches!(self.heads[&head], AllPathsToHeadCoinductive::Yes));
-        }
+    fn insert(&mut self, head: StackDepth, path_from_entry: impl Into<PathsToNested> + Copy) {
+        *self.heads.entry(head).or_insert(path_from_entry.into()) |= path_from_entry.into();
     }
 
-    fn iter(&self) -> impl Iterator<Item = (StackDepth, AllPathsToHeadCoinductive)> + '_ {
+    fn iter(&self) -> impl Iterator<Item = (StackDepth, PathsToNested)> + '_ {
         self.heads.iter().map(|(k, v)| (*k, *v))
     }
 
@@ -317,13 +279,7 @@ impl CycleHeads {
                 Ordering::Equal => continue,
                 Ordering::Greater => unreachable!(),
             }
-
-            let path_from_entry = match step_kind {
-                PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
-                PathKind::Unknown | PathKind::Inductive => path_from_entry,
-            };
-
-            self.insert(head, path_from_entry);
+            self.insert(head, path_from_entry.extend_with(step_kind));
         }
     }
 }
 
 bitflags::bitflags! {
     /// Tracks how nested goals have been accessed. This is necessary to disable
     /// global cache entries if computing them would otherwise result in a cycle or
     /// access a provisional cache entry.
-    #[derive(Debug, Clone, Copy)]
+    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
     pub struct PathsToNested: u8 {
         /// The initial value when adding a goal to its own nested goals.
         const EMPTY = 1 << 0;
         const INDUCTIVE = 1 << 1;
         const UNKNOWN = 1 << 2;
         const COINDUCTIVE = 1 << 3;
+        const FORCED_AMBIGUITY = 1 << 4;
     }
 }
 impl From<PathKind> for PathsToNested {
@@ -347,6 +304,7 @@ impl From<PathKind> for PathsToNested {
             PathKind::Inductive => PathsToNested::INDUCTIVE,
             PathKind::Unknown => PathsToNested::UNKNOWN,
             PathKind::Coinductive => PathsToNested::COINDUCTIVE,
+            PathKind::ForcedAmbiguity => PathsToNested::FORCED_AMBIGUITY,
         }
     }
 }
@@ -379,10 +337,45 @@ impl PathsToNested {
                     self.insert(PathsToNested::COINDUCTIVE);
                 }
             }
+            PathKind::ForcedAmbiguity => {
+                if self.intersects(
+                    PathsToNested::EMPTY
+                        | PathsToNested::INDUCTIVE
+                        | PathsToNested::UNKNOWN
+                        | PathsToNested::COINDUCTIVE,
+                ) {
+                    self.remove(
+                        PathsToNested::EMPTY
+                            | PathsToNested::INDUCTIVE
+                            | PathsToNested::UNKNOWN
+                            | PathsToNested::COINDUCTIVE,
+                    );
+                    self.insert(PathsToNested::FORCED_AMBIGUITY);
+                }
+            }
         }
         self
     }
+
+    #[must_use]
+    fn extend_with_paths(self, path: PathsToNested) -> Self {
+        let mut new = PathsToNested::empty();
+        for p in path.iter_paths() {
+            new |= self.extend_with(p);
+        }
+        new
+    }
+
+    fn iter_paths(self) -> impl Iterator<Item = PathKind> {
+        let (PathKind::Inductive
+        | PathKind::Unknown
+        | PathKind::Coinductive
+        | PathKind::ForcedAmbiguity);
+        [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
+            .into_iter()
+            .filter(move |&p| self.contains(p.into()))
+    }
 }
 
 /// The nested goals of each stack entry and the path from the
@@ -693,7 +686,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
         if let Some((_scope, expected)) = validate_cache {
             // Do not try to move a goal into the cache again if we're testing
             // the global cache.
-            assert_eq!(evaluation_result.result, expected, "input={input:?}");
+            assert_eq!(expected, evaluation_result.result, "input={input:?}");
         } else if D::inspect_is_noop(inspect) {
             self.insert_global_cache(cx, input, evaluation_result, dep_node)
         }
@@ -763,14 +756,11 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
     /// provisional cache entry is still applicable. We need to keep the cache entries to
     /// prevent hangs.
    ///
-    /// What we therefore do is check whether the cycle kind of all cycles the goal of a
-    /// provisional cache entry is involved in would stay the same when computing the
-    /// goal without its cycle head on the stack. For more details, see the relevant
+    /// This can be thought of as pretending to reevaluate the popped head as a nested
+    /// goal of this provisional result. For this to be correct, all cycles encountered
+    /// while reevaluating the popped head this way must keep their cycle kind. See the
     /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
     ///
-    /// This can be thought of rotating the sub-tree of this provisional result and changing
-    /// its entry point while making sure that all paths through this sub-tree stay the same.
-    ///
     /// In case the popped cycle head failed to reach a fixpoint anything which depends on
     /// its provisional result is invalid. Actually discarding provisional cache entries in
     /// this case would cause hangs, so we instead change the result of dependant provisional
@@ -782,7 +772,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
         stack_entry: &StackEntry<X>,
         mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
     ) {
-        let head = self.stack.next_index();
+        let popped_head = self.stack.next_index();
         #[allow(rustc::potential_query_instability)]
         self.provisional_cache.retain(|&input, entries| {
             entries.retain_mut(|entry| {
@@ -792,30 +782,45 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
                     path_from_head,
                     result,
                 } = entry;
-                if heads.highest_cycle_head() == head {
+                let ep = if heads.highest_cycle_head() == popped_head {
                     heads.remove_highest_cycle_head()
                 } else {
                     return true;
-                }
-
-                // We only try to rebase if all paths from the cache entry
-                // to its heads are coinductive. In this case these cycle
-                // kinds won't change, no matter the goals between these
-                // heads and the provisional cache entry.
-                if heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) {
-                    return false;
-                }
+                };
 
-                // The same for nested goals of the cycle head.
-                if stack_entry.heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No))
-                {
-                    return false;
+                // We're rebasing an entry `e` over a head `p`. This head
+                // has a number of own heads `h` it depends on. We need to
+                // make sure that the path kind of all paths `hph` remains the
+                // same after rebasing.
+                //
+                // After rebasing, the cycles `hph` will go through `e`. We need
+                // to make sure that for all possible paths `hep`, the resulting
+                // path `heph` is equal to `hph`.
+                for (h, ph) in stack_entry.heads.iter() {
+                    let hp =
+                        Self::cycle_path_kind(&self.stack, stack_entry.step_kind_from_parent, h);
+
+                    // We first validate that all cycles while computing `p` would stay
+                    // the same if we were to recompute it as a nested goal of `e`.
+                    let he = hp.extend(*path_from_head);
+                    for ph in ph.iter_paths() {
+                        let hph = hp.extend(ph);
+                        for ep in ep.iter_paths() {
+                            let hep = ep.extend(he);
+                            let heph = hep.extend(ph);
+                            if hph != heph {
+                                return false;
+                            }
+                        }
+                    }
+
+                    // If so, all paths reached while computing `p` have to get added to
+                    // the heads of `e` to make sure that rebasing `e` again also considers
+                    // them.
+                    let eph = ep.extend_with_paths(ph);
+                    heads.insert(h, eph);
                 }
 
-                // Merge the cycle heads of the provisional cache entry and the
-                // popped head. If the popped cycle head was a root, discard all
-                // provisional cache entries which depend on it.
-                heads.merge(&stack_entry.heads);
                 let Some(head) = heads.opt_highest_cycle_head() else {
                     return false;
                 };
diff --git a/compiler/rustc_type_ir/src/search_graph/stack.rs b/compiler/rustc_type_ir/src/search_graph/stack.rs
index e0fd934df698f..a58cd82b02303 100644
--- a/compiler/rustc_type_ir/src/search_graph/stack.rs
+++ b/compiler/rustc_type_ir/src/search_graph/stack.rs
@@ -3,7 +3,7 @@ use std::ops::{Index, IndexMut};
 use derive_where::derive_where;
 use rustc_index::IndexVec;
 
-use super::{AvailableDepth, Cx, CycleHeads, NestedGoals, PathKind, UsageKind};
+use crate::search_graph::{AvailableDepth, Cx, CycleHeads, NestedGoals, PathKind, UsageKind};
 
 rustc_index::newtype_index! {
     #[orderable]
@@ -79,6 +79,9 @@ impl<X: Cx> Stack<X> {
     }
 
     pub(super) fn push(&mut self, entry: StackEntry<X>) -> StackDepth {
+        if cfg!(debug_assertions) && self.entries.iter().any(|e| e.input == entry.input) {
+            panic!("pushing duplicate entry on stack: {entry:?} {:?}", self.entries);
+        }
         self.entries.push(entry)
     }
 

From 48f67bb9aaa984f67ea631f122adc06a6da46f3b Mon Sep 17 00:00:00 2001
From: lcnr
Date: Tue, 5 Aug 2025 15:28:25 +0200
Subject: [PATCH 2/2] simplify stack handling, be completely lazy

---
 .../rustc_type_ir/src/search_graph/mod.rs   | 141 +++++++++++-------
 .../rustc_type_ir/src/search_graph/stack.rs |  22 +--
 2 files changed, 95 insertions(+), 68 deletions(-)

diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs
index 257a623429d8e..7a6d1c8785791 100644
--- a/compiler/rustc_type_ir/src/search_graph/mod.rs
+++ b/compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -12,10 +12,11 @@
 //! The global cache has to be completely unobservable, while the per-cycle cache may impact
 //! behavior as long as the resulting behavior is still correct.
 use std::cmp::Ordering;
-use std::collections::BTreeMap;
 use std::collections::hash_map::Entry;
+use std::collections::{BTreeMap, btree_map};
 use std::fmt::Debug;
 use std::hash::Hash;
+use std::iter;
 use std::marker::PhantomData;
 
 use derive_where::derive_where;
@@ -230,13 +231,19 @@ impl AvailableDepth {
     }
 }
 
+#[derive(Clone, Copy, Debug)]
+struct CycleHead {
+    paths_to_head: PathsToNested,
+    usage_kind: UsageKind,
+}
+
 /// All cycle heads a given goal depends on, ordered by their stack depth.
 ///
 /// We also track all paths from this goal to that head. This is necessary
 /// when rebasing provisional cache results.
 #[derive(Clone, Debug, Default)]
 struct CycleHeads {
-    heads: BTreeMap<StackDepth, PathsToNested>,
+    heads: BTreeMap<StackDepth, CycleHead>,
 }
 
 impl CycleHeads {
@@ -256,32 +263,32 @@ impl CycleHeads {
         self.heads.first_key_value().map(|(k, _)| *k)
     }
 
-    fn remove_highest_cycle_head(&mut self) -> PathsToNested {
+    fn remove_highest_cycle_head(&mut self) -> CycleHead {
         let last = self.heads.pop_last();
         last.unwrap().1
     }
 
-    fn insert(&mut self, head: StackDepth, path_from_entry: impl Into<PathsToNested> + Copy) {
-        *self.heads.entry(head).or_insert(path_from_entry.into()) |= path_from_entry.into();
+    fn insert(
+        &mut self,
+        head_index: StackDepth,
+        path_from_entry: impl Into<PathsToNested> + Copy,
+        usage_kind: UsageKind,
+    ) {
+        match self.heads.entry(head_index) {
+            btree_map::Entry::Vacant(entry) => {
+                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usage_kind });
+            }
+            btree_map::Entry::Occupied(entry) => {
+                let head = entry.into_mut();
+                head.paths_to_head |= path_from_entry.into();
+                head.usage_kind = head.usage_kind.merge(usage_kind);
+            }
+        }
     }
 
-    fn iter(&self) -> impl Iterator<Item = (StackDepth, PathsToNested)> + '_ {
+    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
         self.heads.iter().map(|(k, v)| (*k, *v))
     }
-
-    /// Update the cycle heads of a goal at depth `this` given the cycle heads
-    /// of a nested goal. This merges the heads after filtering the parent goal
-    /// itself.
-    fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
-        for (&head, &path_from_entry) in child.heads.iter() {
-            match head.cmp(&this) {
-                Ordering::Less => {}
-                Ordering::Equal => continue,
-                Ordering::Greater => unreachable!(),
-            }
-            self.insert(head, path_from_entry.extend_with(step_kind));
-        }
-    }
 }
 
 bitflags::bitflags! {
@@ -487,9 +494,6 @@ impl<X: Cx> EvaluationResult<X> {
 pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     root_depth: AvailableDepth,
-    /// The stack of goals currently being computed.
-    ///
-    /// An element is *deeper* in the stack if its index is *lower*.
     stack: Stack<X>,
     /// The provisional cache contains entries for already computed goals which
     /// still depend on goals higher-up in the stack. We don't move them to the
@@ -511,6 +515,7 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
 /// cache entry.
 enum UpdateParentGoalCtxt<'a, X: Cx> {
     Ordinary(&'a NestedGoals<X>),
+    CycleOnStack(X::Input),
     ProvisionalCacheHit,
 }
 
@@ -532,21 +537,42 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
         stack: &mut Stack<X>,
         step_kind_from_parent: PathKind,
         required_depth_for_nested: usize,
-        heads: &CycleHeads,
+        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
         encountered_overflow: bool,
         context: UpdateParentGoalCtxt<'_, X>,
     ) {
-        if let Some(parent_index) = stack.last_index() {
-            let parent = &mut stack[parent_index];
+        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
             parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
             parent.encountered_overflow |= encountered_overflow;
-            parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
+            for (head_index, head) in heads {
+                match head_index.cmp(&parent_index) {
+                    Ordering::Less => parent.heads.insert(
+                        head_index,
+                        head.paths_to_head.extend_with(step_kind_from_parent),
+                        head.usage_kind,
+                    ),
+                    Ordering::Equal => {
+                        let usage_kind = parent
+                            .has_been_used
+                            .map_or(head.usage_kind, |prev| prev.merge(head.usage_kind));
+                        parent.has_been_used = Some(usage_kind);
+                    }
+                    Ordering::Greater => unreachable!(),
+                }
+            }
             let parent_depends_on_cycle = match context {
                 UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                     parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                     !nested_goals.is_empty()
                 }
+                UpdateParentGoalCtxt::CycleOnStack(head) => {
+                    // We look up provisional cache entries before detecting cycles.
+                    // We therefore can't use a global cache entry if it contains a cycle
+                    // whose head is in the provisional cache.
+                    parent.nested_goals.insert(head, step_kind_from_parent.into());
+                    true
+                }
                 UpdateParentGoalCtxt::ProvisionalCacheHit => true,
             };
             // Once we've got goals which encountered overflow or a cycle,
@@ -674,7 +700,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
             &mut self.stack,
             step_kind_from_parent,
             evaluation_result.required_depth,
-            &evaluation_result.heads,
+            evaluation_result.heads.iter(),
            evaluation_result.encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
         );
@@ -772,7 +798,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
         stack_entry: &StackEntry<X>,
         mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
     ) {
-        let popped_head = self.stack.next_index();
+        let popped_head_index = self.stack.next_index();
         #[allow(rustc::potential_query_instability)]
         self.provisional_cache.retain(|&input, entries| {
             entries.retain_mut(|entry| {
@@ -782,7 +808,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
                     path_from_head,
                     result,
                 } = entry;
-                let ep = if heads.highest_cycle_head() == popped_head {
+                let popped_head = if heads.highest_cycle_head() == popped_head_index {
                     heads.remove_highest_cycle_head()
                 } else {
                     return true;
                 };
@@ -796,9 +822,14 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
                 // After rebasing, the cycles `hph` will go through `e`. We need
                 // to make sure that for all possible paths `hep`, the resulting
                 // path `heph` is equal to `hph`.
-                for (h, ph) in stack_entry.heads.iter() {
-                    let hp =
-                        Self::cycle_path_kind(&self.stack, stack_entry.step_kind_from_parent, h);
+                let ep = popped_head.paths_to_head;
+                for (head_index, head) in stack_entry.heads.iter() {
+                    let ph = head.paths_to_head;
+                    let hp = Self::cycle_path_kind(
+                        &self.stack,
+                        stack_entry.step_kind_from_parent,
+                        head_index,
+                    );
 
                     // We first validate that all cycles while computing `p` would stay
                     // the same if we were to recompute it as a nested goal of `e`.
@@ -818,7 +849,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
                     // the heads of `e` to make sure that rebasing `e` again also considers
                     // them.
                     let eph = ep.extend_with_paths(ph);
-                    heads.insert(h, eph);
+                    heads.insert(head_index, eph, head.usage_kind);
                 }
 
                 let Some(head) = heads.opt_highest_cycle_head() else {
                     return false;
                 };
@@ -878,11 +909,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
                 &mut self.stack,
                 step_kind_from_parent,
                 0,
-                heads,
+                heads.iter(),
                encountered_overflow,
                 UpdateParentGoalCtxt::ProvisionalCacheHit,
             );
-            debug_assert!(self.stack[head].has_been_used.is_some());
             debug!(?head, ?path_from_head, "provisional cache hit");
             return Some(result);
         }
@@ -994,12 +1024,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
         // We don't move cycle participants to the global cache, so the
         // cycle heads are always empty.
-        let heads = Default::default();
+        let heads = iter::empty();
         Self::update_parent_goal(
             &mut self.stack,
             step_kind_from_parent,
             required_depth,
-            &heads,
+            heads,
             encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(nested_goals),
         );
@@ -1015,34 +1045,31 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
         input: X::Input,
         step_kind_from_parent: PathKind,
     ) -> Option<X::Result> {
-        let head = self.stack.find(input)?;
+        let head_index = self.stack.find(input)?;
         // We have a nested goal which directly relies on a goal deeper in the stack.
         //
         // We start by tagging all cycle participants, as that's necessary for caching.
         //
         // Finally we can return either the provisional response or the initial response
         // in case we're in the first fixpoint iteration for this goal.
-        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
-        debug!(?path_kind, "encountered cycle with depth {head:?}");
-        let usage_kind = UsageKind::Single(path_kind);
-        self.stack[head].has_been_used =
-            Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
-
-        // Subtle: when encountering a cyclic goal, we still first checked for overflow,
-        // so we have to update the reached depth.
-        let last_index = self.stack.last_index().unwrap();
-        let last = &mut self.stack[last_index];
-        last.required_depth = last.required_depth.max(1);
-
-        last.nested_goals.insert(input, step_kind_from_parent.into());
-        last.nested_goals.insert(last.input, PathsToNested::EMPTY);
-        if last_index != head {
-            last.heads.insert(head, step_kind_from_parent);
-        }
+        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
+        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
+        let head = CycleHead {
+            paths_to_head: step_kind_from_parent.into(),
+            usage_kind: UsageKind::Single(path_kind),
+        };
+        Self::update_parent_goal(
+            &mut self.stack,
+            step_kind_from_parent,
+            0,
+            iter::once((head_index, head)),
+            false,
+            UpdateParentGoalCtxt::CycleOnStack(input),
+        );
 
         // Return the provisional result or, if we're in the first iteration,
         // start with no constraints.
-        if let Some(result) = self.stack[head].provisional_result {
+        if let Some(result) = self.stack[head_index].provisional_result {
             Some(result)
         } else {
             Some(D::initial_provisional_result(cx, path_kind, input))
diff --git a/compiler/rustc_type_ir/src/search_graph/stack.rs b/compiler/rustc_type_ir/src/search_graph/stack.rs
index a58cd82b02303..ea99dc6e7fd28 100644
--- a/compiler/rustc_type_ir/src/search_graph/stack.rs
+++ b/compiler/rustc_type_ir/src/search_graph/stack.rs
@@ -1,4 +1,4 @@
-use std::ops::{Index, IndexMut};
+use std::ops::Index;
 
 use derive_where::derive_where;
 use rustc_index::IndexVec;
@@ -48,6 +48,12 @@ pub(super) struct StackEntry<X: Cx> {
     pub nested_goals: NestedGoals<X>,
 }
 
+/// The stack of goals currently being computed.
+///
+/// An element is *deeper* in the stack if its index is *lower*.
+///
+/// Only the last entry of the stack is mutable. All other entries get
+/// lazily updated in `update_parent_goal`.
 #[derive_where(Default; X: Cx)]
 pub(super) struct Stack<X: Cx> {
     entries: IndexVec<StackDepth, StackEntry<X>>,
 }
@@ -62,10 +68,6 @@ impl<X: Cx> Stack<X> {
         self.entries.len()
     }
 
-    pub(super) fn last_index(&self) -> Option<StackDepth> {
-        self.entries.last_index()
-    }
-
     pub(super) fn last(&self) -> Option<&StackEntry<X>> {
         self.entries.raw.last()
     }
@@ -74,6 +76,10 @@ impl<X: Cx> Stack<X> {
         self.entries.raw.last_mut()
     }
 
+    pub(super) fn last_mut_with_index(&mut self) -> Option<(StackDepth, &mut StackEntry<X>)> {
+        self.entries.last_index().map(|idx| (idx, &mut self.entries[idx]))
+    }
+
     pub(super) fn next_index(&self) -> StackDepth {
         self.entries.next_index()
     }
@@ -108,9 +114,3 @@ impl<X: Cx> Index<StackDepth> for Stack<X> {
         &self.entries[index]
     }
 }
-
-impl<X: Cx> IndexMut<StackDepth> for Stack<X> {
-    fn index_mut(&mut self, index: StackDepth) -> &mut Self::Output {
-        &mut self.entries[index]
-    }
-}
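
The rebasing condition in `rebase_provisional_cache_entries` is fairly dense, so the following is a minimal, self-contained Rust sketch of the path-kind algebra it relies on. It is illustrative only: `may_rebase` and the `main` driver are assumed names and simplified stand-ins rather than rustc-internal items; the `extend`-as-maximum lattice and the `hph == heph` check mirror the logic added by the first patch.

// Illustrative sketch only: simplified stand-ins for the patch's `PathKind`
// lattice and the `hph == heph` rebasing check. Assumed names: `may_rebase`,
// `main`; they are not part of rustc.

/// Variants are ordered from weakest to strongest, matching the order used
/// by the patch (`Inductive < Unknown < Coinductive < ForcedAmbiguity`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
    ForcedAmbiguity,
}

impl PathKind {
    /// Extending a path `a -> b` with a step `b -> c` keeps the strongest
    /// step seen so far, i.e. `max(self, rest)`, as the doc comment on
    /// `PathKind::extend` in the patch describes.
    fn extend(self, rest: PathKind) -> PathKind {
        self.max(rest)
    }
}

/// The rebasing condition, restated: when popping the head `p` of an entry
/// `e`, the entry may only be kept if, for every remaining head `h`, routing
/// the old cycles `h -> p -> h` (`hph`) through `e` (`h -> e -> p -> h`,
/// `heph`) yields the same path kind for all possible path combinations.
fn may_rebase(
    hp: PathKind,              // path from `h` to `p` on the stack
    paths_p_to_h: &[PathKind], // possible paths from `p` back to `h` (`ph`)
    paths_e_to_p: &[PathKind], // possible paths from `e` to `p` (`ep`)
    path_p_to_e: PathKind,     // path from the popped head `p` to `e`
) -> bool {
    let he = hp.extend(path_p_to_e);
    paths_p_to_h.iter().all(|&ph| {
        let hph = hp.extend(ph);
        paths_e_to_p.iter().all(|&ep| {
            let hep = ep.extend(he);
            let heph = hep.extend(ph);
            hph == heph
        })
    })
}

fn main() {
    use PathKind::*;

    // Every involved path is coinductive: routing the cycle through `e`
    // cannot change its kind, so the entry can be rebased. This is the
    // special case the old `AllPathsToHeadCoinductive` check accepted.
    assert!(may_rebase(Coinductive, &[Coinductive], &[Coinductive], Coinductive));

    // The cycle `h -> p -> h` was inductive, but going through `e` adds a
    // coinductive step and changes the cycle kind: the entry must be dropped.
    assert!(!may_rebase(Inductive, &[Inductive], &[Inductive], Coinductive));

    // Same idea with the new `ForcedAmbiguity` kind, which dominates all
    // other kinds in the lattice.
    assert!(!may_rebase(Unknown, &[Unknown], &[Unknown], ForcedAmbiguity));
}

Because `extend` behaves as a maximum, the first assertion shows why the old "all paths to the head are coinductive" fast path was sound, while the second patch generalizes the check to arbitrary mixtures of path kinds per head.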