From 3bf0cebaf21e91c51d938e504b0289a4a29ce8ab Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 27 Sep 2025 17:04:18 +0200 Subject: [PATCH 01/45] Run nested cycles in a single fixpoint iteration Fix serde attribute --- src/active_query.rs | 5 +- src/cycle.rs | 230 ++++++++++-- src/function.rs | 44 ++- src/function/execute.rs | 332 ++++++++++++------ src/function/fetch.rs | 19 +- src/function/maybe_changed_after.rs | 178 ++++++---- src/function/memo.rs | 256 ++++++++++---- src/function/sync.rs | 2 +- src/ingredient.rs | 37 +- src/zalsa_local.rs | 106 +++++- tests/backtrace.rs | 6 +- tests/cycle.rs | 9 +- tests/parallel/cycle_nested_deep.rs | 1 + .../parallel/cycle_nested_deep_conditional.rs | 2 +- .../cycle_nested_deep_conditional_changed.rs | 9 +- 15 files changed, 929 insertions(+), 307 deletions(-) diff --git a/src/active_query.rs b/src/active_query.rs index 0b2231052..11cf5d2eb 100644 --- a/src/active_query.rs +++ b/src/active_query.rs @@ -225,6 +225,7 @@ impl ActiveQuery { active_tracked_structs, mem::take(cycle_heads), iteration_count, + false, ); let revisions = QueryRevisions { @@ -498,7 +499,7 @@ impl fmt::Display for Backtrace { if full { write!(fmt, " -> ({changed_at:?}, {durability:#?}")?; if !cycle_heads.is_empty() || !iteration_count.is_initial() { - write!(fmt, ", iteration = {iteration_count:?}")?; + write!(fmt, ", iteration = {iteration_count}")?; } write!(fmt, ")")?; } @@ -517,7 +518,7 @@ impl fmt::Display for Backtrace { } write!( fmt, - "{:?} -> {:?}", + "{:?} -> {}", head.database_key_index, head.iteration_count )?; } diff --git a/src/cycle.rs b/src/cycle.rs index 12cb1cdc9..413635953 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -52,6 +52,7 @@ use thin_vec::{thin_vec, ThinVec}; use crate::key::DatabaseKeyIndex; +use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering}; use crate::sync::OnceLock; use crate::Revision; @@ -96,14 +97,26 @@ pub enum CycleRecoveryStrategy { /// would be the cycle head. 
It returns an "initial value" when the cycle is encountered (if /// fixpoint iteration is enabled for that query), and then is responsible for re-iterating the /// cycle until it converges. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Debug)] #[cfg_attr(feature = "persistence", derive(serde::Serialize, serde::Deserialize))] pub struct CycleHead { pub(crate) database_key_index: DatabaseKeyIndex, - pub(crate) iteration_count: IterationCount, + pub(crate) iteration_count: AtomicIterationCount, + #[cfg_attr(feature = "persistence", serde(skip))] + removed: AtomicBool, +} + +impl Clone for CycleHead { + fn clone(&self) -> Self { + Self { + database_key_index: self.database_key_index, + iteration_count: self.iteration_count.load().into(), + removed: self.removed.load(Ordering::Relaxed).into(), + } + } } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Default)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Default, PartialOrd, Ord)] #[cfg_attr(feature = "persistence", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "persistence", serde(transparent))] pub struct IterationCount(u8); @@ -131,11 +144,65 @@ impl IterationCount { } } +impl std::fmt::Display for IterationCount { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "iteration={}", self.0) + } +} + +#[derive(Debug)] +pub(crate) struct AtomicIterationCount(AtomicU8); + +impl AtomicIterationCount { + pub(crate) fn load(&self) -> IterationCount { + IterationCount(self.0.load(Ordering::Relaxed)) + } + + pub(crate) fn store(&self, value: IterationCount) { + self.0.store(value.0, Ordering::Release); + } + + pub(crate) fn store_mut(&mut self, value: IterationCount) { + *self.0.get_mut() = value.0; + } +} + +impl std::fmt::Display for AtomicIterationCount { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.load().fmt(f) + } +} + +impl From for AtomicIterationCount { + fn from(iteration_count: IterationCount) -> Self { + 
AtomicIterationCount(iteration_count.0.into()) + } +} + +#[cfg(feature = "persistence")] +impl serde::Serialize for AtomicIterationCount { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.load().serialize(serializer) + } +} + +#[cfg(feature = "persistence")] +impl<'de> serde::Deserialize<'de> for AtomicIterationCount { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + IterationCount::deserialize(deserializer).map(Into::into) + } +} + /// Any provisional value generated by any query in a cycle will track the cycle head(s) (can be /// plural in case of nested cycles) representing the cycles it is part of, and the current /// iteration count for each cycle head. This struct tracks these cycle heads. #[derive(Clone, Debug, Default)] -#[cfg_attr(feature = "persistence", derive(serde::Serialize, serde::Deserialize))] pub struct CycleHeads(ThinVec); impl CycleHeads { @@ -146,12 +213,15 @@ impl CycleHeads { pub(crate) fn initial(database_key_index: DatabaseKeyIndex) -> Self { Self(thin_vec![CycleHead { database_key_index, - iteration_count: IterationCount::initial(), + iteration_count: IterationCount::initial().into(), + removed: false.into() }]) } - pub(crate) fn iter(&self) -> std::slice::Iter<'_, CycleHead> { - self.0.iter() + pub(crate) fn iter(&self) -> CycleHeadsIterator<'_> { + CycleHeadsIterator { + inner: self.0.iter(), + } } pub(crate) fn contains(&self, value: &DatabaseKeyIndex) -> bool { @@ -159,17 +229,18 @@ impl CycleHeads { .any(|head| head.database_key_index == *value) } - pub(crate) fn remove(&mut self, value: &DatabaseKeyIndex) -> bool { - let found = self - .0 - .iter() - .position(|&head| head.database_key_index == *value); - let Some(found) = found else { return false }; - self.0.swap_remove(found); - true + pub(crate) fn clear_except(&self, except: DatabaseKeyIndex) { + for head in self.0.iter() { + if head.database_key_index == except { + continue; + } + + // TODO: verify 
ordering + head.removed.store(true, Ordering::Release); + } } - pub(crate) fn update_iteration_count( + pub(crate) fn update_iteration_count_mut( &mut self, cycle_head_index: DatabaseKeyIndex, new_iteration_count: IterationCount, @@ -179,7 +250,21 @@ impl CycleHeads { .iter_mut() .find(|cycle_head| cycle_head.database_key_index == cycle_head_index) { - cycle_head.iteration_count = new_iteration_count; + cycle_head.iteration_count.store_mut(new_iteration_count); + } + } + + pub(crate) fn update_iteration_count( + &self, + cycle_head_index: DatabaseKeyIndex, + new_iteration_count: IterationCount, + ) { + if let Some(cycle_head) = self + .0 + .iter() + .find(|cycle_head| cycle_head.database_key_index == cycle_head_index) + { + cycle_head.iteration_count.store(new_iteration_count); } } @@ -188,15 +273,41 @@ impl CycleHeads { self.0.reserve(other.0.len()); for head in other { - if let Some(existing) = self - .0 - .iter() - .find(|candidate| candidate.database_key_index == head.database_key_index) - { - assert_eq!(existing.iteration_count, head.iteration_count); + self.insert(head); + } + } + + pub(crate) fn insert(&mut self, head: &CycleHead) -> bool { + if let Some(existing) = self + .0 + .iter_mut() + .find(|candidate| candidate.database_key_index == head.database_key_index) + { + let removed = existing.removed.get_mut(); + + if *removed { + *removed = false; + + true } else { - self.0.push(*head); + let existing_count = existing.iteration_count.load(); + let head_count = head.iteration_count.load(); + + // It's now possible that a query can depend on different iteration counts of the same query + // This because some queries (inner) read the provisional value of the last iteration + // while outer queries read the value from the last iteration (which is i+1 if the head didn't converge). 
+ assert_eq!( + existing_count, head_count, + "Can't merge cycle heads {:?} with different iteration counts ({existing_count:?}, {head_count:?})", + existing.database_key_index + ); + + false } + } else { + debug_assert!(!head.removed.load(Ordering::Relaxed)); + self.0.push(head.clone()); + true } } @@ -206,6 +317,37 @@ impl CycleHeads { } } +#[cfg(feature = "persistence")] +impl serde::Serialize for CycleHeads { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + use serde::ser::SerializeSeq; + + let mut seq = serializer.serialize_seq(None)?; + for e in self { + if e.removed.load(Ordering::Relaxed) { + continue; + } + + seq.serialize_element(e)?; + } + seq.end() + } +} + +#[cfg(feature = "persistence")] +impl<'de> serde::Deserialize<'de> for CycleHeads { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let vec: ThinVec = serde::Deserialize::deserialize(deserializer)?; + Ok(CycleHeads(vec)) + } +} + impl IntoIterator for CycleHeads { type Item = CycleHead; type IntoIter = as IntoIterator>::IntoIter; @@ -215,9 +357,29 @@ impl IntoIterator for CycleHeads { } } +pub struct CycleHeadsIterator<'a> { + inner: std::slice::Iter<'a, CycleHead>, +} + +impl<'a> Iterator for CycleHeadsIterator<'a> { + type Item = &'a CycleHead; + + fn next(&mut self) -> Option { + loop { + let next = self.inner.next()?; + + if next.removed.load(Ordering::Relaxed) { + continue; + } + + return Some(next); + } + } +} + impl<'a> std::iter::IntoIterator for &'a CycleHeads { type Item = &'a CycleHead; - type IntoIter = std::slice::Iter<'a, CycleHead>; + type IntoIter = CycleHeadsIterator<'a>; fn into_iter(self) -> Self::IntoIter { self.iter() @@ -241,28 +403,22 @@ pub enum ProvisionalStatus { Provisional { iteration: IterationCount, verified_at: Revision, + nested: bool, }, Final { iteration: IterationCount, verified_at: Revision, + nested: bool, }, FallbackImmediate, } impl ProvisionalStatus { - pub(crate) const fn 
iteration(&self) -> Option { - match self { - ProvisionalStatus::Provisional { iteration, .. } => Some(*iteration), - ProvisionalStatus::Final { iteration, .. } => Some(*iteration), - ProvisionalStatus::FallbackImmediate => None, - } - } - - pub(crate) const fn verified_at(&self) -> Option { + pub(crate) fn nested(&self) -> bool { match self { - ProvisionalStatus::Provisional { verified_at, .. } => Some(*verified_at), - ProvisionalStatus::Final { verified_at, .. } => Some(*verified_at), - ProvisionalStatus::FallbackImmediate => None, + ProvisionalStatus::Provisional { nested, .. } => *nested, + ProvisionalStatus::Final { nested, .. } => *nested, + ProvisionalStatus::FallbackImmediate => false, } } } diff --git a/src/function.rs b/src/function.rs index 58f773895..baa853828 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::SyncGuard; +pub(crate) use sync::{ClaimGuard, SyncGuard}; use std::any::Any; use std::fmt; @@ -8,7 +8,8 @@ use std::sync::atomic::Ordering; use std::sync::OnceLock; use crate::cycle::{ - empty_cycle_heads, CycleHeads, CycleRecoveryAction, CycleRecoveryStrategy, ProvisionalStatus, + empty_cycle_heads, CycleHeads, CycleRecoveryAction, CycleRecoveryStrategy, IterationCount, + ProvisionalStatus, }; use crate::database::RawDatabase; use crate::function::delete::DeletedEntries; @@ -348,16 +349,49 @@ where ProvisionalStatus::Final { iteration, verified_at: memo.verified_at.load(), + nested: memo.revisions.is_nested_cycle(), } } } else { ProvisionalStatus::Provisional { iteration, verified_at: memo.verified_at.load(), + nested: memo.revisions.is_nested_cycle(), } }) } + fn set_cycle_iteration_count(&self, zalsa: &Zalsa, input: Id, iteration_count: IterationCount) { + let Some(memo) = + self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input)) + else { + return; + }; + + memo.revisions + 
.set_iteration_count(Self::database_key_index(self, input), iteration_count); + } + + fn set_cycle_finalized(&self, zalsa: &Zalsa, input: Id) { + let Some(memo) = + self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input)) + else { + return; + }; + + memo.revisions.verified_final.store(true, Ordering::Release); + } + + fn cycle_converged(&self, zalsa: &Zalsa, input: Id) -> bool { + let Some(memo) = + self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input)) + else { + return true; + }; + + memo.revisions.cycle_converged() + } + fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads { self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input)) .map(|memo| memo.cycle_heads()) @@ -375,7 +409,7 @@ where match self.sync_table.try_claim(zalsa, key_index) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), ClaimResult::Cycle => WaitForResult::Cycle, - ClaimResult::Claimed(_) => WaitForResult::Available, + ClaimResult::Claimed(guard) => WaitForResult::Available(guard), } } @@ -435,10 +469,6 @@ where unreachable!("function does not allocate pages") } - fn cycle_recovery_strategy(&self) -> CycleRecoveryStrategy { - C::CYCLE_STRATEGY - } - #[cfg(feature = "accumulator")] unsafe fn accumulated<'db>( &'db self, diff --git a/src/function/execute.rs b/src/function/execute.rs index 9521a9dce..107f15eae 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -1,3 +1,5 @@ +use smallvec::SmallVec; + use crate::active_query::CompletedQuery; use crate::cycle::{CycleRecoveryStrategy, IterationCount}; use crate::function::memo::Memo; @@ -32,6 +34,7 @@ where opt_old_memo: Option<&Memo<'db, C>>, ) -> &'db Memo<'db, C> { let id = database_key_index.key_index(); + let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); crate::tracing::info!("{:?}: executing query", database_key_index); @@ -40,7 +43,6 @@ where database_key: database_key_index, }) 
}); - let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); let (new_value, mut completed_query) = match C::CYCLE_STRATEGY { CycleRecoveryStrategy::Panic => Self::execute_query( @@ -60,6 +62,8 @@ where if let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() { // Did the new result we got depend on our own provisional value, in a cycle? if cycle_heads.contains(&database_key_index) { + let id = database_key_index.key_index(); + // Ignore the computed value, leave the fallback value there. let memo = self .get_memo_from_table_for(zalsa, id, memo_ingredient_index) @@ -119,7 +123,7 @@ where } self.insert_memo( zalsa, - id, + database_key_index.key_index(), Memo::new( Some(new_value), zalsa.current_revision(), @@ -139,19 +143,32 @@ where memo_ingredient_index: MemoIngredientIndex, ) -> (C::Output<'db>, CompletedQuery) { let id = database_key_index.key_index(); - let mut iteration_count = IterationCount::initial(); - let mut active_query = zalsa_local.push_query(database_key_index, iteration_count); // Our provisional value from the previous iteration, when doing fixpoint iteration. // Initially it's set to None, because the initial provisional value is created lazily, // only when a cycle is actually encountered. - let mut opt_last_provisional: Option<&Memo<'db, C>> = None; + let mut previous_memo: Option<&Memo<'db, C>> = None; + // TODO: Can we seed those somehow? 
let mut last_stale_tracked_ids: Vec<(Identity, Id)> = Vec::new(); + let _guard = ClearCycleHeadIfPanicking::new(self, zalsa, id, memo_ingredient_index); + let mut iteration_count = IterationCount::initial(); - loop { - let previous_memo = opt_last_provisional.or(opt_old_memo); + if let Some(old_memo) = opt_old_memo { + if old_memo.verified_at.load() == zalsa.current_revision() + && old_memo.cycle_heads().contains(&database_key_index) + { + previous_memo = Some(old_memo); + if old_memo.revisions.is_nested_cycle() { + iteration_count = old_memo.revisions.iteration(); + } + } + } + + let mut active_query = zalsa_local.push_query(database_key_index, iteration_count); + + let (new_value, completed_query) = loop { // Tracked struct ids that existed in the previous revision // but weren't recreated in the last iteration. It's important that we seed the next // query with these ids because the query might re-create them as part of the next iteration. @@ -163,115 +180,232 @@ where let (mut new_value, mut completed_query) = Self::execute_query(db, zalsa, active_query, previous_memo); + // If there are no cycle heads, break out of the loop (`cycle_heads_mut` returns `None` if the cycle head list is empty) + let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() else { + break (new_value, completed_query); + }; + + let mut cycle_heads = std::mem::take(cycle_heads); + + let mut queue: SmallVec<[DatabaseKeyIndex; 4]> = cycle_heads + .iter() + .map(|head| head.database_key_index) + .filter(|head| *head != database_key_index) + .collect(); + + while let Some(head) = queue.pop() { + let ingredient = zalsa.lookup_ingredient(head.ingredient_index()); + let nested_heads = ingredient.cycle_heads(zalsa, head.key_index()); + + for head in nested_heads { + if cycle_heads.insert(head) && !queue.contains(&head.database_key_index) { + queue.push(head.database_key_index); + } + } + } + // Did the new result we got depend on our own provisional value, in a cycle? 
- if let Some(cycle_heads) = completed_query - .revisions - .cycle_heads_mut() - .filter(|cycle_heads| cycle_heads.contains(&database_key_index)) - { - let last_provisional_value = if let Some(last_provisional) = opt_last_provisional { - // We have a last provisional value from our previous time around the loop. - last_provisional.value.as_ref() - } else { - // This is our first time around the loop; a provisional value must have been - // inserted into the memo table when the cycle was hit, so let's pull our - // initial provisional value from there. - let memo = self - .get_memo_from_table_for(zalsa, id, memo_ingredient_index) - .filter(|memo| memo.verified_at.load() == zalsa.current_revision()) - .unwrap_or_else(|| { - unreachable!( - "{database_key_index:#?} is a cycle head, \ + if !cycle_heads.contains(&database_key_index) { + completed_query.revisions.set_cycle_heads(cycle_heads); + break (new_value, completed_query); + } + + let last_provisional_value = if let Some(last_provisional) = previous_memo { + // We have a last provisional value from our previous time around the loop. + last_provisional.value.as_ref() + } else { + // This is our first time around the loop; a provisional value must have been + // inserted into the memo table when the cycle was hit, so let's pull our + // initial provisional value from there. 
+ let memo = self + .get_memo_from_table_for(zalsa, id, memo_ingredient_index) + .unwrap_or_else(|| { + unreachable!( + "{database_key_index:#?} is a cycle head, \ but no provisional memo found" - ) - }); + ) + }); - debug_assert!(memo.may_be_provisional()); - memo.value.as_ref() - }; + debug_assert!(memo.may_be_provisional()); + memo.value.as_ref() + }; - let last_provisional_value = last_provisional_value.expect( + + let last_provisional_value = last_provisional_value.expect( "`fetch_cold_cycle` should have inserted a provisional memo with Cycle::initial", ); - crate::tracing::debug!( - "{database_key_index:?}: execute: \ + crate::tracing::debug!( + "{database_key_index:?}: execute: \ I am a cycle head, comparing last provisional value with new value" - ); - // If the new result is equal to the last provisional result, the cycle has - // converged and we are done. - if !C::values_equal(&new_value, last_provisional_value) { - // We are in a cycle that hasn't converged; ask the user's - // cycle-recovery function what to do: - match C::recover_from_cycle( - db, - &new_value, - iteration_count.as_u32(), - C::id_to_input(zalsa, id), - ) { - crate::CycleRecoveryAction::Iterate => {} - crate::CycleRecoveryAction::Fallback(fallback_value) => { - crate::tracing::debug!( - "{database_key_index:?}: execute: user cycle_fn says to fall back" - ); - new_value = fallback_value; - } - } - // `iteration_count` can't overflow as we check it against `MAX_ITERATIONS` - // which is less than `u32::MAX`. - iteration_count = iteration_count.increment().unwrap_or_else(|| { - tracing::warn!( - "{database_key_index:?}: execute: too many cycle iterations" + ); + + // determine if it is a nested query. + // This is a nested query if it depends on any other cycle head than itself + // where claiming it results in a cycle. In that case, both queries form a single connected component + // that we can iterate together rather than having separate nested fixpoint iterations. 
+ let outer_cycle = cycle_heads + .iter() + .filter(|head| head.database_key_index != database_key_index) + .find_map(|head| { + let head_ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + head_ingredient + .wait_for(zalsa, head.database_key_index.key_index()) + .is_cycle() + .then_some(head.database_key_index) + }); + + let this_converged = C::values_equal(&new_value, last_provisional_value); + + iteration_count = if outer_cycle.is_some() { + iteration_count + } else { + cycle_heads + .iter() + .map(|head| head.iteration_count.load()) + .max() + .unwrap_or(iteration_count) + }; + + // If the new result is equal to the last provisional result, the cycle has + // converged and we are done. + if !this_converged { + // We are in a cycle that hasn't converged; ask the user's + // cycle-recovery function what to do: + match C::recover_from_cycle( + db, + &new_value, + iteration_count.as_u32(), + C::id_to_input(zalsa, id), + ) { + crate::CycleRecoveryAction::Iterate => {} + crate::CycleRecoveryAction::Fallback(fallback_value) => { + crate::tracing::debug!( + "{database_key_index:?}: execute: user cycle_fn says to fall back" ); - panic!("{database_key_index:?}: execute: too many cycle iterations") - }); - zalsa.event(&|| { - Event::new(EventKind::WillIterateCycle { - database_key: database_key_index, - iteration_count, - }) + new_value = fallback_value; + } + } + } else { + completed_query.revisions.set_cycle_converged(true); + } + + if let Some(outer_cycle) = outer_cycle { + tracing::debug!( + "Detected nested cycle {database_key_index:?}, iterate it as part of the outer cycle {outer_cycle:?}" + ); + + completed_query.revisions.mark_nested_cycle(); + completed_query.revisions.set_cycle_heads(cycle_heads); + + break (new_value, completed_query); + } + + // Verify that all cycles have converged, including all inner cycles. 
+ let converged = this_converged + && cycle_heads + .iter() + .filter(|head| head.database_key_index != database_key_index) + .all(|head| { + let ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + ingredient.cycle_converged(zalsa, head.database_key_index.key_index()) }); - cycle_heads.update_iteration_count(database_key_index, iteration_count); - completed_query - .revisions - .update_iteration_count(iteration_count); - crate::tracing::info!("{database_key_index:?}: execute: iterate again...",); - opt_last_provisional = Some(self.insert_memo( - zalsa, - id, - Memo::new( - Some(new_value), - zalsa.current_revision(), - completed_query.revisions, - ), - memo_ingredient_index, - )); - last_stale_tracked_ids = completed_query.stale_tracked_structs; - - active_query = zalsa_local.push_query(database_key_index, iteration_count); + if converged { + crate::tracing::debug!( + "{database_key_index:?}: execute: fixpoint iteration has a final value after {iteration_count:?} iterations" + ); + + // Set the nested cycles as verified. This is necessary because + // `validate_provisional` doesn't follow cycle heads recursively (and the inner memos now depend on all cycle heads). + for head in cycle_heads { + if head.database_key_index == database_key_index { + continue; + } + + let ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + ingredient.set_cycle_finalized(zalsa, head.database_key_index.key_index()); + } + + *completed_query.revisions.verified_final.get_mut() = true; + + break (new_value, completed_query); + } + + completed_query.revisions.set_cycle_heads(cycle_heads); + + // `iteration_count` can't overflow as we check it against `MAX_ITERATIONS` + // which is less than `u32::MAX`. 
+ iteration_count = iteration_count.increment().unwrap_or_else(|| { + tracing::warn!("{database_key_index:?}: execute: too many cycle iterations"); + panic!("{database_key_index:?}: execute: too many cycle iterations") + }); + + zalsa.event(&|| { + Event::new(EventKind::WillIterateCycle { + database_key: database_key_index, + iteration_count, + }) + }); + + crate::tracing::info!( + "{database_key_index:?}: execute: iterate again ({iteration_count:?})...", + ); + + completed_query + .revisions + .update_iteration_count_mut(database_key_index, iteration_count); + + for head in completed_query.revisions.cycle_heads() { + if head.database_key_index == database_key_index { continue; } - crate::tracing::debug!( - "{database_key_index:?}: execute: fixpoint iteration has a final value" + + let ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + // let iteration_count = if was_initial && !head.iteration_count.load().is_initial() { + // IterationCount::first_after_restart() + // } else { + // iteration_count + // }; + + ingredient.set_cycle_iteration_count( + zalsa, + head.database_key_index.key_index(), + iteration_count, ); - cycle_heads.remove(&database_key_index); - - if cycle_heads.is_empty() { - // If there are no more cycle heads, we can mark this as verified. 
- completed_query - .revisions - .verified_final - .store(true, Ordering::Relaxed); - } } - crate::tracing::debug!( - "{database_key_index:?}: execute: result.revisions = {revisions:#?}", - revisions = &completed_query.revisions + let new_memo = self.insert_memo( + zalsa, + id, + Memo::new( + Some(new_value), + zalsa.current_revision(), + completed_query.revisions, + ), + memo_ingredient_index, ); - break (new_value, completed_query); - } + previous_memo = Some(new_memo); + + last_stale_tracked_ids = completed_query.stale_tracked_structs; + active_query = zalsa_local.push_query(database_key_index, iteration_count); + + continue; + }; + + crate::tracing::debug!( + "{database_key_index:?}: execute_maybe_iterate: result.revisions = {revisions:#?}", + revisions = &completed_query.revisions + ); + + (new_value, completed_query) } #[inline] diff --git a/src/function/fetch.rs b/src/function/fetch.rs index a1b6658f6..6f7be1397 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -232,7 +232,7 @@ where } } - let memo = self.execute(db, zalsa, zalsa_local, database_key_index, opt_old_memo); + let memo = self.execute(db,zalsa, zalsa_local, database_key_index, opt_old_memo); Some(memo) } @@ -257,6 +257,23 @@ where let can_shallow_update = self.shallow_verify_memo(zalsa, database_key_index, memo); if can_shallow_update.yes() { self.update_shallow(zalsa, database_key_index, memo, can_shallow_update); + + if C::CYCLE_STRATEGY == CycleRecoveryStrategy::Fixpoint { + // This feels strange. I feel like we need to preserve the cycle heads. Let's say a cycle head only sometimes participates in the cycle. + // This doesn't mean that the value becomes final because of it. The query might as well be cyclic in the next iteration but + // we then never re-executed that query because it was marked as `verified_final`. 
+ memo.revisions + .cycle_heads() + .clear_except(database_key_index); + memo.revisions.reset_nested_cycle(); + } + + crate::tracing::debug!( + "hit cycle at {database_key_index:#?}, \ + returning last provisional value: {:#?}", + memo.revisions + ); + // SAFETY: memo is present in memo_map. return unsafe { self.extend_memo_lifetime(memo) }; } diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 4f69655cd..0c0de1e04 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -3,7 +3,7 @@ use rustc_hash::FxHashMap; #[cfg(feature = "accumulator")] use crate::accumulator::accumulated_map::InputAccumulatedValues; use crate::cycle::{CycleRecoveryStrategy, ProvisionalStatus}; -use crate::function::memo::Memo; +use crate::function::memo::{Memo, TryClaimCycleHeadsIter, TryClaimHeadsResult}; use crate::function::sync::ClaimResult; use crate::function::{Configuration, IngredientImpl}; @@ -400,6 +400,7 @@ where ProvisionalStatus::Final { iteration, verified_at, + nested: _, } => { // Only consider the cycle head if it is from the same revision as the memo if verified_at != memo_verified_at { @@ -413,7 +414,7 @@ where // // If we don't account for the iteration, then `a` (from iteration 0) will be finalized // because its cycle head `b` is now finalized, but `b` never pulled `a` in the last iteration. - if iteration != cycle_head.iteration_count { + if iteration != cycle_head.iteration_count.load() { return false; } @@ -471,70 +472,111 @@ where return false; } - // SAFETY: We do not access the query stack reentrantly. 
- unsafe { - zalsa_local.with_query_stack_unchecked(|stack| { - cycle_heads.iter().all(|cycle_head| { - stack - .iter() - .rev() - .find(|query| query.database_key_index == cycle_head.database_key_index) - .map(|query| query.iteration_count()) - .or_else(|| { - // If the cycle head isn't on our stack because: - // - // * another thread holds the lock on the cycle head (but it waits for the current query to complete) - // * we're in `maybe_changed_after` because `maybe_changed_after` doesn't modify the cycle stack - // - // check if the latest memo has the same iteration count. - - // However, we've to be careful to skip over fixpoint initial values: - // If the head is the memo we're trying to validate, always return `None` - // to force a re-execution of the query. This is necessary because the query - // has obviously not completed its iteration yet. - // - // This should be rare but the `cycle_panic` test fails on some platforms (mainly GitHub actions) - // without this check. What happens there is that: - // - // * query a blocks on query b - // * query b tries to claim a, fails to do so and inserts the fixpoint initial value - // * query b completes and has `a` as head. It returns its query result Salsa blocks query b from - // exiting inside `block_on` (or the thread would complete before the cycle iteration is complete) - // * query a resumes but panics because of the fixpoint iteration function - // * query b resumes. It rexecutes its own query which then tries to fetch a (which depends on itself because it's a fixpoint initial value). - // Without this check, `validate_same_iteration` would return `true` because the latest memo for `a` is the fixpoint initial value. - // But it should return `false` so that query b's thread re-executes `a` (which then also causes the panic). - // - // That's why we always return `None` if the cycle head is the same as the current database key index. 
- if cycle_head.database_key_index == database_key_index { - return None; - } - - let ingredient = zalsa.lookup_ingredient( - cycle_head.database_key_index.ingredient_index(), - ); - let wait_result = ingredient - .wait_for(zalsa, cycle_head.database_key_index.key_index()); + let mut cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); - if !wait_result.is_cycle() { - return None; - } + while let Some(cycle_head) = cycle_heads_iter.next() { + match cycle_head { + TryClaimHeadsResult::Cycle { + head_iteration_count, + current_iteration_count, + verified_at: head_verified_at, + } => { + if head_verified_at != verified_at { + return false; + } - let provisional_status = ingredient.provisional_status( - zalsa, - cycle_head.database_key_index.key_index(), - )?; + if head_iteration_count != current_iteration_count { + return false; + } + } + TryClaimHeadsResult::Available(available_cycle_head) => { + // Check the cycle heads recursively + if available_cycle_head.is_nested(zalsa) { + available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); + } else { + return false; + } + } + TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { + return false; + } + } - if provisional_status.verified_at() == Some(verified_at) { - provisional_status.iteration() - } else { - None - } - }) - == Some(cycle_head.iteration_count) - }) - }) + // // If the cycle head isn't on our stack because: + // // + // // * another thread holds the lock on the cycle head (but it waits for the current query to complete) + // // * we're in `maybe_changed_after` because `maybe_changed_after` doesn't modify the cycle stack + // // + // // check if the latest memo has the same iteration count. + + // // However, we've to be careful to skip over fixpoint initial values: + // // If the head is the memo we're trying to validate, always return `None` + // // to force a re-execution of the query. 
This is necessary because the query + // // has obviously not completed its iteration yet. + // // + // // This should be rare but the `cycle_panic` test fails on some platforms (mainly GitHub actions) + // // without this check. What happens there is that: + // // + // // * query a blocks on query b + // // * query b tries to claim a, fails to do so and inserts the fixpoint initial value + // // * query b completes and has `a` as head. It returns its query result Salsa blocks query b from + // // exiting inside `block_on` (or the thread would complete before the cycle iteration is complete) + // // * query a resumes but panics because of the fixpoint iteration function + // // * query b resumes. It rexecutes its own query which then tries to fetch a (which depends on itself because it's a fixpoint initial value). + // // Without this check, `validate_same_iteration` would return `true` because the latest memo for `a` is the fixpoint initial value. + // // But it should return `false` so that query b's thread re-executes `a` (which then also causes the panic). + // // + // // That's why we always return `None` if the cycle head is the same as the current database key index. + // if cycle_head.database_key_index == database_key_index { + // return false; + // } + + // let wait_result = ingredient.wait_for(zalsa, cycle_head.database_key_index.key_index()); + + // let provisional_status = match wait_result { + // WaitForResult::Running(_) => { + // // This Memo is guaranteed to be outdated because another thread + // // is computing a new value right now + // return None; + // } + // WaitForResult::Available(_claim_guard) => { + // // Nested cycles are released as soon as their query completes + // // and the outer queries are part of their `cycle_heads`. 
+ + // let provisional_status = ingredient + // .provisional_status(zalsa, cycle_head.database_key_index.key_index())?; + + // if !provisional_status.nested() { + // return None; + // } + + // let cycle_heads = + // ingredient.cycle_heads(zalsa, cycle_head.database_key_index.key_index()); + + // // This doesn't work, unless we need the same check in blocks-on etc. + // if !cycle_heads.contains(&database_key_index) { + // return None; + // } + + // provisional_status + // } + // WaitForResult::Cycle => { + // // The head is hold by the current thread or another thread waiting on the + // // result of this thread. + // ingredient + // .provisional_status(zalsa, cycle_head.database_key_index.key_index())? + // } + // }; + + // if provisional_status.verified_at() == Some(verified_at) { + // provisional_status.iteration() + // } else { + // None + // } + // } + + true } /// VerifyResult::Unchanged if the memo's value and `changed_at` time is up-to-date in the @@ -553,6 +595,12 @@ where cycle_heads: &mut VerifyCycleHeads, can_shallow_update: ShallowUpdate, ) -> VerifyResult { + // If the value is from the same revision but is still provisional, consider it changed + // because we're now in a new iteration. + if can_shallow_update == ShallowUpdate::Verified && old_memo.may_be_provisional() { + return VerifyResult::changed(); + } + crate::tracing::debug!( "{database_key_index:?}: deep_verify_memo(old_memo = {old_memo:#?})", old_memo = old_memo.tracing_debug() @@ -562,12 +610,6 @@ where match old_memo.revisions.origin.as_ref() { QueryOriginRef::Derived(edges) => { - // If the value is from the same revision but is still provisional, consider it changed - // because we're now in a new iteration. 
- if can_shallow_update == ShallowUpdate::Verified && old_memo.may_be_provisional() { - return VerifyResult::changed(); - } - #[cfg(feature = "accumulator")] let mut inputs = InputAccumulatedValues::Empty; let mut child_cycle_heads = Vec::new(); diff --git a/src/function/memo.rs b/src/function/memo.rs index 793f4832a..3e2c10ba5 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -3,9 +3,10 @@ use std::fmt::{Debug, Formatter}; use std::mem::transmute; use std::ptr::NonNull; -use crate::cycle::{empty_cycle_heads, CycleHead, CycleHeads, IterationCount, ProvisionalStatus}; -use crate::function::{Configuration, IngredientImpl}; -use crate::hash::FxHashSet; +use smallvec::SmallVec; + +use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount, ProvisionalStatus}; +use crate::function::{ClaimGuard, Configuration, IngredientImpl}; use crate::ingredient::{Ingredient, WaitForResult}; use crate::key::DatabaseKeyIndex; use crate::revision::AtomicRevision; @@ -176,29 +177,38 @@ impl<'db, C: Configuration> Memo<'db, C> { // IMPORTANT: If you make changes to this function, make sure to run `cycle_nested_deep` with // shuttle with at least 10k iterations. - // The most common case is that the entire cycle is running in the same thread. - // If that's the case, short circuit and return `true` immediately. - if self.all_cycles_on_stack(zalsa_local) { + let cycle_heads = self.revisions.cycle_heads(); + if cycle_heads.is_empty() { return true; } - // Otherwise, await all cycle heads, recursively. 
- return block_on_heads_cold(zalsa, self.cycle_heads()); + return block_on_heads_cold(zalsa, zalsa_local, self.cycle_heads()); #[inline(never)] - fn block_on_heads_cold(zalsa: &Zalsa, heads: &CycleHeads) -> bool { + fn block_on_heads_cold( + zalsa: &Zalsa, + zalsa_local: &ZalsaLocal, + heads: &CycleHeads, + ) -> bool { let _entered = crate::tracing::debug_span!("block_on_heads").entered(); - let mut cycle_heads = TryClaimCycleHeadsIter::new(zalsa, heads); + let mut cycle_heads = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, heads); let mut all_cycles = true; while let Some(claim_result) = cycle_heads.next() { match claim_result { - TryClaimHeadsResult::Cycle => {} + TryClaimHeadsResult::Cycle { .. } => {} TryClaimHeadsResult::Finalized => { all_cycles = false; } - TryClaimHeadsResult::Available => { - all_cycles = false; + TryClaimHeadsResult::Available(available) => { + if available.is_nested(zalsa) { + // This is a nested cycle. The lock of nested cycles is released + // when their query completes. But we need to recurse + // TODO: What about cycle initial values? Do we need to reset nested? + available.queue_cycle_heads(&mut cycle_heads); + } else { + all_cycles = false; + } } TryClaimHeadsResult::Running(running) => { all_cycles = false; @@ -217,17 +227,23 @@ impl<'db, C: Configuration> Memo<'db, C> { /// claiming all cycle heads failed because one of them is running on another thread. 
pub(super) fn try_claim_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool { let _entered = crate::tracing::debug_span!("try_claim_heads").entered(); - if self.all_cycles_on_stack(zalsa_local) { + + let cycle_heads = self.revisions.cycle_heads(); + if cycle_heads.is_empty() { return true; } - let cycle_heads = TryClaimCycleHeadsIter::new(zalsa, self.revisions.cycle_heads()); + let mut cycle_heads = + TryClaimCycleHeadsIter::new(zalsa, zalsa_local, self.revisions.cycle_heads()); - for claim_result in cycle_heads { + while let Some(claim_result) = cycle_heads.next() { match claim_result { - TryClaimHeadsResult::Cycle - | TryClaimHeadsResult::Finalized - | TryClaimHeadsResult::Available => {} + TryClaimHeadsResult::Cycle { .. } | TryClaimHeadsResult::Finalized => {} + TryClaimHeadsResult::Available(available) => { + if available.is_nested(zalsa) { + available.queue_cycle_heads(&mut cycle_heads); + } + } TryClaimHeadsResult::Running(_) => { return false; } @@ -237,25 +253,6 @@ impl<'db, C: Configuration> Memo<'db, C> { true } - fn all_cycles_on_stack(&self, zalsa_local: &ZalsaLocal) -> bool { - let cycle_heads = self.revisions.cycle_heads(); - if cycle_heads.is_empty() { - return true; - } - - // SAFETY: We do not access the query stack reentrantly. - unsafe { - zalsa_local.with_query_stack_unchecked(|stack| { - cycle_heads.iter().all(|cycle_head| { - stack - .iter() - .rev() - .any(|query| query.database_key_index == cycle_head.database_key_index) - }) - }) - } - } - /// Cycle heads that should be propagated to dependent queries. 
#[inline(always)] pub(super) fn cycle_heads(&self) -> &CycleHeads { @@ -266,6 +263,53 @@ impl<'db, C: Configuration> Memo<'db, C> { } } + // pub(super) fn root_cycle_heads( + // &self, + // zalsa: &Zalsa, + // database_key_index: DatabaseKeyIndex, + // ) -> impl Iterator { + // let mut queue: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]> = self + // .cycle_heads() + // .iter() + // .filter(|head| head.database_key_index != database_key_index) + // .map(|head| (head.database_key_index, head.iteration_count.load())) + // .collect(); + + // let mut visited: FxHashSet<_> = queue.iter().copied().collect(); + // let mut roots: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]> = SmallVec::new(); + + // while let Some((next_key, next_iteration_count)) = queue.pop() { + // let ingredient = zalsa.lookup_ingredient(next_key.ingredient_index()); + // let nested = match ingredient.provisional_status(zalsa, next_key.key_index()) { + // Some( + // ProvisionalStatus::Final { nested, .. } + // | ProvisionalStatus::Provisional { nested, .. }, + // ) => nested, + // None | Some(ProvisionalStatus::FallbackImmediate) => false, + // }; + + // if nested { + // // If this is a nested cycle head, keep following its cycle heads until we find a root. + // queue.extend( + // ingredient + // .cycle_heads(zalsa, next_key.key_index()) + // // TODO: Do we need to include the removed heads here? + // // I think so + // .iter() + // .filter_map(|head| { + // let entry = (head.database_key_index, head.iteration_count.load()); + // visited.insert(entry).then_some(entry) + // }), + // ); + // continue; + // } + + // roots.push((next_key, next_iteration_count)); + // } + + // roots.into_iter() + // } + /// Mark memo as having been verified in the `revision_now`, which should /// be the current revision. 
/// The caller is responsible to update the memo's `accumulated` state if their accumulated @@ -474,13 +518,17 @@ mod persistence { pub(super) enum TryClaimHeadsResult<'me> { /// Claiming every cycle head results in a cycle head. - Cycle, + Cycle { + head_iteration_count: IterationCount, + current_iteration_count: IterationCount, + verified_at: Revision, + }, /// The cycle head has been finalized. Finalized, /// The cycle head is not finalized, but it can be claimed. - Available, + Available(AvailableCycleHead<'me>), /// The cycle head is currently executed on another thread. Running(RunningCycleHead<'me>), @@ -493,33 +541,67 @@ pub(super) struct RunningCycleHead<'me> { impl<'a> RunningCycleHead<'a> { fn block_on(self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { - let key_index = self.inner.database_key().key_index(); + let database_key_index = self.inner.database_key(); + let key_index = database_key_index.key_index(); self.inner.block_on(cycle_heads.zalsa); - cycle_heads.queue_ingredient_heads(self.ingredient, key_index); + let nested_heads = self.ingredient.cycle_heads(cycle_heads.zalsa, key_index); + + cycle_heads.queue_ingredient_heads(nested_heads); + } +} + +pub(super) struct AvailableCycleHead<'me> { + database_key_index: DatabaseKeyIndex, + _guard: ClaimGuard<'me>, + ingredient: &'me dyn Ingredient, +} + +impl<'a> AvailableCycleHead<'a> { + pub(super) fn is_nested(&self, zalsa: &Zalsa) -> bool { + self.ingredient + .provisional_status(zalsa, self.database_key_index.key_index()) + .is_some_and(|status| status.nested()) + } + + pub(super) fn queue_cycle_heads(&self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { + let nested_heads = self + .ingredient + .cycle_heads(cycle_heads.zalsa, self.database_key_index.key_index()); + + cycle_heads.queue_ingredient_heads(nested_heads); } } /// Iterator to try claiming the transitive cycle heads of a memo. 
-struct TryClaimCycleHeadsIter<'a> { +pub(super) struct TryClaimCycleHeadsIter<'a> { zalsa: &'a Zalsa, - queue: Vec, - queued: FxHashSet, + zalsa_local: &'a ZalsaLocal, + queue: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]>, + queued: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]>, } impl<'a> TryClaimCycleHeadsIter<'a> { - fn new(zalsa: &'a Zalsa, heads: &CycleHeads) -> Self { - let queue: Vec<_> = heads.iter().copied().collect(); - let queued: FxHashSet<_> = queue.iter().copied().collect(); + pub(super) fn new( + zalsa: &'a Zalsa, + zalsa_local: &'a ZalsaLocal, + cycle_heads: &CycleHeads, + ) -> Self { + let queue: SmallVec<_> = cycle_heads + .iter() + .map(|head| (head.database_key_index, head.iteration_count.load())) + .collect(); + let queued = queue.iter().copied().collect(); Self { zalsa, + zalsa_local, queue, queued, } } - fn queue_ingredient_heads(&mut self, ingredient: &dyn Ingredient, key: Id) { + fn queue_ingredient_heads(&mut self, cycle_heads: &CycleHeads) { // Recursively wait for all cycle heads that this head depends on. It's important // that we fetch those from the updated memo because the cycle heads can change // between iterations and new cycle heads can be added if a query depeonds on @@ -528,11 +610,19 @@ impl<'a> TryClaimCycleHeadsIter<'a> { // IMPORTANT: It's critical that we get the cycle head from the latest memo // here, in case the memo has become part of another cycle (we need to block on that too!). 
self.queue.extend( - ingredient - .cycle_heads(self.zalsa, key) + cycle_heads .iter() - .copied() - .filter(|head| self.queued.insert(*head)), + .map(|head| (head.database_key_index, head.iteration_count.load())) + .filter(|head| { + let already_checked = self.queued.contains(head); + + if already_checked { + false + } else { + self.queued.push(*head); + true + } + }), ) } } @@ -541,9 +631,29 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { type Item = TryClaimHeadsResult<'me>; fn next(&mut self) -> Option { - let head = self.queue.pop()?; + let (head_database_key, head_iteration_count) = self.queue.pop()?; + + // The most common case is that the head is already in the query stack. So let's check that first. + // SAFETY: We do not access the query stack reentrantly. + if let Some(current_iteration_count) = unsafe { + self.zalsa_local.with_query_stack_unchecked(|stack| { + stack + .iter() + .rev() + .find(|query| query.database_key_index == head_database_key) + .map(|query| query.iteration_count()) + }) + } { + crate::tracing::debug!( + "Waiting for {head_database_key:?} results in a cycle (because it is already in the query stack)" + ); + return Some(TryClaimHeadsResult::Cycle { + head_iteration_count, + current_iteration_count, + verified_at: self.zalsa.current_revision(), + }); + } - let head_database_key = head.database_key_index; let head_key_index = head_database_key.key_index(); let ingredient = self .zalsa @@ -554,34 +664,54 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { .unwrap_or(ProvisionalStatus::Provisional { iteration: IterationCount::initial(), verified_at: Revision::start(), + nested: false, }); match cycle_head_kind { ProvisionalStatus::Final { .. } | ProvisionalStatus::FallbackImmediate => { // This cycle is already finalized, so we don't need to wait on it; // keep looping through cycle heads. 
- crate::tracing::trace!("Dependent cycle head {head:?} has been finalized."); + crate::tracing::trace!( + "Dependent cycle head {head_database_key:?} has been finalized." + ); Some(TryClaimHeadsResult::Finalized) } - ProvisionalStatus::Provisional { .. } => { + ProvisionalStatus::Provisional { + iteration, + verified_at, + .. + } => { match ingredient.wait_for(self.zalsa, head_key_index) { WaitForResult::Cycle { .. } => { // We hit a cycle blocking on the cycle head; this means this query actively // participates in the cycle and some other query is blocked on this thread. - crate::tracing::debug!("Waiting for {head:?} results in a cycle"); - Some(TryClaimHeadsResult::Cycle) + crate::tracing::debug!( + "Waiting for {head_database_key:?} results in a cycle" + ); + Some(TryClaimHeadsResult::Cycle { + current_iteration_count: iteration, + head_iteration_count, + verified_at, + }) } WaitForResult::Running(running) => { - crate::tracing::debug!("Ingredient {head:?} is running: {running:?}"); + crate::tracing::debug!( + "Ingredient {head_database_key:?} is running: {running:?}" + ); Some(TryClaimHeadsResult::Running(RunningCycleHead { inner: running, ingredient, })) } - WaitForResult::Available => { - self.queue_ingredient_heads(ingredient, head_key_index); - Some(TryClaimHeadsResult::Available) + WaitForResult::Available(guard) => { + crate::tracing::debug!("Query {head_database_key:?} is available",); + + Some(TryClaimHeadsResult::Available(AvailableCycleHead { + _guard: guard, + ingredient, + database_key_index: head_database_key, + })) } } } diff --git a/src/function/sync.rs b/src/function/sync.rs index 0a88844af..38b44d6e4 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -83,7 +83,7 @@ impl SyncTable { /// Marks an active 'claim' in the synchronization map. The claim is /// released when this value is dropped. 
#[must_use] -pub(crate) struct ClaimGuard<'me> { +pub struct ClaimGuard<'me> { key_index: Id, zalsa: &'me Zalsa, sync_table: &'me SyncTable, diff --git a/src/ingredient.rs b/src/ingredient.rs index 3cf36ae61..8c9ae2ca6 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -1,9 +1,9 @@ use std::any::{Any, TypeId}; use std::fmt; -use crate::cycle::{empty_cycle_heads, CycleHeads, CycleRecoveryStrategy, ProvisionalStatus}; +use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount, ProvisionalStatus}; use crate::database::RawDatabase; -use crate::function::{VerifyCycleHeads, VerifyResult}; +use crate::function::{ClaimGuard, VerifyCycleHeads, VerifyResult}; use crate::hash::{FxHashSet, FxIndexSet}; use crate::runtime::Running; use crate::sync::Arc; @@ -93,9 +93,10 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { /// on an other thread, it's up to caller to block until the result becomes available if desired. /// A return value of [`WaitForResult::Cycle`] means that a cycle was encountered; the waited-on query is either already claimed /// by the current thread, or by a thread waiting on the current thread. - fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { - _ = (zalsa, key_index); - WaitForResult::Available + fn wait_for<'me>(&'me self, _zalsa: &'me Zalsa, _key_index: Id) -> WaitForResult<'me> { + unreachable!( + "wait_for should only be called on cycle heads and only functions can be cycle heads" + ); } /// Invoked when the value `output_key` should be marked as valid in the current revision. @@ -157,11 +158,21 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { } // Function ingredient methods - /// If this ingredient is a participant in a cycle, what is its cycle recovery strategy? - /// (Really only relevant to [`crate::function::FunctionIngredient`], - /// since only function ingredients push themselves onto the active query stack.) 
- fn cycle_recovery_strategy(&self) -> CycleRecoveryStrategy { - unreachable!("only function ingredients can be part of a cycle") + fn cycle_converged(&self, _zalsa: &Zalsa, _input: Id) -> bool { + unreachable!("cycle_converged should only be called on cycle heads and only functions can be cycle heads"); + } + + fn set_cycle_iteration_count( + &self, + _zalsa: &Zalsa, + _input: Id, + _iteration_count: IterationCount, + ) { + unreachable!("increment_iteration_count should only be called on cycle heads and only functions can be cycle heads"); + } + + fn set_cycle_finalized(&self, _zalsa: &Zalsa, _input: Id) { + unreachable!("finalize_cycle_head should only be called on cycle heads and only functions can be cycle heads"); } /// What were the inputs (if any) that were used to create the value at `key_index`. @@ -304,7 +315,7 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) pub enum WaitForResult<'me> { Running(Running<'me>), - Available, + Available(ClaimGuard<'me>), Cycle, } @@ -312,4 +323,8 @@ impl WaitForResult<'_> { pub const fn is_cycle(&self) -> bool { matches!(self, WaitForResult::Cycle) } + + pub const fn is_running(&self) -> bool { + matches!(self, WaitForResult::Running(_)) + } } diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index e332b516f..93bd2a337 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -11,11 +11,11 @@ use crate::accumulator::{ Accumulator, }; use crate::active_query::{CompletedQuery, QueryStack}; -use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount}; +use crate::cycle::{empty_cycle_heads, AtomicIterationCount, CycleHeads, IterationCount}; use crate::durability::Durability; use crate::key::DatabaseKeyIndex; use crate::runtime::Stamp; -use crate::sync::atomic::AtomicBool; +use crate::sync::atomic::{AtomicBool, Ordering}; use crate::table::{PageIndex, Slot, Table}; use crate::tracked_struct::{Disambiguator, Identity, IdentityHash}; use crate::zalsa::{IngredientIndex, Zalsa}; @@ 
-494,6 +494,7 @@ impl QueryRevisionsExtra { mut tracked_struct_ids: ThinVec<(Identity, Id)>, cycle_heads: CycleHeads, iteration: IterationCount, + converged: bool, ) -> Self { #[cfg(feature = "accumulator")] let acc = accumulated.is_empty(); @@ -513,7 +514,9 @@ impl QueryRevisionsExtra { accumulated, cycle_heads, tracked_struct_ids, - iteration, + iteration: iteration.into(), + nested_cycle: false.into(), + cycle_converged: converged, })) }; @@ -561,7 +564,12 @@ struct QueryRevisionsExtraInner { /// iterate again. cycle_heads: CycleHeads, - iteration: IterationCount, + iteration: AtomicIterationCount, + + cycle_converged: bool, + + #[cfg_attr(feature = "persistence", serde(with = "crate::zalsa_local::persistence::atomic_bool"))] + nested_cycle: AtomicBool, } impl QueryRevisionsExtraInner { @@ -573,6 +581,8 @@ impl QueryRevisionsExtraInner { tracked_struct_ids, cycle_heads, iteration: _, + cycle_converged: _, + nested_cycle: _, } = self; #[cfg(feature = "accumulator")] @@ -607,6 +617,7 @@ impl QueryRevisions { ThinVec::default(), CycleHeads::initial(query), IterationCount::initial(), + false, ), } } @@ -649,22 +660,80 @@ impl QueryRevisions { ThinVec::default(), cycle_heads, IterationCount::default(), + false, ); } }; } - pub(crate) const fn iteration(&self) -> IterationCount { + pub(crate) fn cycle_converged(&self) -> bool { + match &self.extra.0 { + Some(extra) => extra.cycle_converged, + None => false, + } + } + + pub(crate) fn set_cycle_converged(&mut self, cycle_converged: bool) { + if let Some(extra) = &mut self.extra.0 { + extra.cycle_converged = cycle_converged + } + } + + pub(crate) fn is_nested_cycle(&self) -> bool { + match &self.extra.0 { + Some(extra) => extra.nested_cycle.load(Ordering::Relaxed), + None => false, + } + } + + pub(crate) fn reset_nested_cycle(&self) { + if let Some(extra) = &self.extra.0 { + extra.nested_cycle.store(false, Ordering::Release) + } + } + + pub(crate) fn mark_nested_cycle(&mut self) { + if let Some(extra) = &mut self.extra.0 
{ + *extra.nested_cycle.get_mut() = true + } + } + + pub(crate) fn iteration(&self) -> IterationCount { match &self.extra.0 { - Some(extra) => extra.iteration, + Some(extra) => extra.iteration.load(), None => IterationCount::initial(), } } + pub(crate) fn set_iteration_count( + &self, + database_key_index: DatabaseKeyIndex, + iteration_count: IterationCount, + ) { + let Some(extra) = &self.extra.0 else { + return; + }; + + extra.iteration.store(iteration_count); + + extra + .cycle_heads + .update_iteration_count(database_key_index, iteration_count); + } + /// Updates the iteration count if this query has any cycle heads. Otherwise it's a no-op. - pub(crate) fn update_iteration_count(&mut self, iteration_count: IterationCount) { + pub(crate) fn update_iteration_count_mut( + &mut self, + cycle_head_index: DatabaseKeyIndex, + iteration_count: IterationCount, + ) { if let Some(extra) = &mut self.extra.0 { - extra.iteration = iteration_count + extra.iteration.store_mut(iteration_count); + // I think updating is required for `validate_same_iteration` to work, + // unless we can skip self? TODO: confirm. + extra + .cycle_heads + .update_iteration_count_mut(cycle_head_index, iteration_count); } } @@ -1196,7 +1265,7 @@ pub(crate) mod persistence { } } } - + // A workaround for the fact that `shuttle` atomic types do not implement `serde::{Serialize, Deserialize}`. 
pub(super) mod verified_final { use crate::sync::atomic::{AtomicBool, Ordering}; @@ -1215,4 +1284,23 @@ pub(crate) mod persistence { serde::Deserialize::deserialize(deserializer).map(AtomicBool::new) } } + + pub(super) mod atomic_bool { + use crate::sync::atomic::{AtomicBool, Ordering}; + + pub fn serialize(value: &AtomicBool, serializer: S) -> Result + where + S: serde::Serializer, + { + serde::Serialize::serialize(&value.load(Ordering::Relaxed), serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + serde::Deserialize::deserialize(deserializer).map(AtomicBool::new) + } + } + } diff --git a/tests/backtrace.rs b/tests/backtrace.rs index 74124c1ab..8aab2c058 100644 --- a/tests/backtrace.rs +++ b/tests/backtrace.rs @@ -108,7 +108,7 @@ fn backtrace_works() { at tests/backtrace.rs:32 1: query_cycle(Id(2)) at tests/backtrace.rs:45 - cycle heads: query_cycle(Id(2)) -> IterationCount(0) + cycle heads: query_cycle(Id(2)) -> iteration=0 2: query_f(Id(2)) at tests/backtrace.rs:40 "#]] @@ -119,9 +119,9 @@ fn backtrace_works() { query stacktrace: 0: query_e(Id(3)) -> (R1, Durability::LOW) at tests/backtrace.rs:32 - 1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = IterationCount(0)) + 1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = iteration=0) at tests/backtrace.rs:45 - cycle heads: query_cycle(Id(3)) -> IterationCount(0) + cycle heads: query_cycle(Id(3)) -> iteration=0 2: query_f(Id(3)) -> (R1, Durability::HIGH) at tests/backtrace.rs:40 "#]] diff --git a/tests/cycle.rs b/tests/cycle.rs index 7a7e26a07..5a6a25565 100644 --- a/tests/cycle.rs +++ b/tests/cycle.rs @@ -95,18 +95,22 @@ impl Input { } } + #[track_caller] fn assert(&self, db: &dyn Db, expected: Value) { assert_eq!(self.eval(db), expected) } + #[track_caller] fn assert_value(&self, db: &dyn Db, expected: u8) { self.assert(db, Value::N(expected)) } + #[track_caller] fn assert_bounds(&self, db: &dyn Db) { self.assert(db, 
Value::OutOfBounds) } + #[track_caller] fn assert_count(&self, db: &dyn Db) { self.assert(db, Value::TooManyIterations) } @@ -226,6 +230,7 @@ fn value(num: u8) -> Input { #[test] #[should_panic(expected = "dependency graph cycle")] fn self_panic() { + // TODO: This test takes very long to run? let mut db = DbImpl::new(); let a_in = Inputs::new(&db, vec![]); let a = Input::MinPanic(a_in); @@ -893,7 +898,7 @@ fn cycle_unchanged() { /// /// If nothing in a nested cycle changed in the new revision, no part of the cycle should /// re-execute. -#[test] +#[test_log::test] fn cycle_unchanged_nested() { let mut db = ExecuteValidateLoggerDatabase::default(); let a_in = Inputs::new(&db, vec![]); @@ -978,7 +983,7 @@ fn cycle_unchanged_nested_intertwined() { e.assert_value(&db, 60); } - db.assert_logs_len(15 + i); + db.assert_logs_len(13 + i); // next revision, we change only A, which is not part of the cycle and the cycle does not // depend on. diff --git a/tests/parallel/cycle_nested_deep.rs b/tests/parallel/cycle_nested_deep.rs index 7b7c2f42a..f2b355616 100644 --- a/tests/parallel/cycle_nested_deep.rs +++ b/tests/parallel/cycle_nested_deep.rs @@ -63,6 +63,7 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue { #[test_log::test] fn the_test() { crate::sync::check(|| { + tracing::debug!("Starting new run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let db_t3 = db_t1.clone(); diff --git a/tests/parallel/cycle_nested_deep_conditional.rs b/tests/parallel/cycle_nested_deep_conditional.rs index 316612845..4eff75189 100644 --- a/tests/parallel/cycle_nested_deep_conditional.rs +++ b/tests/parallel/cycle_nested_deep_conditional.rs @@ -72,7 +72,7 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue { #[test_log::test] fn the_test() { crate::sync::check(|| { - tracing::debug!("New run"); + tracing::debug!("Starting new run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let db_t3 = db_t1.clone(); diff --git 
a/tests/parallel/cycle_nested_deep_conditional_changed.rs b/tests/parallel/cycle_nested_deep_conditional_changed.rs index 7c96d808d..f827cda0b 100644 --- a/tests/parallel/cycle_nested_deep_conditional_changed.rs +++ b/tests/parallel/cycle_nested_deep_conditional_changed.rs @@ -81,7 +81,7 @@ fn the_test() { use crate::sync; use salsa::Setter as _; sync::check(|| { - tracing::debug!("New run"); + tracing::debug!("Starting new run"); // This is a bit silly but it works around https://github.com/awslabs/shuttle/issues/192 static INITIALIZE: sync::Mutex> = @@ -108,6 +108,7 @@ fn the_test() { } let t1 = thread::spawn(move || { + let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_a(db, input); }); @@ -117,6 +118,7 @@ fn the_test() { query_a(&db, input) }); let t2 = thread::spawn(move || { + let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_b(db, input); }); @@ -125,19 +127,20 @@ fn the_test() { query_b(&db, input) }); let t3 = thread::spawn(move || { + let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_d(db, input); }); - let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); query_d(&db, input) }); let t4 = thread::spawn(move || { + let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); + let (db, input) = get_db(|db, input| { query_e(db, input); }); - let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered(); query_e(&db, input) }); From 702fc109de42ce33e943db14d7ce9de716c75e7c Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Tue, 30 Sep 2025 13:03:53 +0200 Subject: [PATCH 02/45] Remove inline from `validate_same_iteration` --- src/function/maybe_changed_after.rs | 150 ++++++++-------------------- 1 file changed, 44 insertions(+), 106 
deletions(-) diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 0c0de1e04..8e34f1160 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -2,7 +2,7 @@ use rustc_hash::FxHashMap; #[cfg(feature = "accumulator")] use crate::accumulator::accumulated_map::InputAccumulatedValues; -use crate::cycle::{CycleRecoveryStrategy, ProvisionalStatus}; +use crate::cycle::{CycleHeads, CycleRecoveryStrategy, ProvisionalStatus}; use crate::function::memo::{Memo, TryClaimCycleHeadsIter, TryClaimHeadsResult}; use crate::function::sync::ClaimResult; use crate::function::{Configuration, IngredientImpl}; @@ -453,6 +453,48 @@ where database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, ) -> bool { + #[cold] + #[inline(never)] + fn validate_same_iteration_cold( + zalsa: &Zalsa, + zalsa_local: &ZalsaLocal, + cycle_heads: &CycleHeads, + verified_at: Revision, + ) -> bool { + let mut cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); + + while let Some(cycle_head) = cycle_heads_iter.next() { + match cycle_head { + TryClaimHeadsResult::Cycle { + head_iteration_count, + current_iteration_count, + verified_at: head_verified_at, + } => { + if head_verified_at != verified_at { + return false; + } + + if head_iteration_count != current_iteration_count { + return false; + } + } + TryClaimHeadsResult::Available(available_cycle_head) => { + // Check the cycle heads recursively + if available_cycle_head.is_nested(zalsa) { + available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); + } else { + return false; + } + } + TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { + return false; + } + } + } + + true + } + crate::tracing::trace!( "{database_key_index:?}: validate_same_iteration(memo = {memo:#?})", memo = memo.tracing_debug() @@ -472,111 +514,7 @@ where return false; } - let mut cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); - - 
while let Some(cycle_head) = cycle_heads_iter.next() { - match cycle_head { - TryClaimHeadsResult::Cycle { - head_iteration_count, - current_iteration_count, - verified_at: head_verified_at, - } => { - if head_verified_at != verified_at { - return false; - } - - if head_iteration_count != current_iteration_count { - return false; - } - } - TryClaimHeadsResult::Available(available_cycle_head) => { - // Check the cycle heads recursively - if available_cycle_head.is_nested(zalsa) { - available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); - } else { - return false; - } - } - TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { - return false; - } - } - - // // If the cycle head isn't on our stack because: - // // - // // * another thread holds the lock on the cycle head (but it waits for the current query to complete) - // // * we're in `maybe_changed_after` because `maybe_changed_after` doesn't modify the cycle stack - // // - // // check if the latest memo has the same iteration count. - - // // However, we've to be careful to skip over fixpoint initial values: - // // If the head is the memo we're trying to validate, always return `None` - // // to force a re-execution of the query. This is necessary because the query - // // has obviously not completed its iteration yet. - // // - // // This should be rare but the `cycle_panic` test fails on some platforms (mainly GitHub actions) - // // without this check. What happens there is that: - // // - // // * query a blocks on query b - // // * query b tries to claim a, fails to do so and inserts the fixpoint initial value - // // * query b completes and has `a` as head. It returns its query result Salsa blocks query b from - // // exiting inside `block_on` (or the thread would complete before the cycle iteration is complete) - // // * query a resumes but panics because of the fixpoint iteration function - // // * query b resumes. 
It rexecutes its own query which then tries to fetch a (which depends on itself because it's a fixpoint initial value). - // // Without this check, `validate_same_iteration` would return `true` because the latest memo for `a` is the fixpoint initial value. - // // But it should return `false` so that query b's thread re-executes `a` (which then also causes the panic). - // // - // // That's why we always return `None` if the cycle head is the same as the current database key index. - // if cycle_head.database_key_index == database_key_index { - // return false; - // } - - // let wait_result = ingredient.wait_for(zalsa, cycle_head.database_key_index.key_index()); - - // let provisional_status = match wait_result { - // WaitForResult::Running(_) => { - // // This Memo is guaranteed to be outdated because another thread - // // is computing a new value right now - // return None; - // } - // WaitForResult::Available(_claim_guard) => { - // // Nested cycles are released as soon as their query completes - // // and the outer queries are part of their `cycle_heads`. - - // let provisional_status = ingredient - // .provisional_status(zalsa, cycle_head.database_key_index.key_index())?; - - // if !provisional_status.nested() { - // return None; - // } - - // let cycle_heads = - // ingredient.cycle_heads(zalsa, cycle_head.database_key_index.key_index()); - - // // This doesn't work, unless we need the same check in blocks-on etc. - // if !cycle_heads.contains(&database_key_index) { - // return None; - // } - - // provisional_status - // } - // WaitForResult::Cycle => { - // // The head is hold by the current thread or another thread waiting on the - // // result of this thread. - // ingredient - // .provisional_status(zalsa, cycle_head.database_key_index.key_index())? 
- // } - // }; - - // if provisional_status.verified_at() == Some(verified_at) { - // provisional_status.iteration() - // } else { - // None - // } - // - } - - true + validate_same_iteration_cold(zalsa, zalsa_local, cycle_heads, verified_at) } /// VerifyResult::Unchanged if the memo's value and `changed_at` time is up-to-date in the From 6264d7aab7b7eb1dca2d37aa739e94d887d1b03f Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 1 Oct 2025 09:24:04 +0200 Subject: [PATCH 03/45] Nits --- src/cycle.rs | 6 ++++- src/function/execute.rs | 24 ++++++++++++++++--- src/function/fetch.rs | 2 +- src/function/maybe_changed_after.rs | 1 + src/zalsa_local.rs | 8 ++++--- .../cycle_nested_deep_conditional_changed.rs | 11 ++++----- 6 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/cycle.rs b/src/cycle.rs index 413635953..0cf54336d 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -130,6 +130,10 @@ impl IterationCount { self.0 == 0 } + pub(crate) const fn panicked() -> Self { + Self(u8::MAX) + } + pub(crate) const fn increment(self) -> Option { let next = Self(self.0 + 1); if next.0 <= MAX_ITERATIONS.0 { @@ -226,7 +230,7 @@ impl CycleHeads { pub(crate) fn contains(&self, value: &DatabaseKeyIndex) -> bool { self.into_iter() - .any(|head| head.database_key_index == *value) + .any(|head| head.database_key_index == *value && !head.removed.load(Ordering::Relaxed)) } pub(crate) fn clear_except(&self, except: DatabaseKeyIndex) { diff --git a/src/function/execute.rs b/src/function/execute.rs index 107f15eae..8a9886eaa 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -187,12 +187,27 @@ where let mut cycle_heads = std::mem::take(cycle_heads); + // Recursively resolve all cycle heads that this head depends on. 
+ // This isn't required in a single-threaded execution but it's not guaranteed that all nested cycles are listed + // in cycle heads in a multi-threaded execution: + // + // t1: a -> b + // t2: c -> b (blocks on t1) + // t1: a -> b -> c (cycle, returns fixpoint initial with c(0) in heads) + // t1: a -> b (completes b, b has c(0) in its cycle heads, releases `b`, which resumes `t2`, and `retry_provisional` blocks on `c` (t2)) + // t2: c -> a (cycle, returns fixpoint initial for a with a(0) in heads) + // t2: completes c, `provisional_retry` blocks on `a` (t2) + // t1: a (complets `b` with `c` in heads) + // + // Note how `a` only depends on `c` but not `a`. This is because `a` only saw the initial value of `c` and wasn't updated when `c` completed. + // That's why we need to resolve the cycle heads recursively to `cycle_heads` contains all cycle heads at the moment this query completed. let mut queue: SmallVec<[DatabaseKeyIndex; 4]> = cycle_heads .iter() .map(|head| head.database_key_index) .filter(|head| *head != database_key_index) .collect(); + // TODO: Can we also resolve whether the cycles have converged here? 
while let Some(head) = queue.pop() { let ingredient = zalsa.lookup_ingredient(head.ingredient_index()); let nested_heads = ingredient.cycle_heads(zalsa, head.key_index()); @@ -230,10 +245,9 @@ where memo.value.as_ref() }; - let last_provisional_value = last_provisional_value.expect( - "`fetch_cold_cycle` should have inserted a provisional memo with Cycle::initial", - ); + "`fetch_cold_cycle` should have inserted a provisional memo with Cycle::initial", + ); crate::tracing::debug!( "{database_key_index:?}: execute: \ I am a cycle head, comparing last provisional value with new value" @@ -487,6 +501,10 @@ impl Drop for ClearCycleHeadIfPanicking<'_, C> { if std::thread::panicking() { let revisions = QueryRevisions::fixpoint_initial(self.ingredient.database_key_index(self.id)); + revisions.update_iteration_count_mut( + self.ingredient.database_key_index(self.id), + IterationCount::panicked(), + ); let memo = Memo::new(None, self.zalsa.current_revision(), revisions); self.ingredient diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 6f7be1397..65c757b14 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -232,7 +232,7 @@ where } } - let memo = self.execute(db,zalsa, zalsa_local, database_key_index, opt_old_memo); + let memo = self.execute(db, zalsa, zalsa_local, database_key_index, opt_old_memo); Some(memo) } diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 8e34f1160..3d273b2e2 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -446,6 +446,7 @@ where /// If this is a provisional memo, validate that it was cached in the same iteration of the /// same cycle(s) that we are still executing. If so, it is valid for reuse. This avoids /// runaway re-execution of the same queries within a fixpoint iteration. 
+ #[inline] fn validate_same_iteration( &self, zalsa: &Zalsa, diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 93bd2a337..c4ee34316 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -568,7 +568,10 @@ struct QueryRevisionsExtraInner { cycle_converged: bool, - #[cfg_attr(feature = "persistence", serde(with = "crate::zalsa_local::persistence::atomic_bool"))] + #[cfg_attr( + feature = "persistence", + serde(with = "crate::zalsa_local::persistence::atomic_bool") + )] nested_cycle: AtomicBool, } @@ -1265,7 +1268,7 @@ pub(crate) mod persistence { } } } - + // A workaround the fact that `shuttle` atomic types do not implement `serde::{Serialize, Deserialize}`. pub(super) mod verified_final { use crate::sync::atomic::{AtomicBool, Ordering}; @@ -1302,5 +1305,4 @@ pub(crate) mod persistence { serde::Deserialize::deserialize(deserializer).map(AtomicBool::new) } } - } diff --git a/tests/parallel/cycle_nested_deep_conditional_changed.rs b/tests/parallel/cycle_nested_deep_conditional_changed.rs index f827cda0b..51d506456 100644 --- a/tests/parallel/cycle_nested_deep_conditional_changed.rs +++ b/tests/parallel/cycle_nested_deep_conditional_changed.rs @@ -108,26 +108,23 @@ fn the_test() { } let t1 = thread::spawn(move || { - let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); + let _span = tracing::info_span!("t1", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_a(db, input); }); - let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); - query_a(&db, input) }); let t2 = thread::spawn(move || { - let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); + let _span = tracing::info_span!("t2", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_b(db, input); }); - let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); query_b(&db, input) }); let t3 = 
thread::spawn(move || { - let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered(); + let _span = tracing::info_span!("t3", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_d(db, input); }); @@ -135,7 +132,7 @@ fn the_test() { query_d(&db, input) }); let t4 = thread::spawn(move || { - let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); + let _span = tracing::info_span!("t4", thread_id = ?thread::current().id()).entered(); let (db, input) = get_db(|db, input| { query_e(db, input); From f9aca5b50cfd9d41a06cbb78b124d4b01f909399 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 2 Oct 2025 08:30:40 +0200 Subject: [PATCH 04/45] Move locking into sync table --- src/function.rs | 12 +- src/function/execute.rs | 109 +++++++---- src/function/fetch.rs | 25 ++- src/function/maybe_changed_after.rs | 22 ++- src/function/memo.rs | 18 +- src/function/sync.rs | 183 +++++++++++++++++-- src/ingredient.rs | 8 +- src/runtime.rs | 32 ++++ src/runtime/dependency_graph.rs | 82 ++++++++- tests/parallel/cycle_nested_three_threads.rs | 15 +- 10 files changed, 418 insertions(+), 88 deletions(-) diff --git a/src/function.rs b/src/function.rs index baa853828..7572e3dfa 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, SyncGuard}; +pub(crate) use sync::{ClaimGuard, SyncGuard, SyncTable}; use std::any::Any; use std::fmt; @@ -13,7 +13,7 @@ use crate::cycle::{ }; use crate::database::RawDatabase; use crate::function::delete::DeletedEntries; -use crate::function::sync::{ClaimResult, SyncTable}; +use crate::function::sync::ClaimResult; use crate::hash::{FxHashSet, FxIndexSet}; use crate::ingredient::{Ingredient, WaitForResult}; use crate::key::DatabaseKeyIndex; @@ -392,6 +392,10 @@ where memo.revisions.cycle_converged() } + fn sync_table(&self) -> &SyncTable { + 
&self.sync_table + } + fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads { self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input)) .map(|memo| memo.cycle_heads()) @@ -406,9 +410,9 @@ where /// * [`WaitResult::Cycle`] Claiming the `key_index` results in a cycle because it's on the current's thread query stack or /// running on another thread that is blocked on this thread. fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { - match self.sync_table.try_claim(zalsa, key_index) { + match self.sync_table.try_claim(zalsa, key_index, false) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), - ClaimResult::Cycle => WaitForResult::Cycle, + ClaimResult::Cycle(bool) => WaitForResult::Cycle(bool), ClaimResult::Claimed(guard) => WaitForResult::Available(guard), } } diff --git a/src/function/execute.rs b/src/function/execute.rs index 8a9886eaa..85cb270b2 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -3,7 +3,8 @@ use smallvec::SmallVec; use crate::active_query::CompletedQuery; use crate::cycle::{CycleRecoveryStrategy, IterationCount}; use crate::function::memo::Memo; -use crate::function::{Configuration, IngredientImpl}; +use crate::function::{ClaimGuard, Configuration, IngredientImpl}; +use crate::ingredient::WaitForResult; use crate::plumbing::ZalsaLocal; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::tracked_struct::Identity; @@ -32,6 +33,7 @@ where zalsa_local: &'db ZalsaLocal, database_key_index: DatabaseKeyIndex, opt_old_memo: Option<&Memo<'db, C>>, + claim_guard: ClaimGuard, ) -> &'db Memo<'db, C> { let id = database_key_index.key_index(); let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); @@ -44,13 +46,16 @@ where }) }); - let (new_value, mut completed_query) = match C::CYCLE_STRATEGY { - CycleRecoveryStrategy::Panic => Self::execute_query( - db, - zalsa, - zalsa_local.push_query(database_key_index, 
IterationCount::initial()), - opt_old_memo, - ), + let (new_value, mut completed_query, new_lock_owner) = match C::CYCLE_STRATEGY { + CycleRecoveryStrategy::Panic => { + let (new_value, completed_query) = Self::execute_query( + db, + zalsa, + zalsa_local.push_query(database_key_index, IterationCount::initial()), + opt_old_memo, + ); + (new_value, completed_query, None) + } CycleRecoveryStrategy::FallbackImmediate => { let (mut new_value, mut completed_query) = Self::execute_query( db, @@ -93,7 +98,7 @@ where completed_query.revisions.verified_final = AtomicBool::new(false); } - (new_value, completed_query) + (new_value, completed_query, None) } CycleRecoveryStrategy::Fixpoint => self.execute_maybe_iterate( db, @@ -121,7 +126,8 @@ where // outputs and update the tracked struct IDs for seeding the next revision. self.diff_outputs(zalsa, database_key_index, old_memo, &completed_query); } - self.insert_memo( + + let new_memo = self.insert_memo( zalsa, database_key_index.key_index(), Memo::new( @@ -130,7 +136,13 @@ where completed_query.revisions, ), memo_ingredient_index, - ) + ); + + if let Some(new_lock_owner) = new_lock_owner { + claim_guard.transfer_to(new_lock_owner); + } + + new_memo } fn execute_maybe_iterate<'db>( @@ -141,7 +153,7 @@ where zalsa_local: &'db ZalsaLocal, database_key_index: DatabaseKeyIndex, memo_ingredient_index: MemoIngredientIndex, - ) -> (C::Output<'db>, CompletedQuery) { + ) -> (C::Output<'db>, CompletedQuery, Option) { let id = database_key_index.key_index(); // Our provisional value from the previous iteration, when doing fixpoint iteration. @@ -168,7 +180,7 @@ where let mut active_query = zalsa_local.push_query(database_key_index, iteration_count); - let (new_value, completed_query) = loop { + let (new_value, completed_query, new_lock_owner) = loop { // Tracked struct ids that existed in the previous revision // but weren't recreated in the last iteration. 
It's important that we seed the next // query with these ids because the query might re-create them as part of the next iteration. @@ -182,9 +194,10 @@ where // If there are no cycle heads, break out of the loop (`cycle_heads_mut` returns `None` if the cycle head list is empty) let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() else { - break (new_value, completed_query); + break (new_value, completed_query, None); }; + // TODO: Remove "removed" cycle heads" let mut cycle_heads = std::mem::take(cycle_heads); // Recursively resolve all cycle heads that this head depends on. @@ -222,7 +235,7 @@ where // Did the new result we got depend on our own provisional value, in a cycle? if !cycle_heads.contains(&database_key_index) { completed_query.revisions.set_cycle_heads(cycle_heads); - break (new_value, completed_query); + break (new_value, completed_query, None); } let last_provisional_value = if let Some(last_provisional) = previous_memo { @@ -257,18 +270,37 @@ where // This is a nested query if it depends on any other cycle head than itself // where claiming it results in a cycle. In that case, both queries form a single connected component // that we can iterate together rather than having separate nested fixpoint iterations. - let outer_cycle = cycle_heads - .iter() - .filter(|head| head.database_key_index != database_key_index) - .find_map(|head| { - let head_ingredient = - zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - head_ingredient - .wait_for(zalsa, head.database_key_index.key_index()) - .is_cycle() - .then_some(head.database_key_index) - }); + // SAFETY: We don't enter `query_stack_unchecked` recursively. 
+ let outer_on_stack = unsafe { + zalsa_local.with_query_stack_unchecked(|stack| { + stack + .iter() + .rev() + .filter(|query| query.database_key_index != database_key_index) + .find(|query| cycle_heads.contains(&query.database_key_index)) + .map(|query| query.database_key_index) + }) + }; + + // Prefer queries on the stack over those on other threads to take this query out of the cycle as quickly as possible. + let outer_cycle = outer_on_stack.or_else(|| { + cycle_heads + .iter() + .filter(|head| head.database_key_index != database_key_index) + .find_map(|head| { + // let on_stack = unsafe zalsa_local.with_query_stack_unchecked(| stack| stack.iter().) + + let head_ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + let is_outer_cycle = matches!( + head_ingredient.wait_for(zalsa, head.database_key_index.key_index()), + WaitForResult::Cycle(false) + ); + is_outer_cycle.then_some(head.database_key_index) + }) + }); let this_converged = C::values_equal(&new_value, last_provisional_value); @@ -301,19 +333,21 @@ where new_value = fallback_value; } } - } else { - completed_query.revisions.set_cycle_converged(true); } + completed_query + .revisions + .set_cycle_converged(this_converged); + completed_query.revisions.mark_nested_cycle(); + if let Some(outer_cycle) = outer_cycle { tracing::debug!( "Detected nested cycle {database_key_index:?}, iterate it as part of the outer cycle {outer_cycle:?}" ); - completed_query.revisions.mark_nested_cycle(); completed_query.revisions.set_cycle_heads(cycle_heads); - break (new_value, completed_query); + break (new_value, completed_query, Some(outer_cycle)); } // Verify that all cycles have converged, including all inner cycles. 
@@ -325,7 +359,14 @@ where let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - ingredient.cycle_converged(zalsa, head.database_key_index.key_index()) + let converged = + ingredient.cycle_converged(zalsa, head.database_key_index.key_index()); + + if !converged { + tracing::debug!("inner cycle {database_key_index:?} has not converged"); + } + + converged }); if converged { @@ -347,7 +388,7 @@ where *completed_query.revisions.verified_final.get_mut() = true; - break (new_value, completed_query); + break (new_value, completed_query, None); } completed_query.revisions.set_cycle_heads(cycle_heads); @@ -419,7 +460,7 @@ where revisions = &completed_query.revisions ); - (new_value, completed_query) + (new_value, completed_query, new_lock_owner) } #[inline] @@ -499,7 +540,7 @@ impl<'a, C: Configuration> ClearCycleHeadIfPanicking<'a, C> { impl Drop for ClearCycleHeadIfPanicking<'_, C> { fn drop(&mut self) { if std::thread::panicking() { - let revisions = + let mut revisions = QueryRevisions::fixpoint_initial(self.ingredient.database_key_index(self.id)); revisions.update_iteration_count_mut( self.ingredient.database_key_index(self.id), diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 65c757b14..82e1de0ff 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -132,7 +132,7 @@ where ) -> Option<&'db Memo<'db, C>> { let database_key_index = self.database_key_index(id); // Try to claim this query: if someone else has claimed it already, go back and start again. - let claim_guard = match self.sync_table.try_claim(zalsa, id) { + let claim_guard = match self.sync_table.try_claim(zalsa, id, true) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); @@ -223,16 +223,23 @@ where // some head is running on another thread, drop our claim guard to give that thread // a chance to take ownership of this query and complete it as part of its fixpoint iteration. 
// We will then block on the cycle head and retry once all cycle heads completed. - if !old_memo.try_claim_heads(zalsa, zalsa_local) { - drop(claim_guard); - old_memo.block_on_heads(zalsa, zalsa_local); - return None; - } + // if !old_memo.try_claim_heads(zalsa, zalsa_local) { + // drop(claim_guard); + // old_memo.block_on_heads(zalsa, zalsa_local); + // return None; + // } } } } - let memo = self.execute(db, zalsa, zalsa_local, database_key_index, opt_old_memo); + let memo = self.execute( + db, + zalsa, + zalsa_local, + database_key_index, + opt_old_memo, + claim_guard, + ); Some(memo) } @@ -258,7 +265,9 @@ where if can_shallow_update.yes() { self.update_shallow(zalsa, database_key_index, memo, can_shallow_update); - if C::CYCLE_STRATEGY == CycleRecoveryStrategy::Fixpoint { + if C::CYCLE_STRATEGY == CycleRecoveryStrategy::Fixpoint + && memo.revisions.is_nested_cycle() + { // This feels strange. I feel like we need to preserve the cycle heads. Let's say a cycle head only sometimes participates in the cycle. // This doesn't mean that the value becomes final because of it. The query might as well be cyclic in the next iteration but // we then never re-executed that query because it was marked as `verified_final`. diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 3d273b2e2..54e02243f 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -141,7 +141,7 @@ where ) -> Option { let database_key_index = self.database_key_index(key_index); - let _claim_guard = match self.sync_table.try_claim(zalsa, key_index) { + let _claim_guard = match self.sync_table.try_claim(zalsa, key_index, true) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); @@ -227,7 +227,14 @@ where // `in_cycle` tracks if the enclosing query is in a cycle. 
`deep_verify.cycle_heads` tracks // if **this query** encountered a cycle (which means there's some provisional value somewhere floating around). if old_memo.value.is_some() && !cycle_heads.has_any() { - let memo = self.execute(db, zalsa, zalsa_local, database_key_index, Some(old_memo)); + let memo = self.execute( + db, + zalsa, + zalsa_local, + database_key_index, + Some(old_memo), + _claim_guard, + ); let changed_at = memo.revisions.changed_at; // Always assume that a provisional value has changed. @@ -481,11 +488,12 @@ where } TryClaimHeadsResult::Available(available_cycle_head) => { // Check the cycle heads recursively - if available_cycle_head.is_nested(zalsa) { - available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); - } else { - return false; - } + // if available_cycle_head.is_nested(zalsa) { + // available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); + // } else { + // return false; + // } + return false; } TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { return false; diff --git a/src/function/memo.rs b/src/function/memo.rs index 3e2c10ba5..55d28c23e 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -201,14 +201,14 @@ impl<'db, C: Configuration> Memo<'db, C> { all_cycles = false; } TryClaimHeadsResult::Available(available) => { - if available.is_nested(zalsa) { - // This is a nested cycle. The lock of nested cycles is released - // when there query completes. But we need to recurse - // TODO: What about cycle initial values. Do we need to reset nested? - available.queue_cycle_heads(&mut cycle_heads); - } else { - all_cycles = false; - } + // if available.is_nested(zalsa) { + // // This is a nested cycle. The lock of nested cycles is released + // // when there query completes. But we need to recurse + // // TODO: What about cycle initial values. Do we need to reset nested? 
+ // available.queue_cycle_heads(&mut cycle_heads); + // } else { + all_cycles = false; + // } } TryClaimHeadsResult::Running(running) => { all_cycles = false; @@ -682,7 +682,7 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { .. } => { match ingredient.wait_for(self.zalsa, head_key_index) { - WaitForResult::Cycle { .. } => { + WaitForResult::Cycle(..) => { // We hit a cycle blocking on the cycle head; this means this query actively // participates in the cycle and some other query is blocked on this thread. crate::tracing::debug!( diff --git a/src/function/sync.rs b/src/function/sync.rs index 38b44d6e4..67b2b018c 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -20,17 +20,20 @@ pub(crate) enum ClaimResult<'a> { /// Can't claim the query because it is running on an other thread. Running(Running<'a>), /// Claiming the query results in a cycle. - Cycle, + Cycle(bool), /// Successfully claimed the query. Claimed(ClaimGuard<'a>), } pub(crate) struct SyncState { - id: ThreadId, + /// The thread id that is owning this query (actively executing it or iterating it as part of a larger cycle). + id: OwnerId, /// Set to true if any other queries are blocked, /// waiting for this query to complete. 
anyone_waiting: bool, + + is_transfer_target: bool, } impl SyncTable { @@ -41,14 +44,104 @@ impl SyncTable { } } - pub(crate) fn try_claim<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> ClaimResult<'me> { + fn make_transfer_target(&self, key_index: Id) -> Option { + let mut read = self.syncs.lock(); + read.get_mut(&key_index).map(|state| { + state.anyone_waiting = true; + state.is_transfer_target = true; + + match state.id { + OwnerId::Thread(thread_id) => thread_id, + OwnerId::Transferred => { + panic!("Can't transfer ownership to a query that has been transferred") + } + } + }) + } + + fn remove_from_map_and_unblock_queries(&self, zalsa: &Zalsa, key_index: Id) { + let mut syncs = self.syncs.lock(); + + let SyncState { + anyone_waiting, + is_transfer_target, + .. + } = syncs.remove(&key_index).expect("key claimed twice?"); + + if !anyone_waiting { + return; + } + + let database_key = DatabaseKeyIndex::new(self.ingredient, key_index); + let wait_result = if thread::panicking() { + tracing::info!("Unblocking queries blocked on {database_key:?} after a panick"); + WaitResult::Panicked + } else { + WaitResult::Completed + }; + + zalsa + .runtime() + .unblock_queries_blocked_on(database_key, wait_result); + + if !is_transfer_target { + return; + } + + let transferred_dependents = zalsa.runtime().take_transferred_dependents(database_key); + + drop(syncs); + + for dependent in transferred_dependents { + let ingredient = zalsa.lookup_ingredient(dependent.ingredient_index()); + ingredient + .sync_table() + .remove_from_map_and_unblock_queries(zalsa, dependent.key_index()); + } + } + + pub(crate) fn try_claim<'me>( + &'me self, + zalsa: &'me Zalsa, + key_index: Id, + reentry: bool, + ) -> ClaimResult<'me> { let mut write = self.syncs.lock(); match write.entry(key_index) { std::collections::hash_map::Entry::Occupied(occupied_entry) => { let &mut SyncState { - id, + ref mut id, ref mut anyone_waiting, + ref mut is_transfer_target, } = occupied_entry.into_mut(); + + let 
id = match id { + OwnerId::Thread(id) => *id, + OwnerId::Transferred => { + match zalsa.runtime().transfered_thread_id( + DatabaseKeyIndex::new(self.ingredient, key_index), + reentry, + ) { + Ok(owner_thread_id) => { + if reentry { + *id = OwnerId::Thread(owner_thread_id); + *is_transfer_target = false; + + return ClaimResult::Claimed(ClaimGuard { + key_index, + zalsa, + sync_table: self, + defused: false, + }); + } else { + return ClaimResult::Cycle(true); + } + } + Err(thread_id) => thread_id, + } + } + }; + // NB: `Ordering::Relaxed` is sufficient here, // as there are no loads that are "gated" on this // value. Everything that is written is also protected @@ -62,24 +155,41 @@ impl SyncTable { write, ) { BlockResult::Running(blocked_on) => ClaimResult::Running(blocked_on), - BlockResult::Cycle => ClaimResult::Cycle, + BlockResult::Cycle => ClaimResult::Cycle(false), } } std::collections::hash_map::Entry::Vacant(vacant_entry) => { vacant_entry.insert(SyncState { - id: thread::current().id(), + id: OwnerId::Thread(thread::current().id()), anyone_waiting: false, + is_transfer_target: false, }); ClaimResult::Claimed(ClaimGuard { key_index, zalsa, sync_table: self, + defused: false, }) } } } } +#[derive(Copy, Clone, Debug)] +enum OwnerId { + /// Entry is owned by this thread + Thread(thread::ThreadId), + /// Entry has been transferred and is owned by another thread. + /// The id is known by the `DependencyGraph`. + Transferred, +} + +impl OwnerId { + const fn is_transferred(&self) -> bool { + matches!(self, OwnerId::Transferred) + } +} + /// Marks an active 'claim' in the synchronization map. The claim is /// released when this value is dropped. 
#[must_use] @@ -87,33 +197,66 @@ pub struct ClaimGuard<'me> { key_index: Id, zalsa: &'me Zalsa, sync_table: &'me SyncTable, + defused: bool, } impl ClaimGuard<'_> { - fn remove_from_map_and_unblock_queries(&self) { + pub(crate) fn transfer_to(mut self, new_owner: DatabaseKeyIndex) { + // TODO: If new_owner is already transferred, redirect to its owner instead. + + let self_key = DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index); + tracing::debug!("Transferring ownership of {self_key:?} to {new_owner:?}",); + + let owner_ingredient = self.zalsa.lookup_ingredient(new_owner.ingredient_index()); + + // Get the owning thread of `new_owner`. + let owner_sync_table = owner_ingredient.sync_table(); + let owner_thread_id = owner_sync_table + .make_transfer_target(new_owner.key_index()) + .expect("new owner to be a locked query"); + let mut syncs = self.sync_table.syncs.lock(); - let SyncState { anyone_waiting, .. } = - syncs.remove(&self.key_index).expect("key claimed twice?"); + // FIXME: We need to update the sync tables here? No we don't, they're still transferred. + self.zalsa + .runtime() + .transfer_lock(self_key, new_owner, owner_thread_id); + + tracing::debug!("Acquired lock on syncs"); + + let SyncState { + anyone_waiting, id, .. + } = syncs.get_mut(&self.key_index).expect("key claimed twice?"); + + // Transfer ownership + *id = OwnerId::Transferred; - if anyone_waiting { + // TODO: Do we need to wake up any threads that are awaiting any of the dependents to update the dependency graph -> I think so. + if *anyone_waiting { + tracing::debug!( + "Wake up blocked threads after transferring ownership to {new_owner:?}" + ); + // Wake up all threads that were waiting on the query to complete so that they'll retry and block on the new owner. 
let database_key = DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index); - self.zalsa.runtime().unblock_queries_blocked_on( - database_key, - if thread::panicking() { - tracing::info!("Unblocking queries blocked on {database_key:?} after a panick"); - WaitResult::Panicked - } else { - WaitResult::Completed - }, - ) + self.zalsa + .runtime() + .unblock_queries_blocked_on(database_key, WaitResult::Completed); } + + *anyone_waiting = false; + + tracing::debug!("Transfer ownership completed"); + + self.defused = true; } } impl Drop for ClaimGuard<'_> { fn drop(&mut self) { - self.remove_from_map_and_unblock_queries() + if !self.defused { + self.sync_table + .remove_from_map_and_unblock_queries(self.zalsa, self.key_index); + } } } diff --git a/src/ingredient.rs b/src/ingredient.rs index 8c9ae2ca6..ae30c362e 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -99,6 +99,10 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { ); } + fn sync_table(&self) -> &crate::function::SyncTable { + unreachable!("owning_thread should only be called on functions"); + } + /// Invoked when the value `output_key` should be marked as valid in the current revision. /// This occurs because the value for `executor`, which generated it, was marked as valid /// in the current revision. 
@@ -316,12 +320,12 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) pub enum WaitForResult<'me> { Running(Running<'me>), Available(ClaimGuard<'me>), - Cycle, + Cycle(bool), } impl WaitForResult<'_> { pub const fn is_cycle(&self) -> bool { - matches!(self, WaitForResult::Cycle) + matches!(self, WaitForResult::Cycle(_)) } pub const fn is_running(&self) -> bool { diff --git a/src/runtime.rs b/src/runtime.rs index 8436c684d..a79e3402b 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,3 +1,5 @@ +use smallvec::SmallVec; + use self::dependency_graph::DependencyGraph; use crate::durability::Durability; use crate::function::SyncGuard; @@ -268,6 +270,36 @@ impl Runtime { .unblock_runtimes_blocked_on(database_key, wait_result); } + pub(super) fn transfered_thread_id( + &self, + query: DatabaseKeyIndex, + reentry: bool, + ) -> Result { + self.dependency_graph + .lock() + .transfered_thread_id(query, reentry) + } + + pub(super) fn take_transferred_dependents( + &self, + query: DatabaseKeyIndex, + ) -> SmallVec<[DatabaseKeyIndex; 4]> { + self.dependency_graph + .lock() + .take_transferred_dependents(query) + } + + pub(super) fn transfer_lock( + &self, + query: DatabaseKeyIndex, + new_owner: DatabaseKeyIndex, + owning_thread: ThreadId, + ) { + self.dependency_graph + .lock() + .transfer_lock(query, new_owner, owning_thread); + } + #[cfg(feature = "persistence")] pub(crate) fn deserialize_from(&mut self, other: &mut Runtime) { // The only field that is serialized is `revisions`. 
diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index fd26c04fa..ba6137798 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -6,7 +6,7 @@ use smallvec::SmallVec; use crate::key::DatabaseKeyIndex; use crate::runtime::dependency_graph::edge::EdgeCondvar; use crate::runtime::WaitResult; -use crate::sync::thread::ThreadId; +use crate::sync::thread::{self, ThreadId}; use crate::sync::MutexGuard; #[derive(Debug, Default)] @@ -25,6 +25,14 @@ pub(super) struct DependencyGraph { /// it stores its `WaitResult` here. As they wake up, each query Q in Qs will /// come here to fetch their results. wait_results: FxHashMap, + + /// A `K -> Q` pair indicates that `K`'s lock is now owned by + /// `Q` (The thread id of `Q` and its database key) + transfered: FxHashMap, + + /// A `K -> Qs` pair indicates that `K`'s lock is now owned by + /// `Qs` (The thread id of `Qs` and their database keys) + transfered_dependents: FxHashMap>, } impl DependencyGraph { @@ -117,6 +125,9 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { + tracing::debug!( + "Unblocking runtimes blocked on {database_key:?} with wait result {wait_result:?}" + ); let dependents = self .query_dependents .remove(&database_key) @@ -127,10 +138,79 @@ impl DependencyGraph { } } + pub(super) fn take_transferred_dependents( + &mut self, + query: DatabaseKeyIndex, + ) -> SmallVec<[DatabaseKeyIndex; 4]> { + self.transfered_dependents + .remove(&query) + .unwrap_or_default() + } + + pub(super) fn transfered_thread_id( + &mut self, + database_key_index: DatabaseKeyIndex, + claim: bool, + ) -> Result { + let (thread_id, parent) = self + .transfered + .get(&database_key_index) + .expect("transfered thread id not found"); + + if *thread_id == thread::current().id() { + if claim { + if let Some(dependents) = self.transfered_dependents.get_mut(parent) { + if let Some(index) = + dependents.iter().position(|key| *key == 
database_key_index) + { + dependents.swap_remove(index); + } + } + } + Ok(*thread_id) + } else { + Err(*thread_id) + } + } + + pub(super) fn transfer_lock( + &mut self, + query: DatabaseKeyIndex, + new_owner: DatabaseKeyIndex, + owning_thread: ThreadId, + ) { + let dependents = match self.transfered.entry(query) { + std::collections::hash_map::Entry::Vacant(entry) => { + entry.insert((owning_thread, new_owner)); + None + } + std::collections::hash_map::Entry::Occupied(mut entry) => { + let current_owner = entry.get().1; + *entry.get_mut() = (owning_thread, new_owner); + + self.transfered_dependents.remove(¤t_owner) + } + } + .unwrap_or_default(); + + let all_dependents = self.transfered_dependents.entry(new_owner).or_default(); + + for entry in &dependents { + *self.transfered.get_mut(entry).unwrap() = (owning_thread, new_owner); + all_dependents.push(*entry); + } + + tracing::debug!("Unblocking dependents of query {query:?}"); + for dependent in dependents { + self.unblock_runtimes_blocked_on(dependent, WaitResult::Completed); + } + } + /// Unblock the runtime with the given id with the given wait-result. /// This will cause it resume execution (though it will have to grab /// the lock on this data structure first, to recover the wait result). 
fn unblock_runtime(&mut self, id: ThreadId, wait_result: WaitResult) { + tracing::debug!("Unblocking runtime {id:?} with wait result {wait_result:?}"); let edge = self.edges.remove(&id).expect("not blocked"); self.wait_results.insert(id, wait_result); diff --git a/tests/parallel/cycle_nested_three_threads.rs b/tests/parallel/cycle_nested_three_threads.rs index c761a80f4..22232bd85 100644 --- a/tests/parallel/cycle_nested_three_threads.rs +++ b/tests/parallel/cycle_nested_three_threads.rs @@ -76,9 +76,18 @@ fn the_test() { let db_t2 = db_t1.clone(); let db_t3 = db_t1.clone(); - let t1 = thread::spawn(move || query_a(&db_t1)); - let t2 = thread::spawn(move || query_b(&db_t2)); - let t3 = thread::spawn(move || query_c(&db_t3)); + let t1 = thread::spawn(move || { + let _span = tracing::info_span!("t1", thread_id = ?thread::current().id()).entered(); + query_a(&db_t1) + }); + let t2 = thread::spawn(move || { + let _span = tracing::info_span!("t2", thread_id = ?thread::current().id()).entered(); + query_b(&db_t2) + }); + let t3 = thread::spawn(move || { + let _span = tracing::info_span!("t3", thread_id = ?thread::current().id()).entered(); + query_c(&db_t3) + }); let r_t1 = t1.join().unwrap(); let r_t2 = t2.join().unwrap(); From 2649303455e62e45dfa23afc6b08b675fddb0d95 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 2 Oct 2025 17:01:32 +0200 Subject: [PATCH 05/45] More trying --- log.txt | 1881 +++++++++++++++++++++++++++ src/function/execute.rs | 6 +- src/function/maybe_changed_after.rs | 3 + src/function/memo.rs | 2 +- src/function/sync.rs | 12 +- src/runtime/dependency_graph.rs | 42 +- 6 files changed, 1925 insertions(+), 21 deletions(-) create mode 100644 log.txt diff --git a/log.txt b/log.txt new file mode 100644 index 000000000..d0fdf3021 --- /dev/null +++ b/log.txt @@ -0,0 +1,1881 @@ + Compiling salsa v0.23.0 (/Users/micha/astral/salsa) +warning: unreachable pattern + --> src/function/maybe_changed_after.rs:501:54 + | +489 | 
TryClaimHeadsResult::Running(running) => { + | ------------------------------------- matches all the relevant values +... +501 | TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no value can reach this + | + = note: `#[warn(unreachable_patterns)]` on by default + +warning: unused variable: `available_cycle_head` + --> src/function/maybe_changed_after.rs:492:52 + | +492 | TryClaimHeadsResult::Available(available_cycle_head) => { + | ^^^^^^^^^^^^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_available_cycle_head` + | + = note: `#[warn(unused_variables)]` on by default + +warning: unused variable: `available` + --> src/function/memo.rs:203:52 + | +203 | TryClaimHeadsResult::Available(available) => { + | ^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_available` + +warning: unused variable: `anyone_waiting` + --> src/function/sync.rs:66:13 + | +66 | anyone_waiting, + | ^^^^^^^^^^^^^^- + | | + | help: try removing the field + +warning: unused variable: `is_transfer_target` + --> src/function/sync.rs:67:13 + | +67 | is_transfer_target, + | ^^^^^^^^^^^^^^^^^^- + | | + | help: try removing the field + +warning: type `SyncTable` is more private than the item ` as Ingredient>::sync_table` + --> src/function.rs:395:5 + | +395 | fn sync_table(&self) -> &SyncTable { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ method ` as Ingredient>::sync_table` is reachable at visibility `pub` + | +note: but type `SyncTable` is only usable at visibility `pub(crate)` + --> src/function/sync.rs:14:1 + | + 14 | pub(crate) struct SyncTable { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(private_interfaces)]` on by default + +warning: type `SyncTable` is more private than the item `Ingredient::sync_table` + --> src/ingredient.rs:102:5 + | +102 | fn sync_table(&self) -> &crate::function::SyncTable { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ method `Ingredient::sync_table` is 
reachable at visibility `pub` + | +note: but type `SyncTable` is only usable at visibility `pub(crate)` + --> src/function/sync.rs:14:1 + | + 14 | pub(crate) struct SyncTable { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: method `nested` is never used + --> src/cycle.rs:421:19 + | +420 | impl ProvisionalStatus { + | ---------------------- method in this implementation +421 | pub(crate) fn nested(&self) -> bool { + | ^^^^^^ + | + = note: `#[warn(dead_code)]` on by default + +warning: method `try_claim_heads` is never used + --> src/function/memo.rs:228:19 + | +101 | impl<'db, C: Configuration> Memo<'db, C> { + | ---------------------------------------- method in this implementation +... +228 | pub(super) fn try_claim_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool { + | ^^^^^^^^^^^^^^^ + +warning: fields `database_key_index` and `ingredient` are never read + --> src/function/memo.rs:555:5 + | +554 | pub(super) struct AvailableCycleHead<'me> { + | ------------------ fields in this struct +555 | database_key_index: DatabaseKeyIndex, + | ^^^^^^^^^^^^^^^^^^ +556 | _guard: ClaimGuard<'me>, +557 | ingredient: &'me dyn Ingredient, + | ^^^^^^^^^^ + +warning: methods `is_nested` and `queue_cycle_heads` are never used + --> src/function/memo.rs:561:19 + | +560 | impl<'a> AvailableCycleHead<'a> { + | ------------------------------- methods in this implementation +561 | pub(super) fn is_nested(&self, zalsa: &Zalsa) -> bool { + | ^^^^^^^^^ +... 
+567 | pub(super) fn queue_cycle_heads(&self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { + | ^^^^^^^^^^^^^^^^^ + +warning: method `is_transferred` is never used + --> src/function/sync.rs:188:14 + | +187 | impl OwnerId { + | ------------ method in this implementation +188 | const fn is_transferred(&self) -> bool { + | ^^^^^^^^^^^^^^ + +warning: `salsa` (lib) generated 12 warnings (run `cargo fix --lib -p salsa` to apply 2 suggestions) + Finished `test` profile [unoptimized + debuginfo] target(s) in 1.42s +──────────── + Nextest run ID 25a7d6b1-a9dd-486f-9846-2680591f3411 with nextest profile: default + Starting 1 test across 1 binary (8 tests skipped) + SIGABRT [ 0.331s] salsa::parallel cycle_nested_deep::the_test + stdout ─── + + running 1 test + 2025-10-02T12:12:04.732025Z DEBUG parallel::cycle_nested_deep: Starting new run + 2025-10-02T12:12:04.733661Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}: salsa::zalsa_local: report_tracked_read(input=query_a::interned_arguments(Id(0)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.733840Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::function::execute: query_a(Id(0)): executing query + 2025-10-02T12:12:04.734004Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::zalsa_local: report_tracked_read(input=query_b::interned_arguments(Id(400)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.734099Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): executing query + 2025-10-02T12:12:04.734224Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.734315Z INFO 
t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): executing query + 2025-10-02T12:12:04.734440Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_d::interned_arguments(Id(c00)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.734543Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): executing query + 2025-10-02T12:12:04.734611Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.734678Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::fetch: hit cycle at query_c(Id(800)), inserting and returning fixpoint initial value + 2025-10-02T12:12:04.734769Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.734802Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.734933Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): execute_maybe_iterate: result.revisions = QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + } + 2025-10-02T12:12:04.735005Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Transferring ownership of query_d(Id(c00)) to query_c(Id(800)) + 2025-10-02T12:12:04.735038Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::runtime::dependency_graph: transitive_dependents of query query_d(Id(c00)): [] + 2025-10-02T12:12:04.735062Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_d(Id(c00))] + 2025-10-02T12:12:04.735074Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_d(Id(c00)) + 2025-10-02T12:12:04.735089Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Acquired lock on syncs + 2025-10-02T12:12:04.735100Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Transfer ownership completed + 2025-10-02T12:12:04.735136Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.735155Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::zalsa_local: report_tracked_read(input=query_d(Id(c00)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.735254Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_e::interned_arguments(Id(1000)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.735315Z INFO 
t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::execute: query_e(Id(1000)): executing query + 2025-10-02T12:12:04.735373Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.735434Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: FixpointInitial, + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.735510Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: 
QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: FixpointInitial, + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.735574Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::function::fetch: hit cycle at query_c(Id(800)), returning last provisional value: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: FixpointInitial, + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + } + 2025-10-02T12:12:04.735636Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.735656Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.735755Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::execute: query_e(Id(1000)): execute_maybe_iterate: result.revisions = QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + } + 2025-10-02T12:12:04.735818Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::sync: Transferring ownership of query_e(Id(1000)) to query_c(Id(800)) + 2025-10-02T12:12:04.735847Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::runtime::dependency_graph: transitive_dependents of query query_e(Id(1000)): [] + 2025-10-02T12:12:04.735857Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_e(Id(1000))] + 2025-10-02T12:12:04.735869Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_e(Id(1000)) + 2025-10-02T12:12:04.735884Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::sync: Acquired lock on syncs + 2025-10-02T12:12:04.735893Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::sync: Transfer ownership completed + 2025-10-02T12:12:04.735924Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.735942Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::zalsa_local: report_tracked_read(input=query_e(Id(1000)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.735992Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_b::interned_arguments(Id(400)), durability=Durability(2), changed_at=R1) + 
2025-10-02T12:12:04.736071Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_b(Id(400))}: salsa::function::fetch: hit cycle at query_b(Id(400)), inserting and returning fixpoint initial value + 2025-10-02T12:12:04.736133Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_b(Id(400)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.736151Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_b(Id(400)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.736202Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_a::interned_arguments(Id(0)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.736265Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_a(Id(0))}: salsa::function::fetch: hit cycle at query_a(Id(0)), inserting and returning fixpoint initial value + 2025-10-02T12:12:04.736322Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_a(Id(0))}:block_on_heads: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.736340Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_a(Id(0))}: salsa::zalsa_local: report_tracked_read(input=query_a(Id(0)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.736482Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): execute: I am a cycle head, comparing last provisional value with new value + 2025-10-02T12:12:04.736513Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: Detected nested cycle query_c(Id(800)), iterate it as part of the outer cycle query_b(Id(400)) + 2025-10-02T12:12:04.736533Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): execute_maybe_iterate: result.revisions = QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: 
AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + } + 2025-10-02T12:12:04.736649Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Transferring ownership of query_c(Id(800)) to query_b(Id(400)) + 2025-10-02T12:12:04.736696Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: transitive_dependents of query query_c(Id(800)): [] + 2025-10-02T12:12:04.736712Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_c(Id(800))] + 2025-10-02T12:12:04.736725Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_c(Id(800)) + 2025-10-02T12:12:04.736741Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Acquired lock on syncs + 2025-10-02T12:12:04.736751Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Wake up blocked threads after transferring ownership to query_b(Id(400)) + 2025-10-02T12:12:04.736768Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: Unblocking runtimes blocked on query_c(Id(800)) with wait result Completed + 2025-10-02T12:12:04.736783Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Transfer ownership completed + 2025-10-02T12:12:04.736839Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.736853Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_b(Id(400)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.736923Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle + 2025-10-02T12:12:04.736944Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.737149Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): execute: I am a cycle head, comparing last provisional value with new value + 2025-10-02T12:12:04.737195Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: 
salsa::function::execute: Detected nested cycle query_b(Id(400)), iterate it as part of the outer cycle query_a(Id(0)) + 2025-10-02T12:12:04.737233Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): execute_maybe_iterate: result.revisions = QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: true, + }, + ), + ), + } + 2025-10-02T12:12:04.737325Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Transferring ownership of query_b(Id(400)) to query_a(Id(0)) + 2025-10-02T12:12:04.737353Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: transitive_dependents of query query_b(Id(400)): [] + 2025-10-02T12:12:04.737364Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_b(Id(400))] + 
2025-10-02T12:12:04.737372Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_b(Id(400)) + 2025-10-02T12:12:04.737385Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Acquired lock on syncs + 2025-10-02T12:12:04.737394Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Wake up blocked threads after transferring ownership to query_a(Id(0)) + 2025-10-02T12:12:04.737409Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: Unblocking runtimes blocked on query_b(Id(400)) with wait result Completed + 2025-10-02T12:12:04.737423Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Transfer ownership completed + 2025-10-02T12:12:04.737467Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.737521Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_b(Id(400)) results in a cycle + 2025-10-02T12:12:04.737571Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle + 2025-10-02T12:12:04.737590Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_b(Id(400)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.737777Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::function::execute: query_a(Id(0)): execute: I am a cycle head, comparing last provisional value with new value + 2025-10-02T12:12:04.737863Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::function::execute: query_a(Id(0)): execute: iterate again (IterationCount(1))... + 2025-10-02T12:12:04.737975Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::zalsa_local: report_tracked_read(input=query_b::interned_arguments(Id(400)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.738026Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], 
+ ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: false, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738119Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: false, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738235Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): validate_provisional(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + 
extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: false, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738347Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): validate_same_iteration(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: false, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738434Z DEBUG 
t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.738470Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): executing query + 2025-10-02T12:12:04.738550Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.738591Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + 
removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738702Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738811Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): validate_provisional(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.738933Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): validate_same_iteration(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + 
query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739035Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.739073Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): executing query + 2025-10-02T12:12:04.739154Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_d::interned_arguments(Id(c00)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.739205Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739287Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( 
+ 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739355Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): validate_provisional(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739440Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): validate_same_iteration(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ 
+ CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739508Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.739533Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): executing query + 2025-10-02T12:12:04.739593Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.739635Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + 
query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739746Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { + value: "Some()", + verified_at: AtomicRevision { + data: 1, + }, + revisions: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: 
AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: true, + }, + ), + ), + }, + }) + 2025-10-02T12:12:04.739846Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::fetch: hit cycle at query_c(Id(800)), returning last provisional value: QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_d::interned_arguments(Id(c00)), + ), + Input( + query_d(Id(c00)), + ), + Input( + query_e::interned_arguments(Id(1000)), + ), + Input( + query_e(Id(1000)), + ), + Input( + query_b::interned_arguments(Id(400)), + ), + Input( + query_b(Id(400)), + ), + Input( + query_a::interned_arguments(Id(0)), + ), + Input( + query_a(Id(0)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + CycleHead { + database_key_index: query_b(Id(400)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: true, + }, + CycleHead { + database_key_index: query_a(Id(0)), + iteration_count: AtomicIterationCount( + 0, + ), + removed: true, + }, + ], + ), + iteration: AtomicIterationCount( + 1, + ), + cycle_converged: true, + nested_cycle: false, + }, + ), + ), + } + 
2025-10-02T12:12:04.739947Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) + 2025-10-02T12:12:04.739966Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) + 2025-10-02T12:12:04.740053Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): execute_maybe_iterate: result.revisions = QueryRevisions { + changed_at: R1, + durability: Durability::HIGH, + origin: Derived( + [ + Input( + query_c::interned_arguments(Id(800)), + ), + Input( + query_c(Id(800)), + ), + ], + ), + accumulated_inputs: AtomicInputAccumulatedValues( + false, + ), + verified_final: false, + extra: QueryRevisionsExtra( + Some( + QueryRevisionsExtraInner { + accumulated: AccumulatedMap { + map: [], + }, + tracked_struct_ids: [], + cycle_heads: CycleHeads( + [ + CycleHead { + database_key_index: query_c(Id(800)), + iteration_count: AtomicIterationCount( + 1, + ), + removed: false, + }, + ], + ), + iteration: AtomicIterationCount( + 0, + ), + cycle_converged: false, + nested_cycle: false, + }, + ), + ), + } + 2025-10-02T12:12:04.740132Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Transferring ownership of query_d(Id(c00)) to query_c(Id(800)) + stderr 
─── + WARNING: Shuttle only correctly models SeqCst atomics and treats all other Orderings as if they were SeqCst. Bugs caused by weaker orderings like Acquire may be missed. See https://docs.rs/shuttle/*/shuttle/sync/atomic/index.html#warning-about-relaxed-behaviors for details or to disable this warning. + test panicked in task 'task-1' + failing schedule: + " + 91019c0bf3908ce795e7b3842300000000000000000000000000000000000000000000000000 + 00000080aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + " + pass that string to `shuttle::replay` to replay the failure + + thread 'cycle_nested_deep::the_test' panicked at src/runtime/dependency_graph.rs:193:17: + Can't transfer the query query_d(Id(c00)) twice + note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace + + thread 'cycle_nested_deep::the_test' panicked at src/sync.rs:18:27: + called `Result::unwrap()` on an `Err` value: PoisonError { .. 
} + stack backtrace: + 0: 0x10305b3ec - std::backtrace_rs::backtrace::libunwind::trace::h72f4b72e0962905d + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/../../backtrace/src/backtrace/libunwind.rs:117:9 + 1: 0x10305b3ec - std::backtrace_rs::backtrace::trace_unsynchronized::hff394536698b6b10 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/../../backtrace/src/backtrace/mod.rs:66:14 + 2: 0x10305b3ec - std::sys::backtrace::_print_fmt::h64d1e3035850353e + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:66:9 + 3: 0x10305b3ec - ::fmt::hf35f9734f9a29483 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:39:26 + 4: 0x103077270 - core::fmt::rt::Argument::fmt::hedf6f2a66f855f69 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/fmt/rt.rs:173:76 + 5: 0x103077270 - core::fmt::write::h60ec6633daab7b35 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/fmt/mod.rs:1468:25 + 6: 0x103058f1c - std::io::default_write_fmt::h0e30d7b1295222cb + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/io/mod.rs:639:11 + 7: 0x103058f1c - std::io::Write::write_fmt::hc29709fdab2e34e2 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/io/mod.rs:1954:13 + 8: 0x10305b2a0 - std::sys::backtrace::BacktraceLock::print::hca95bffd78053951 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:42:9 + 9: 0x10305c8f4 - std::panicking::default_hook::{{closure}}::h357ed4fbef22679d + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:300:27 + 10: 0x10305c74c - std::panicking::default_hook::h0a4e133b151d5758 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:327:9 + 11: 0x1030179c0 - as core::ops::function::Fn>::call::hd52e5a21affa179d + at 
/Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/alloc/src/boxed.rs:1985:9 + 12: 0x102ffdf94 - shuttle::runtime::failure::init_panic_hook::{{closure}}::{{closure}}::h1ab64f6baacd7740 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/runtime/failure.rs:151:13 + 13: 0x1030179c0 - as core::ops::function::Fn>::call::hd52e5a21affa179d + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/alloc/src/boxed.rs:1985:9 + 14: 0x10301b3a8 - generator::detail::gen::catch_unwind_filter::{{closure}}::{{closure}}::he34f8bc5fe676891 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:19:13 + 15: 0x10305d3d0 - as core::ops::function::Fn>::call::h5576f09b2ec107bd + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/alloc/src/boxed.rs:1985:9 + 16: 0x10305d3d0 - std::panicking::rust_panic_with_hook::h557a23724a5de839 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:841:13 + 17: 0x10305cfb0 - std::panicking::begin_panic_handler::{{closure}}::h269cace6208fef05 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:706:13 + 18: 0x10305b89c - std::sys::backtrace::__rust_end_short_backtrace::h5be0da278f3aaec7 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:174:18 + 19: 0x10305cc8c - __rustc[de2ca18b4c54d5b8]::rust_begin_unwind + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:697:5 + 20: 0x10316d568 - core::panicking::panic_fmt::h477ff48eff31ffa4 + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/panicking.rs:75:14 + 21: 0x10316d920 - core::result::unwrap_failed::h61c3c2f1df5908ff + at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/result.rs:1765:5 + 22: 0x102f6bd44 - core::result::Result::unwrap::h4cb36a2766f5cbbc + at 
/Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/result.rs:1167:23 + 23: 0x102f6bd44 - salsa::sync::shim::Mutex::lock::h097a2902e2835134 + at /Users/micha/astral/salsa/src/sync.rs:18:27 + 24: 0x102f660d8 - salsa::function::sync::SyncTable::remove_from_map_and_unblock_queries::hcada563f686fc42d + at /Users/micha/astral/salsa/src/function/sync.rs:63:36 + 25: 0x102f82874 - ::drop::hd4c1c4d155089e2a + at /Users/micha/astral/salsa/src/function/sync.rs:258:18 + 26: 0x102f7bc80 - core::ptr::drop_in_place::h99cf3acc8c597698 + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/ptr/mod.rs:804:1 + 27: 0x102f66b90 - salsa::function::sync::ClaimGuard::transfer_to::h520bf794386fe3fe + at /Users/micha/astral/salsa/src/function/sync.rs:251:5 + 28: 0x102ad62dc - salsa::function::execute::>::execute::hbbba44ec316e5fb0 + at /Users/micha/astral/salsa/src/function/execute.rs:142:25 + 29: 0x102a55840 - salsa::function::fetch::>::fetch_cold::h13ddde2089e39769 + at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 + 30: 0x102a62b0c - salsa::function::fetch::>::fetch_cold_with_retry::h5e637465ac7ae2a8 + at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 + 31: 0x102b9957c - salsa::function::fetch::>::refresh_memo::{{closure}}::hf0f240cdd97cc72e + at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 + 32: 0x102bbb028 - core::option::Option::or_else::hac9e8fa26e9723e1 + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 + 33: 0x102a6c298 - salsa::function::fetch::>::refresh_memo::h1fdabb30fcd2c5b2 + at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 + 34: 0x102a6c298 - salsa::function::fetch::>::fetch::h25e191e18b833040 + at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 + 35: 0x102987448 - parallel::cycle_nested_deep::query_d::{{closure}}::h40a86cf268db705c + at 
/Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 + 36: 0x102b62e54 - salsa::attach::Attached::attach::h69191faec846a3fc + at /Users/micha/astral/salsa/src/attach.rs:79:9 + 37: 0x102b6235c - salsa::attach::attach::{{closure}}::hac06f2d00674a955 + at /Users/micha/astral/salsa/src/attach.rs:135:15 + 38: 0x102b6ca60 - shuttle::thread::LocalKey::try_with::h23eb8439fc5ff46f + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 + 39: 0x102b6c204 - shuttle::thread::LocalKey::with::hde64f90422733f82 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 + 40: 0x102b612b0 - salsa::attach::attach::h00532bb5d035a688 + at /Users/micha/astral/salsa/src/attach.rs:133:14 + 41: 0x102ba53ec - parallel::cycle_nested_deep::query_d::h997bd2d53da3594c + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 + 42: 0x102ba4e88 - ::execute::inner_::h58c10d33ef43991d + at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:33:19 + 43: 0x102ba4e54 - ::execute::h4ba5f8612d9f040b + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:302:21 + 44: 0x102a85ab0 - salsa::function::execute::>::execute_query::he7b6a65f93890411 + at /Users/micha/astral/salsa/src/function/execute.rs:496:25 + 45: 0x102abdce8 - salsa::function::execute::>::execute_maybe_iterate::hd72c44ddb4226f70 + at /Users/micha/astral/salsa/src/function/execute.rs:193:17 + 46: 0x102acd744 - salsa::function::execute::>::execute::h0882cee98ff091df + at /Users/micha/astral/salsa/src/function/execute.rs:103:53 + 47: 0x102a5ea58 - salsa::function::fetch::>::fetch_cold::hd9d2da53a9b166e6 + at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 + 48: 0x102a626e4 - salsa::function::fetch::>::fetch_cold_with_retry::h5d19a0f97480a716 + at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 + 49: 0x102b9927c - 
salsa::function::fetch::>::refresh_memo::{{closure}}::hb6baaea8795ff477 + at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 + 50: 0x102bbbbac - core::option::Option::or_else::he0e7092c8ed529bd + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 + 51: 0x102a76f0c - salsa::function::fetch::>::refresh_memo::h80c760b58987550d + at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 + 52: 0x102a76f0c - salsa::function::fetch::>::fetch::h9635af1f41ec360a + at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 + 53: 0x10298732c - parallel::cycle_nested_deep::query_c::{{closure}}::h743db327b9af3447 + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 + 54: 0x102b6313c - salsa::attach::Attached::attach::h7b6e6af58a9552b1 + at /Users/micha/astral/salsa/src/attach.rs:79:9 + 55: 0x102b623e0 - salsa::attach::attach::{{closure}}::hb159059fba641f87 + at /Users/micha/astral/salsa/src/attach.rs:135:15 + 56: 0x102b6de88 - shuttle::thread::LocalKey::try_with::ha92a9a65177f0880 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 + 57: 0x102b6bd04 - shuttle::thread::LocalKey::with::h44b7eb74a13397d5 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 + 58: 0x102b61bd8 - salsa::attach::attach::hda9db5db82d6abc9 + at /Users/micha/astral/salsa/src/attach.rs:133:14 + 59: 0x102ba4da8 - parallel::cycle_nested_deep::query_c::h3b02265a48f48928 + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 + 60: 0x102ba4884 - ::execute::inner_::h6b773554e16a34a0 + at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:27:19 + 61: 0x102ba4858 - ::execute::h8910c585dfff7dbe + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:302:21 + 62: 0x102a83a00 - 
salsa::function::execute::>::execute_query::h82bd71c3502c9225 + at /Users/micha/astral/salsa/src/function/execute.rs:496:25 + 63: 0x102ac0544 - salsa::function::execute::>::execute_maybe_iterate::he0b0c27b8f519e40 + at /Users/micha/astral/salsa/src/function/execute.rs:193:17 + 64: 0x102ad808c - salsa::function::execute::>::execute::hcfc0be9d4576d96c + at /Users/micha/astral/salsa/src/function/execute.rs:103:53 + 65: 0x102a59e60 - salsa::function::fetch::>::fetch_cold::h86d35ed713468dc1 + at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 + 66: 0x102a5f930 - salsa::function::fetch::>::fetch_cold_with_retry::h024f4d8589d1f42e + at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 + 67: 0x102b98efc - salsa::function::fetch::>::refresh_memo::{{closure}}::h0b3e80e3e74d2ab2 + at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 + 68: 0x102bb9628 - core::option::Option::or_else::h26aedad5b3c440b3 + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 + 69: 0x102a761c8 - salsa::function::fetch::>::refresh_memo::h27ce91968b58898c + at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 + 70: 0x102a761c8 - salsa::function::fetch::>::fetch::h95a0616354aa7fe5 + at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 + 71: 0x102987210 - parallel::cycle_nested_deep::query_b::{{closure}}::h6153b3e16f6f308d + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 + 72: 0x102b62b6c - salsa::attach::Attached::attach::h366c7b2a5dcaaf40 + at /Users/micha/astral/salsa/src/attach.rs:79:9 + 73: 0x102b622d8 - salsa::attach::attach::{{closure}}::h964dbf8d43a5e190 + at /Users/micha/astral/salsa/src/attach.rs:135:15 + 74: 0x102b6e0ec - shuttle::thread::LocalKey::try_with::hb1cec56c147d7185 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 + 75: 0x102b6be84 - shuttle::thread::LocalKey::with::h73d1f6115f9ab512 + at 
/Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 + 76: 0x102b61920 - salsa::attach::attach::h93d8fca84ebe8ce4 + at /Users/micha/astral/salsa/src/attach.rs:133:14 + 77: 0x102ba47ac - parallel::cycle_nested_deep::query_b::h32a9c0dd0173785e + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 + 78: 0x102ba42b8 - ::execute::inner_::he1bf9a9d0d70d65c + at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:22:5 + 79: 0x102ba428c - ::execute::hb8e48c7382c38d64 + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:302:21 + 80: 0x102a82368 - salsa::function::execute::>::execute_query::h05c8c4ff8708fa62 + at /Users/micha/astral/salsa/src/function/execute.rs:496:25 + 81: 0x102ab63c4 - salsa::function::execute::>::execute_maybe_iterate::hca1607161050f543 + at /Users/micha/astral/salsa/src/function/execute.rs:193:17 + 82: 0x102ad1a54 - salsa::function::execute::>::execute::h5653235854bec89a + at /Users/micha/astral/salsa/src/function/execute.rs:103:53 + 83: 0x102a5c748 - salsa::function::fetch::>::fetch_cold::hbc17d262a9d424a9 + at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 + 84: 0x102a658c0 - salsa::function::fetch::>::fetch_cold_with_retry::hd7906d66d64c34fc + at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 + 85: 0x102b993fc - salsa::function::fetch::>::refresh_memo::{{closure}}::hdc8bdbb7481cf939 + at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 + 86: 0x102bb94a0 - core::option::Option::or_else::h15945e711ff49ebb + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 + 87: 0x102a739fc - salsa::function::fetch::>::refresh_memo::h349052041dfaba5e + at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 + 88: 0x102a739fc - salsa::function::fetch::>::fetch::h728018c20576397e + at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 + 89: 0x1029870f4 - 
parallel::cycle_nested_deep::query_a::{{closure}}::ha800b79161cac59a + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 + 90: 0x102b63ce8 - salsa::attach::Attached::attach::he620d3afe8a50323 + at /Users/micha/astral/salsa/src/attach.rs:79:9 + 91: 0x102b61e40 - salsa::attach::attach::{{closure}}::h20e17dcbdc3a27b2 + at /Users/micha/astral/salsa/src/attach.rs:135:15 + 92: 0x102b6e6d4 - shuttle::thread::LocalKey::try_with::hf3c4ce4a35f86079 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 + 93: 0x102b6be44 - shuttle::thread::LocalKey::with::h5bd597852a578503 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 + 94: 0x102b61ce4 - salsa::attach::attach::hf42685a6fc5d40f0 + at /Users/micha/astral/salsa/src/attach.rs:133:14 + 95: 0x102ba41e0 - parallel::cycle_nested_deep::query_a::h201ed8e9c820a879 + at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 + 96: 0x102988750 - parallel::cycle_nested_deep::the_test::{{closure}}::{{closure}}::hbf2d3e584c9b4e22 + at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:74:26 + 97: 0x102b7d8d0 - shuttle::thread::thread_fn::h31e827a0fc773584 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:201:15 + 98: 0x102b76d70 - shuttle::thread::spawn_named_unchecked::{{closure}}::heedde6390fb24d2c + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:175:53 + 99: 0x102d722fc - core::ops::function::FnOnce::call_once{{vtable.shim}}::h96b74a75ee21dfbb + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/ops/function.rs:253:5 + 100: 0x102ff23d8 - as core::ops::function::FnOnce>::call_once::hbca787c54f965c07 + at 
/Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/alloc/src/boxed.rs:1971:9 + 101: 0x102feebd0 - shuttle::runtime::thread::continuation::Continuation::new::{{closure}}::h01859c82a029cf19 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/runtime/thread/continuation.rs:87:21 + 102: 0x102feab4c - generator::gen_impl::GeneratorImpl::init_code::{{closure}}::h927832c2239e7fd9 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/gen_impl.rs:357:21 + 103: 0x103013914 - generator::stack::StackBox::call_once::hdd06bfd95f3ee05c + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/stack/mod.rs:137:13 + 104: 0x103017fbc - generator::stack::Func::call_once::hec563a107185d2b2 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/stack/mod.rs:119:9 + 105: 0x10301b4ec - generator::detail::gen::gen_init_impl::{{closure}}::h76b877cffa1d3211 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:35:14 + 106: 0x103019818 - core::ops::function::FnOnce::call_once::h7d40e201be73c99b + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/ops/function.rs:253:5 + 107: 0x103015bcc - std::panicking::catch_unwind::do_call::h8d597d3211a2c793 + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/std/src/panicking.rs:589:40 + 108: 0x10301c1f8 - ___rust_try + 109: 0x103019104 - std::panicking::catch_unwind::h377e053cf722b98d + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/std/src/panicking.rs:552:19 + 110: 0x103019104 - std::panic::catch_unwind::ha1b33f20c29c2bc6 + at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/std/src/panic.rs:359:14 + 111: 0x10301b27c - 
generator::detail::gen::catch_unwind_filter::heff4f697a497fc75 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:23:5 + 112: 0x10301b3e4 - generator::detail::gen::gen_init_impl::h25bd90fa33700d57 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:50:25 + 113: 0x10301b134 - generator::detail::asm::gen_init::h7e777eabb9be3c19 + at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/aarch64_unix.rs:18:5 + + thread 'cycle_nested_deep::the_test' panicked at library/core/src/panicking.rs:233:5: + panic in a destructor during cleanup + thread caused non-unwinding panic. aborting. + + (test aborted with signal 6: SIGABRT) + + Cancelling due to test failure +──────────── + Summary [ 0.332s] 1 test run: 0 passed, 1 failed, 8 skipped + SIGABRT [ 0.331s] salsa::parallel cycle_nested_deep::the_test +error: test run failed diff --git a/src/function/execute.rs b/src/function/execute.rs index 85cb270b2..6dafa21a2 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -234,8 +234,12 @@ where // Did the new result we got depend on our own provisional value, in a cycle? 
if !cycle_heads.contains(&database_key_index) { + let new_owner = cycle_heads + .iter() + .next() + .map(|head| head.database_key_index); completed_query.revisions.set_cycle_heads(cycle_heads); - break (new_value, completed_query, None); + break (new_value, completed_query, new_owner); } let last_provisional_value = if let Some(last_provisional) = previous_memo { diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 54e02243f..78e418942 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -486,6 +486,9 @@ where return false; } } + TryClaimHeadsResult::Running(running) => { + running.block_on(&mut cycle_heads_iter); + } TryClaimHeadsResult::Available(available_cycle_head) => { // Check the cycle heads recursively // if available_cycle_head.is_nested(zalsa) { diff --git a/src/function/memo.rs b/src/function/memo.rs index 55d28c23e..566f56321 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -540,7 +540,7 @@ pub(super) struct RunningCycleHead<'me> { } impl<'a> RunningCycleHead<'a> { - fn block_on(self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { + pub(crate) fn block_on(self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { let database_key_index = self.inner.database_key(); let key_index = database_key_index.key_index(); self.inner.block_on(cycle_heads.zalsa); diff --git a/src/function/sync.rs b/src/function/sync.rs index 67b2b018c..4256b90b3 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -68,9 +68,9 @@ impl SyncTable { .. 
} = syncs.remove(&key_index).expect("key claimed twice?"); - if !anyone_waiting { - return; - } + // if !anyone_waiting { + // return; + // } let database_key = DatabaseKeyIndex::new(self.ingredient, key_index); let wait_result = if thread::panicking() { @@ -84,9 +84,9 @@ impl SyncTable { .runtime() .unblock_queries_blocked_on(database_key, wait_result); - if !is_transfer_target { - return; - } + // if !is_transfer_target { + // return; + // } let transferred_dependents = zalsa.runtime().take_transferred_dependents(database_key); diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index ba6137798..51acc2d3f 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -157,12 +157,18 @@ impl DependencyGraph { .get(&database_key_index) .expect("transfered thread id not found"); - if *thread_id == thread::current().id() { + let current_id = thread::current().id(); + if *thread_id == thread::current().id() || self.depends_on(*thread_id, current_id) { if claim { if let Some(dependents) = self.transfered_dependents.get_mut(parent) { if let Some(index) = dependents.iter().position(|key| *key == database_key_index) { + tracing::debug!( + "Remove transfered dependent {:?} from {:?}", + database_key_index, + parent + ); dependents.swap_remove(index); } } @@ -179,29 +185,39 @@ impl DependencyGraph { new_owner: DatabaseKeyIndex, owning_thread: ThreadId, ) { - let dependents = match self.transfered.entry(query) { + match self.transfered.entry(query) { std::collections::hash_map::Entry::Vacant(entry) => { entry.insert((owning_thread, new_owner)); - None } - std::collections::hash_map::Entry::Occupied(mut entry) => { - let current_owner = entry.get().1; + std::collections::hash_map::Entry::Occupied(entry) => { + // This sucks, because we no longer know which sub locks we transferred in a previous iteration. 
+ // *entry.get_mut() = (owning_thread, new_owner); - - self.transfered_dependents.remove(¤t_owner) } - } - .unwrap_or_default(); + }; + + let transitive_dependents = self + .transfered_dependents + .remove(&query) + .unwrap_or_default(); + + tracing::debug!( + "transitive_dependents of query {query:?}: {:?}", + transitive_dependents + ); - let all_dependents = self.transfered_dependents.entry(new_owner).or_default(); + let all_dependents = self.transfered_dependents.entry(query).or_default(); + all_dependents.push(query); - for entry in &dependents { + for entry in &transitive_dependents { + tracing::debug!("Transferring transitive dependent {entry:?} to {new_owner:?}"); *self.transfered.get_mut(entry).unwrap() = (owning_thread, new_owner); all_dependents.push(*entry); } + tracing::debug!("all dependents after transfer: {:?}", all_dependents); - tracing::debug!("Unblocking dependents of query {query:?}"); - for dependent in dependents { + tracing::debug!("Unblocking transitive dependents of query {query:?}"); + for dependent in transitive_dependents { self.unblock_runtimes_blocked_on(dependent, WaitResult::Completed); } } From 202fe551796df3b70710672df2afbe818b85d442 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 4 Oct 2025 09:19:21 +0200 Subject: [PATCH 06/45] More in progress work --- log.txt | 1881 --------------------------- src/function/execute.rs | 114 +- src/function/fetch.rs | 9 +- src/function/maybe_changed_after.rs | 9 +- src/function/sync.rs | 263 ++-- src/ingredient.rs | 1 + src/key.rs | 2 +- src/runtime.rs | 57 +- src/runtime/dependency_graph.rs | 194 ++- 9 files changed, 406 insertions(+), 2124 deletions(-) delete mode 100644 log.txt diff --git a/log.txt b/log.txt deleted file mode 100644 index d0fdf3021..000000000 --- a/log.txt +++ /dev/null @@ -1,1881 +0,0 @@ - Compiling salsa v0.23.0 (/Users/micha/astral/salsa) -warning: unreachable pattern - --> src/function/maybe_changed_after.rs:501:54 - | -489 | TryClaimHeadsResult::Running(running) => 
{ - | ------------------------------------- matches all the relevant values -... -501 | TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no value can reach this - | - = note: `#[warn(unreachable_patterns)]` on by default - -warning: unused variable: `available_cycle_head` - --> src/function/maybe_changed_after.rs:492:52 - | -492 | TryClaimHeadsResult::Available(available_cycle_head) => { - | ^^^^^^^^^^^^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_available_cycle_head` - | - = note: `#[warn(unused_variables)]` on by default - -warning: unused variable: `available` - --> src/function/memo.rs:203:52 - | -203 | TryClaimHeadsResult::Available(available) => { - | ^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_available` - -warning: unused variable: `anyone_waiting` - --> src/function/sync.rs:66:13 - | -66 | anyone_waiting, - | ^^^^^^^^^^^^^^- - | | - | help: try removing the field - -warning: unused variable: `is_transfer_target` - --> src/function/sync.rs:67:13 - | -67 | is_transfer_target, - | ^^^^^^^^^^^^^^^^^^- - | | - | help: try removing the field - -warning: type `SyncTable` is more private than the item ` as Ingredient>::sync_table` - --> src/function.rs:395:5 - | -395 | fn sync_table(&self) -> &SyncTable { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ method ` as Ingredient>::sync_table` is reachable at visibility `pub` - | -note: but type `SyncTable` is only usable at visibility `pub(crate)` - --> src/function/sync.rs:14:1 - | - 14 | pub(crate) struct SyncTable { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: `#[warn(private_interfaces)]` on by default - -warning: type `SyncTable` is more private than the item `Ingredient::sync_table` - --> src/ingredient.rs:102:5 - | -102 | fn sync_table(&self) -> &crate::function::SyncTable { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ method `Ingredient::sync_table` is reachable at visibility `pub` - | -note: but type 
`SyncTable` is only usable at visibility `pub(crate)` - --> src/function/sync.rs:14:1 - | - 14 | pub(crate) struct SyncTable { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -warning: method `nested` is never used - --> src/cycle.rs:421:19 - | -420 | impl ProvisionalStatus { - | ---------------------- method in this implementation -421 | pub(crate) fn nested(&self) -> bool { - | ^^^^^^ - | - = note: `#[warn(dead_code)]` on by default - -warning: method `try_claim_heads` is never used - --> src/function/memo.rs:228:19 - | -101 | impl<'db, C: Configuration> Memo<'db, C> { - | ---------------------------------------- method in this implementation -... -228 | pub(super) fn try_claim_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool { - | ^^^^^^^^^^^^^^^ - -warning: fields `database_key_index` and `ingredient` are never read - --> src/function/memo.rs:555:5 - | -554 | pub(super) struct AvailableCycleHead<'me> { - | ------------------ fields in this struct -555 | database_key_index: DatabaseKeyIndex, - | ^^^^^^^^^^^^^^^^^^ -556 | _guard: ClaimGuard<'me>, -557 | ingredient: &'me dyn Ingredient, - | ^^^^^^^^^^ - -warning: methods `is_nested` and `queue_cycle_heads` are never used - --> src/function/memo.rs:561:19 - | -560 | impl<'a> AvailableCycleHead<'a> { - | ------------------------------- methods in this implementation -561 | pub(super) fn is_nested(&self, zalsa: &Zalsa) -> bool { - | ^^^^^^^^^ -... 
-567 | pub(super) fn queue_cycle_heads(&self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { - | ^^^^^^^^^^^^^^^^^ - -warning: method `is_transferred` is never used - --> src/function/sync.rs:188:14 - | -187 | impl OwnerId { - | ------------ method in this implementation -188 | const fn is_transferred(&self) -> bool { - | ^^^^^^^^^^^^^^ - -warning: `salsa` (lib) generated 12 warnings (run `cargo fix --lib -p salsa` to apply 2 suggestions) - Finished `test` profile [unoptimized + debuginfo] target(s) in 1.42s -──────────── - Nextest run ID 25a7d6b1-a9dd-486f-9846-2680591f3411 with nextest profile: default - Starting 1 test across 1 binary (8 tests skipped) - SIGABRT [ 0.331s] salsa::parallel cycle_nested_deep::the_test - stdout ─── - - running 1 test - 2025-10-02T12:12:04.732025Z DEBUG parallel::cycle_nested_deep: Starting new run - 2025-10-02T12:12:04.733661Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}: salsa::zalsa_local: report_tracked_read(input=query_a::interned_arguments(Id(0)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.733840Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::function::execute: query_a(Id(0)): executing query - 2025-10-02T12:12:04.734004Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::zalsa_local: report_tracked_read(input=query_b::interned_arguments(Id(400)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.734099Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): executing query - 2025-10-02T12:12:04.734224Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.734315Z INFO 
t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): executing query - 2025-10-02T12:12:04.734440Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_d::interned_arguments(Id(c00)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.734543Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): executing query - 2025-10-02T12:12:04.734611Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.734678Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::fetch: hit cycle at query_c(Id(800)), inserting and returning fixpoint initial value - 2025-10-02T12:12:04.734769Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.734802Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.734933Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): execute_maybe_iterate: result.revisions = QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - } - 2025-10-02T12:12:04.735005Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Transferring ownership of query_d(Id(c00)) to query_c(Id(800)) - 2025-10-02T12:12:04.735038Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::runtime::dependency_graph: transitive_dependents of query query_d(Id(c00)): [] - 2025-10-02T12:12:04.735062Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_d(Id(c00))] - 2025-10-02T12:12:04.735074Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_d(Id(c00)) - 2025-10-02T12:12:04.735089Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Acquired lock on syncs - 2025-10-02T12:12:04.735100Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Transfer ownership completed - 2025-10-02T12:12:04.735136Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.735155Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::zalsa_local: report_tracked_read(input=query_d(Id(c00)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.735254Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_e::interned_arguments(Id(1000)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.735315Z INFO 
t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::execute: query_e(Id(1000)): executing query - 2025-10-02T12:12:04.735373Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.735434Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: FixpointInitial, - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.735510Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: 
QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: FixpointInitial, - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.735574Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::function::fetch: hit cycle at query_c(Id(800)), returning last provisional value: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: FixpointInitial, - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - } - 2025-10-02T12:12:04.735636Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.735656Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.735755Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::execute: query_e(Id(1000)): execute_maybe_iterate: result.revisions = QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - } - 2025-10-02T12:12:04.735818Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::sync: Transferring ownership of query_e(Id(1000)) to query_c(Id(800)) - 2025-10-02T12:12:04.735847Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::runtime::dependency_graph: transitive_dependents of query query_e(Id(1000)): [] - 2025-10-02T12:12:04.735857Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_e(Id(1000))] - 2025-10-02T12:12:04.735869Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_e(Id(1000)) - 2025-10-02T12:12:04.735884Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::sync: Acquired lock on syncs - 2025-10-02T12:12:04.735893Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::function::sync: Transfer ownership completed - 2025-10-02T12:12:04.735924Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.735942Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_e(Id(1000))}: salsa::zalsa_local: report_tracked_read(input=query_e(Id(1000)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.735992Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_b::interned_arguments(Id(400)), durability=Durability(2), changed_at=R1) - 
2025-10-02T12:12:04.736071Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_b(Id(400))}: salsa::function::fetch: hit cycle at query_b(Id(400)), inserting and returning fixpoint initial value - 2025-10-02T12:12:04.736133Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_b(Id(400)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.736151Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_b(Id(400)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.736202Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_a::interned_arguments(Id(0)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.736265Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_a(Id(0))}: salsa::function::fetch: hit cycle at query_a(Id(0)), inserting and returning fixpoint initial value - 2025-10-02T12:12:04.736322Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_a(Id(0))}:block_on_heads: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.736340Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_a(Id(0))}: salsa::zalsa_local: report_tracked_read(input=query_a(Id(0)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.736482Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): execute: I am a cycle head, comparing last provisional value with new value - 2025-10-02T12:12:04.736513Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: Detected nested cycle query_c(Id(800)), iterate it as part of the outer cycle query_b(Id(400)) - 2025-10-02T12:12:04.736533Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): execute_maybe_iterate: result.revisions = QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: 
AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - } - 2025-10-02T12:12:04.736649Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Transferring ownership of query_c(Id(800)) to query_b(Id(400)) - 2025-10-02T12:12:04.736696Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: transitive_dependents of query query_c(Id(800)): [] - 2025-10-02T12:12:04.736712Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_c(Id(800))] - 2025-10-02T12:12:04.736725Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_c(Id(800)) - 2025-10-02T12:12:04.736741Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Acquired lock on syncs - 2025-10-02T12:12:04.736751Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Wake up blocked threads after transferring ownership to query_b(Id(400)) - 2025-10-02T12:12:04.736768Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::runtime::dependency_graph: Unblocking runtimes blocked on query_c(Id(800)) with wait result Completed - 2025-10-02T12:12:04.736783Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::sync: Transfer ownership completed - 2025-10-02T12:12:04.736839Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.736853Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_b(Id(400)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.736923Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle - 2025-10-02T12:12:04.736944Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.737149Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): execute: I am a cycle head, comparing last provisional value with new value - 2025-10-02T12:12:04.737195Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: 
salsa::function::execute: Detected nested cycle query_b(Id(400)), iterate it as part of the outer cycle query_a(Id(0)) - 2025-10-02T12:12:04.737233Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): execute_maybe_iterate: result.revisions = QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: true, - }, - ), - ), - } - 2025-10-02T12:12:04.737325Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Transferring ownership of query_b(Id(400)) to query_a(Id(0)) - 2025-10-02T12:12:04.737353Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: transitive_dependents of query query_b(Id(400)): [] - 2025-10-02T12:12:04.737364Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: all dependents after transfer: [query_b(Id(400))] - 
2025-10-02T12:12:04.737372Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: Unblocking transitive dependents of query query_b(Id(400)) - 2025-10-02T12:12:04.737385Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Acquired lock on syncs - 2025-10-02T12:12:04.737394Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Wake up blocked threads after transferring ownership to query_a(Id(0)) - 2025-10-02T12:12:04.737409Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::runtime::dependency_graph: Unblocking runtimes blocked on query_b(Id(400)) with wait result Completed - 2025-10-02T12:12:04.737423Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::sync: Transfer ownership completed - 2025-10-02T12:12:04.737467Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.737521Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_b(Id(400)) results in a cycle - 2025-10-02T12:12:04.737571Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle - 2025-10-02T12:12:04.737590Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_b(Id(400)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.737777Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::function::execute: query_a(Id(0)): execute: I am a cycle head, comparing last provisional value with new value - 2025-10-02T12:12:04.737863Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::function::execute: query_a(Id(0)): execute: iterate again (IterationCount(1))... - 2025-10-02T12:12:04.737975Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}: salsa::zalsa_local: report_tracked_read(input=query_b::interned_arguments(Id(400)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.738026Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], 
- ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: false, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738119Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: false, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738235Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): validate_provisional(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - 
extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: false, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738347Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::maybe_changed_after: query_b(Id(400)): validate_same_iteration(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: false, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738434Z DEBUG 
t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.738470Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::function::execute: query_b(Id(400)): executing query - 2025-10-02T12:12:04.738550Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.738591Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - 
removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738702Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738811Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): validate_provisional(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.738933Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): validate_same_iteration(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - 
query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739035Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::memo: Waiting for query_a(Id(0)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.739073Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::function::execute: query_c(Id(800)): executing query - 2025-10-02T12:12:04.739154Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_d::interned_arguments(Id(c00)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.739205Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) 
}}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739287Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( 
- 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739355Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): validate_provisional(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739440Z TRACE t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::maybe_changed_after: query_d(Id(c00)): validate_same_iteration(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ 
- CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739508Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.739533Z INFO t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): executing query - 2025-10-02T12:12:04.739593Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::zalsa_local: report_tracked_read(input=query_c::interned_arguments(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.739635Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - 
query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739746Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::maybe_changed_after: query_c(Id(800)): shallow_verify_memo(memo = Memo { - value: "Some()", - verified_at: AtomicRevision { - data: 1, - }, - revisions: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: 
AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: true, - }, - ), - ), - }, - }) - 2025-10-02T12:12:04.739846Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::function::fetch: hit cycle at query_c(Id(800)), returning last provisional value: QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_d::interned_arguments(Id(c00)), - ), - Input( - query_d(Id(c00)), - ), - Input( - query_e::interned_arguments(Id(1000)), - ), - Input( - query_e(Id(1000)), - ), - Input( - query_b::interned_arguments(Id(400)), - ), - Input( - query_b(Id(400)), - ), - Input( - query_a::interned_arguments(Id(0)), - ), - Input( - query_a(Id(0)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - CycleHead { - database_key_index: query_b(Id(400)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: true, - }, - CycleHead { - database_key_index: query_a(Id(0)), - iteration_count: AtomicIterationCount( - 0, - ), - removed: true, - }, - ], - ), - iteration: AtomicIterationCount( - 1, - ), - cycle_converged: true, - nested_cycle: false, - }, - ), - ), - } - 
2025-10-02T12:12:04.739947Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}:block_on_heads: salsa::function::memo: Waiting for query_c(Id(800)) results in a cycle (because it is already in the query stack) - 2025-10-02T12:12:04.739966Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}:fetch{query=query_c(Id(800))}: salsa::zalsa_local: report_tracked_read(input=query_c(Id(800)), durability=Durability(2), changed_at=R1) - 2025-10-02T12:12:04.740053Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::execute: query_d(Id(c00)): execute_maybe_iterate: result.revisions = QueryRevisions { - changed_at: R1, - durability: Durability::HIGH, - origin: Derived( - [ - Input( - query_c::interned_arguments(Id(800)), - ), - Input( - query_c(Id(800)), - ), - ], - ), - accumulated_inputs: AtomicInputAccumulatedValues( - false, - ), - verified_final: false, - extra: QueryRevisionsExtra( - Some( - QueryRevisionsExtraInner { - accumulated: AccumulatedMap { - map: [], - }, - tracked_struct_ids: [], - cycle_heads: CycleHeads( - [ - CycleHead { - database_key_index: query_c(Id(800)), - iteration_count: AtomicIterationCount( - 1, - ), - removed: false, - }, - ], - ), - iteration: AtomicIterationCount( - 0, - ), - cycle_converged: false, - nested_cycle: false, - }, - ), - ), - } - 2025-10-02T12:12:04.740132Z DEBUG t1{thread_id=ThreadId { task_id: main-thread(1) }}:fetch{query=query_a(Id(0))}:fetch{query=query_b(Id(400))}:fetch{query=query_c(Id(800))}:fetch{query=query_d(Id(c00))}: salsa::function::sync: Transferring ownership of query_d(Id(c00)) to query_c(Id(800)) - stderr 
─── - WARNING: Shuttle only correctly models SeqCst atomics and treats all other Orderings as if they were SeqCst. Bugs caused by weaker orderings like Acquire may be missed. See https://docs.rs/shuttle/*/shuttle/sync/atomic/index.html#warning-about-relaxed-behaviors for details or to disable this warning. - test panicked in task 'task-1' - failing schedule: - " - 91019c0bf3908ce795e7b3842300000000000000000000000000000000000000000000000000 - 00000080aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - " - pass that string to `shuttle::replay` to replay the failure - - thread 'cycle_nested_deep::the_test' panicked at src/runtime/dependency_graph.rs:193:17: - Can't transfer the query query_d(Id(c00)) twice - note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace - - thread 'cycle_nested_deep::the_test' panicked at src/sync.rs:18:27: - called `Result::unwrap()` on an `Err` value: PoisonError { .. 
} - stack backtrace: - 0: 0x10305b3ec - std::backtrace_rs::backtrace::libunwind::trace::h72f4b72e0962905d - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/../../backtrace/src/backtrace/libunwind.rs:117:9 - 1: 0x10305b3ec - std::backtrace_rs::backtrace::trace_unsynchronized::hff394536698b6b10 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/../../backtrace/src/backtrace/mod.rs:66:14 - 2: 0x10305b3ec - std::sys::backtrace::_print_fmt::h64d1e3035850353e - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:66:9 - 3: 0x10305b3ec - ::fmt::hf35f9734f9a29483 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:39:26 - 4: 0x103077270 - core::fmt::rt::Argument::fmt::hedf6f2a66f855f69 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/fmt/rt.rs:173:76 - 5: 0x103077270 - core::fmt::write::h60ec6633daab7b35 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/fmt/mod.rs:1468:25 - 6: 0x103058f1c - std::io::default_write_fmt::h0e30d7b1295222cb - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/io/mod.rs:639:11 - 7: 0x103058f1c - std::io::Write::write_fmt::hc29709fdab2e34e2 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/io/mod.rs:1954:13 - 8: 0x10305b2a0 - std::sys::backtrace::BacktraceLock::print::hca95bffd78053951 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:42:9 - 9: 0x10305c8f4 - std::panicking::default_hook::{{closure}}::h357ed4fbef22679d - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:300:27 - 10: 0x10305c74c - std::panicking::default_hook::h0a4e133b151d5758 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:327:9 - 11: 0x1030179c0 - as core::ops::function::Fn>::call::hd52e5a21affa179d - at 
/Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/alloc/src/boxed.rs:1985:9 - 12: 0x102ffdf94 - shuttle::runtime::failure::init_panic_hook::{{closure}}::{{closure}}::h1ab64f6baacd7740 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/runtime/failure.rs:151:13 - 13: 0x1030179c0 - as core::ops::function::Fn>::call::hd52e5a21affa179d - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/alloc/src/boxed.rs:1985:9 - 14: 0x10301b3a8 - generator::detail::gen::catch_unwind_filter::{{closure}}::{{closure}}::he34f8bc5fe676891 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:19:13 - 15: 0x10305d3d0 - as core::ops::function::Fn>::call::h5576f09b2ec107bd - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/alloc/src/boxed.rs:1985:9 - 16: 0x10305d3d0 - std::panicking::rust_panic_with_hook::h557a23724a5de839 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:841:13 - 17: 0x10305cfb0 - std::panicking::begin_panic_handler::{{closure}}::h269cace6208fef05 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:706:13 - 18: 0x10305b89c - std::sys::backtrace::__rust_end_short_backtrace::h5be0da278f3aaec7 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/sys/backtrace.rs:174:18 - 19: 0x10305cc8c - __rustc[de2ca18b4c54d5b8]::rust_begin_unwind - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/std/src/panicking.rs:697:5 - 20: 0x10316d568 - core::panicking::panic_fmt::h477ff48eff31ffa4 - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/panicking.rs:75:14 - 21: 0x10316d920 - core::result::unwrap_failed::h61c3c2f1df5908ff - at /rustc/1159e78c4747b02ef996e55082b704c09b970588/library/core/src/result.rs:1765:5 - 22: 0x102f6bd44 - core::result::Result::unwrap::h4cb36a2766f5cbbc - at 
/Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/result.rs:1167:23 - 23: 0x102f6bd44 - salsa::sync::shim::Mutex::lock::h097a2902e2835134 - at /Users/micha/astral/salsa/src/sync.rs:18:27 - 24: 0x102f660d8 - salsa::function::sync::SyncTable::remove_from_map_and_unblock_queries::hcada563f686fc42d - at /Users/micha/astral/salsa/src/function/sync.rs:63:36 - 25: 0x102f82874 - ::drop::hd4c1c4d155089e2a - at /Users/micha/astral/salsa/src/function/sync.rs:258:18 - 26: 0x102f7bc80 - core::ptr::drop_in_place::h99cf3acc8c597698 - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/ptr/mod.rs:804:1 - 27: 0x102f66b90 - salsa::function::sync::ClaimGuard::transfer_to::h520bf794386fe3fe - at /Users/micha/astral/salsa/src/function/sync.rs:251:5 - 28: 0x102ad62dc - salsa::function::execute::>::execute::hbbba44ec316e5fb0 - at /Users/micha/astral/salsa/src/function/execute.rs:142:25 - 29: 0x102a55840 - salsa::function::fetch::>::fetch_cold::h13ddde2089e39769 - at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 - 30: 0x102a62b0c - salsa::function::fetch::>::fetch_cold_with_retry::h5e637465ac7ae2a8 - at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 - 31: 0x102b9957c - salsa::function::fetch::>::refresh_memo::{{closure}}::hf0f240cdd97cc72e - at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 - 32: 0x102bbb028 - core::option::Option::or_else::hac9e8fa26e9723e1 - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 - 33: 0x102a6c298 - salsa::function::fetch::>::refresh_memo::h1fdabb30fcd2c5b2 - at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 - 34: 0x102a6c298 - salsa::function::fetch::>::fetch::h25e191e18b833040 - at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 - 35: 0x102987448 - parallel::cycle_nested_deep::query_d::{{closure}}::h40a86cf268db705c - at 
/Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 - 36: 0x102b62e54 - salsa::attach::Attached::attach::h69191faec846a3fc - at /Users/micha/astral/salsa/src/attach.rs:79:9 - 37: 0x102b6235c - salsa::attach::attach::{{closure}}::hac06f2d00674a955 - at /Users/micha/astral/salsa/src/attach.rs:135:15 - 38: 0x102b6ca60 - shuttle::thread::LocalKey::try_with::h23eb8439fc5ff46f - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 - 39: 0x102b6c204 - shuttle::thread::LocalKey::with::hde64f90422733f82 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 - 40: 0x102b612b0 - salsa::attach::attach::h00532bb5d035a688 - at /Users/micha/astral/salsa/src/attach.rs:133:14 - 41: 0x102ba53ec - parallel::cycle_nested_deep::query_d::h997bd2d53da3594c - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 - 42: 0x102ba4e88 - ::execute::inner_::h58c10d33ef43991d - at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:33:19 - 43: 0x102ba4e54 - ::execute::h4ba5f8612d9f040b - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:302:21 - 44: 0x102a85ab0 - salsa::function::execute::>::execute_query::he7b6a65f93890411 - at /Users/micha/astral/salsa/src/function/execute.rs:496:25 - 45: 0x102abdce8 - salsa::function::execute::>::execute_maybe_iterate::hd72c44ddb4226f70 - at /Users/micha/astral/salsa/src/function/execute.rs:193:17 - 46: 0x102acd744 - salsa::function::execute::>::execute::h0882cee98ff091df - at /Users/micha/astral/salsa/src/function/execute.rs:103:53 - 47: 0x102a5ea58 - salsa::function::fetch::>::fetch_cold::hd9d2da53a9b166e6 - at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 - 48: 0x102a626e4 - salsa::function::fetch::>::fetch_cold_with_retry::h5d19a0f97480a716 - at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 - 49: 0x102b9927c - 
salsa::function::fetch::>::refresh_memo::{{closure}}::hb6baaea8795ff477 - at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 - 50: 0x102bbbbac - core::option::Option::or_else::he0e7092c8ed529bd - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 - 51: 0x102a76f0c - salsa::function::fetch::>::refresh_memo::h80c760b58987550d - at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 - 52: 0x102a76f0c - salsa::function::fetch::>::fetch::h9635af1f41ec360a - at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 - 53: 0x10298732c - parallel::cycle_nested_deep::query_c::{{closure}}::h743db327b9af3447 - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 - 54: 0x102b6313c - salsa::attach::Attached::attach::h7b6e6af58a9552b1 - at /Users/micha/astral/salsa/src/attach.rs:79:9 - 55: 0x102b623e0 - salsa::attach::attach::{{closure}}::hb159059fba641f87 - at /Users/micha/astral/salsa/src/attach.rs:135:15 - 56: 0x102b6de88 - shuttle::thread::LocalKey::try_with::ha92a9a65177f0880 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 - 57: 0x102b6bd04 - shuttle::thread::LocalKey::with::h44b7eb74a13397d5 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 - 58: 0x102b61bd8 - salsa::attach::attach::hda9db5db82d6abc9 - at /Users/micha/astral/salsa/src/attach.rs:133:14 - 59: 0x102ba4da8 - parallel::cycle_nested_deep::query_c::h3b02265a48f48928 - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 - 60: 0x102ba4884 - ::execute::inner_::h6b773554e16a34a0 - at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:27:19 - 61: 0x102ba4858 - ::execute::h8910c585dfff7dbe - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:302:21 - 62: 0x102a83a00 - 
salsa::function::execute::>::execute_query::h82bd71c3502c9225 - at /Users/micha/astral/salsa/src/function/execute.rs:496:25 - 63: 0x102ac0544 - salsa::function::execute::>::execute_maybe_iterate::he0b0c27b8f519e40 - at /Users/micha/astral/salsa/src/function/execute.rs:193:17 - 64: 0x102ad808c - salsa::function::execute::>::execute::hcfc0be9d4576d96c - at /Users/micha/astral/salsa/src/function/execute.rs:103:53 - 65: 0x102a59e60 - salsa::function::fetch::>::fetch_cold::h86d35ed713468dc1 - at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 - 66: 0x102a5f930 - salsa::function::fetch::>::fetch_cold_with_retry::h024f4d8589d1f42e - at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 - 67: 0x102b98efc - salsa::function::fetch::>::refresh_memo::{{closure}}::h0b3e80e3e74d2ab2 - at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 - 68: 0x102bb9628 - core::option::Option::or_else::h26aedad5b3c440b3 - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 - 69: 0x102a761c8 - salsa::function::fetch::>::refresh_memo::h27ce91968b58898c - at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 - 70: 0x102a761c8 - salsa::function::fetch::>::fetch::h95a0616354aa7fe5 - at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 - 71: 0x102987210 - parallel::cycle_nested_deep::query_b::{{closure}}::h6153b3e16f6f308d - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 - 72: 0x102b62b6c - salsa::attach::Attached::attach::h366c7b2a5dcaaf40 - at /Users/micha/astral/salsa/src/attach.rs:79:9 - 73: 0x102b622d8 - salsa::attach::attach::{{closure}}::h964dbf8d43a5e190 - at /Users/micha/astral/salsa/src/attach.rs:135:15 - 74: 0x102b6e0ec - shuttle::thread::LocalKey::try_with::hb1cec56c147d7185 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 - 75: 0x102b6be84 - shuttle::thread::LocalKey::with::h73d1f6115f9ab512 - at 
/Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 - 76: 0x102b61920 - salsa::attach::attach::h93d8fca84ebe8ce4 - at /Users/micha/astral/salsa/src/attach.rs:133:14 - 77: 0x102ba47ac - parallel::cycle_nested_deep::query_b::h32a9c0dd0173785e - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 - 78: 0x102ba42b8 - ::execute::inner_::he1bf9a9d0d70d65c - at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:22:5 - 79: 0x102ba428c - ::execute::hb8e48c7382c38d64 - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:302:21 - 80: 0x102a82368 - salsa::function::execute::>::execute_query::h05c8c4ff8708fa62 - at /Users/micha/astral/salsa/src/function/execute.rs:496:25 - 81: 0x102ab63c4 - salsa::function::execute::>::execute_maybe_iterate::hca1607161050f543 - at /Users/micha/astral/salsa/src/function/execute.rs:193:17 - 82: 0x102ad1a54 - salsa::function::execute::>::execute::h5653235854bec89a - at /Users/micha/astral/salsa/src/function/execute.rs:103:53 - 83: 0x102a5c748 - salsa::function::fetch::>::fetch_cold::hbc17d262a9d424a9 - at /Users/micha/astral/salsa/src/function/fetch.rs:235:25 - 84: 0x102a658c0 - salsa::function::fetch::>::fetch_cold_with_retry::hd7906d66d64c34fc - at /Users/micha/astral/salsa/src/function/fetch.rs:107:25 - 85: 0x102b993fc - salsa::function::fetch::>::refresh_memo::{{closure}}::hdc8bdbb7481cf939 - at /Users/micha/astral/salsa/src/function/fetch.rs:64:26 - 86: 0x102bb94a0 - core::option::Option::or_else::h15945e711ff49ebb - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/option.rs:1647:21 - 87: 0x102a739fc - salsa::function::fetch::>::refresh_memo::h349052041dfaba5e - at /Users/micha/astral/salsa/src/function/fetch.rs:63:18 - 88: 0x102a739fc - salsa::function::fetch::>::fetch::h728018c20576397e - at /Users/micha/astral/salsa/src/function/fetch.rs:30:25 - 89: 0x1029870f4 - 
parallel::cycle_nested_deep::query_a::{{closure}}::ha800b79161cac59a - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:474:72 - 90: 0x102b63ce8 - salsa::attach::Attached::attach::he620d3afe8a50323 - at /Users/micha/astral/salsa/src/attach.rs:79:9 - 91: 0x102b61e40 - salsa::attach::attach::{{closure}}::h20e17dcbdc3a27b2 - at /Users/micha/astral/salsa/src/attach.rs:135:15 - 92: 0x102b6e6d4 - shuttle::thread::LocalKey::try_with::hf3c4ce4a35f86079 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:445:12 - 93: 0x102b6be44 - shuttle::thread::LocalKey::with::h5bd597852a578503 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:420:14 - 94: 0x102b61ce4 - salsa::attach::attach::hf42685a6fc5d40f0 - at /Users/micha/astral/salsa/src/attach.rs:133:14 - 95: 0x102ba41e0 - parallel::cycle_nested_deep::query_a::h201ed8e9c820a879 - at /Users/micha/astral/salsa/components/salsa-macro-rules/src/setup_tracked_fn.rs:468:13 - 96: 0x102988750 - parallel::cycle_nested_deep::the_test::{{closure}}::{{closure}}::hbf2d3e584c9b4e22 - at /Users/micha/astral/salsa/tests/parallel/cycle_nested_deep.rs:74:26 - 97: 0x102b7d8d0 - shuttle::thread::thread_fn::h31e827a0fc773584 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:201:15 - 98: 0x102b76d70 - shuttle::thread::spawn_named_unchecked::{{closure}}::heedde6390fb24d2c - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/thread.rs:175:53 - 99: 0x102d722fc - core::ops::function::FnOnce::call_once{{vtable.shim}}::h96b74a75ee21dfbb - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/ops/function.rs:253:5 - 100: 0x102ff23d8 - as core::ops::function::FnOnce>::call_once::hbca787c54f965c07 - at 
/Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/alloc/src/boxed.rs:1971:9 - 101: 0x102feebd0 - shuttle::runtime::thread::continuation::Continuation::new::{{closure}}::h01859c82a029cf19 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/shuttle-0.8.1/src/runtime/thread/continuation.rs:87:21 - 102: 0x102feab4c - generator::gen_impl::GeneratorImpl::init_code::{{closure}}::h927832c2239e7fd9 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/gen_impl.rs:357:21 - 103: 0x103013914 - generator::stack::StackBox::call_once::hdd06bfd95f3ee05c - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/stack/mod.rs:137:13 - 104: 0x103017fbc - generator::stack::Func::call_once::hec563a107185d2b2 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/stack/mod.rs:119:9 - 105: 0x10301b4ec - generator::detail::gen::gen_init_impl::{{closure}}::h76b877cffa1d3211 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:35:14 - 106: 0x103019818 - core::ops::function::FnOnce::call_once::h7d40e201be73c99b - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/core/src/ops/function.rs:253:5 - 107: 0x103015bcc - std::panicking::catch_unwind::do_call::h8d597d3211a2c793 - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/std/src/panicking.rs:589:40 - 108: 0x10301c1f8 - ___rust_try - 109: 0x103019104 - std::panicking::catch_unwind::h377e053cf722b98d - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/std/src/panicking.rs:552:19 - 110: 0x103019104 - std::panic::catch_unwind::ha1b33f20c29c2bc6 - at /Users/micha/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/src/rust/library/std/src/panic.rs:359:14 - 111: 0x10301b27c - 
generator::detail::gen::catch_unwind_filter::heff4f697a497fc75 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:23:5 - 112: 0x10301b3e4 - generator::detail::gen::gen_init_impl::h25bd90fa33700d57 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/gen.rs:50:25 - 113: 0x10301b134 - generator::detail::asm::gen_init::h7e777eabb9be3c19 - at /Users/micha/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.8.7/src/detail/aarch64_unix.rs:18:5 - - thread 'cycle_nested_deep::the_test' panicked at library/core/src/panicking.rs:233:5: - panic in a destructor during cleanup - thread caused non-unwinding panic. aborting. - - (test aborted with signal 6: SIGABRT) - - Cancelling due to test failure -──────────── - Summary [ 0.332s] 1 test run: 0 passed, 1 failed, 8 skipped - SIGABRT [ 0.331s] salsa::parallel cycle_nested_deep::the_test -error: test run failed diff --git a/src/function/execute.rs b/src/function/execute.rs index 6dafa21a2..e0ef90410 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -3,6 +3,7 @@ use smallvec::SmallVec; use crate::active_query::CompletedQuery; use crate::cycle::{CycleRecoveryStrategy, IterationCount}; use crate::function::memo::Memo; +use crate::function::sync::ReleaseMode; use crate::function::{ClaimGuard, Configuration, IngredientImpl}; use crate::ingredient::WaitForResult; use crate::plumbing::ZalsaLocal; @@ -29,12 +30,13 @@ where pub(super) fn execute<'db>( &'db self, db: &'db C::DbView, - zalsa: &'db Zalsa, + mut claim_guard: ClaimGuard<'db>, zalsa_local: &'db ZalsaLocal, - database_key_index: DatabaseKeyIndex, opt_old_memo: Option<&Memo<'db, C>>, - claim_guard: ClaimGuard, ) -> &'db Memo<'db, C> { + let database_key_index = claim_guard.database_key_index(); + let zalsa = claim_guard.zalsa(); + let id = database_key_index.key_index(); let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); @@ -46,16 
+48,13 @@ where }) }); - let (new_value, mut completed_query, new_lock_owner) = match C::CYCLE_STRATEGY { - CycleRecoveryStrategy::Panic => { - let (new_value, completed_query) = Self::execute_query( - db, - zalsa, - zalsa_local.push_query(database_key_index, IterationCount::initial()), - opt_old_memo, - ); - (new_value, completed_query, None) - } + let (new_value, mut completed_query) = match C::CYCLE_STRATEGY { + CycleRecoveryStrategy::Panic => Self::execute_query( + db, + zalsa, + zalsa_local.push_query(database_key_index, IterationCount::initial()), + opt_old_memo, + ), CycleRecoveryStrategy::FallbackImmediate => { let (mut new_value, mut completed_query) = Self::execute_query( db, @@ -98,14 +97,13 @@ where completed_query.revisions.verified_final = AtomicBool::new(false); } - (new_value, completed_query, None) + (new_value, completed_query) } CycleRecoveryStrategy::Fixpoint => self.execute_maybe_iterate( db, opt_old_memo, - zalsa, + &mut claim_guard, zalsa_local, - database_key_index, memo_ingredient_index, ), }; @@ -138,10 +136,6 @@ where memo_ingredient_index, ); - if let Some(new_lock_owner) = new_lock_owner { - claim_guard.transfer_to(new_lock_owner); - } - new_memo } @@ -149,11 +143,13 @@ where &'db self, db: &'db C::DbView, opt_old_memo: Option<&Memo<'db, C>>, - zalsa: &'db Zalsa, + claim_guard: &mut ClaimGuard<'db>, zalsa_local: &'db ZalsaLocal, - database_key_index: DatabaseKeyIndex, memo_ingredient_index: MemoIngredientIndex, - ) -> (C::Output<'db>, CompletedQuery, Option) { + ) -> (C::Output<'db>, CompletedQuery) { + let database_key_index = claim_guard.database_key_index(); + let zalsa = claim_guard.zalsa(); + let id = database_key_index.key_index(); // Our provisional value from the previous iteration, when doing fixpoint iteration. 
@@ -179,8 +175,9 @@ where } let mut active_query = zalsa_local.push_query(database_key_index, iteration_count); + claim_guard.set_release_mode(ReleaseMode::Default); - let (new_value, completed_query, new_lock_owner) = loop { + let (new_value, completed_query) = loop { // Tracked struct ids that existed in the previous revision // but weren't recreated in the last iteration. It's important that we seed the next // query with these ids because the query might re-create them as part of the next iteration. @@ -194,7 +191,7 @@ where // If there are no cycle heads, break out of the loop (`cycle_heads_mut` returns `None` if the cycle head list is empty) let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() else { - break (new_value, completed_query, None); + break (new_value, completed_query); }; // TODO: Remove "removed" cycle heads" @@ -232,14 +229,33 @@ where } } + let outer_cycle = cycle_heads + .iter() + .filter(|head| head.database_key_index != database_key_index) + .find_map(|head| { + let head_ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + let result = + head_ingredient.wait_for(zalsa, head.database_key_index.key_index()); + tracing::debug!( + "Wait for result for {:?}: {result:?} {:?}", + head.database_key_index, + result + ); + + let is_outer_cycle = matches!(result, WaitForResult::Cycle(false)); + is_outer_cycle.then_some(head.database_key_index) + }); + // Did the new result we got depend on our own provisional value, in a cycle? 
if !cycle_heads.contains(&database_key_index) { - let new_owner = cycle_heads - .iter() - .next() - .map(|head| head.database_key_index); + if let Some(new_owner) = outer_cycle { + claim_guard.set_release_mode(ReleaseMode::TransferTo(new_owner)); + } + completed_query.revisions.set_cycle_heads(cycle_heads); - break (new_value, completed_query, new_owner); + break (new_value, completed_query); } let last_provisional_value = if let Some(last_provisional) = previous_memo { @@ -275,37 +291,6 @@ where // where claiming it results in a cycle. In that case, both queries form a single connected component // that we can iterate together rather than having separate nested fixpoint iterations. - // SAFETY: We don't enter `query_stack_unchecked` recursively. - let outer_on_stack = unsafe { - zalsa_local.with_query_stack_unchecked(|stack| { - stack - .iter() - .rev() - .filter(|query| query.database_key_index != database_key_index) - .find(|query| cycle_heads.contains(&query.database_key_index)) - .map(|query| query.database_key_index) - }) - }; - - // Prefer queries on the stack over those on other threads to take this query out of the cycle as quickly as possible. - let outer_cycle = outer_on_stack.or_else(|| { - cycle_heads - .iter() - .filter(|head| head.database_key_index != database_key_index) - .find_map(|head| { - // let on_stack = unsafe zalsa_local.with_query_stack_unchecked(| stack| stack.iter().) 
- - let head_ingredient = - zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - - let is_outer_cycle = matches!( - head_ingredient.wait_for(zalsa, head.database_key_index.key_index()), - WaitForResult::Cycle(false) - ); - is_outer_cycle.then_some(head.database_key_index) - }) - }); - let this_converged = C::values_equal(&new_value, last_provisional_value); iteration_count = if outer_cycle.is_some() { @@ -350,8 +335,9 @@ where ); completed_query.revisions.set_cycle_heads(cycle_heads); + claim_guard.set_release_mode(ReleaseMode::TransferTo(outer_cycle)); - break (new_value, completed_query, Some(outer_cycle)); + break (new_value, completed_query); } // Verify that all cycles have converged, including all inner cycles. @@ -392,7 +378,7 @@ where *completed_query.revisions.verified_final.get_mut() = true; - break (new_value, completed_query, None); + break (new_value, completed_query); } completed_query.revisions.set_cycle_heads(cycle_heads); @@ -464,7 +450,7 @@ where revisions = &completed_query.revisions ); - (new_value, completed_query, new_lock_owner) + (new_value, completed_query) } #[inline] diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 82e1de0ff..57ca81063 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -232,14 +232,7 @@ where } } - let memo = self.execute( - db, - zalsa, - zalsa_local, - database_key_index, - opt_old_memo, - claim_guard, - ); + let memo = self.execute(db, claim_guard, zalsa_local, opt_old_memo); Some(memo) } diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 78e418942..ace8d19f1 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -227,14 +227,7 @@ where // `in_cycle` tracks if the enclosing query is in a cycle. `deep_verify.cycle_heads` tracks // if **this query** encountered a cycle (which means there's some provisional value somewhere floating around). 
if old_memo.value.is_some() && !cycle_heads.has_any() { - let memo = self.execute( - db, - zalsa, - zalsa_local, - database_key_index, - Some(old_memo), - _claim_guard, - ); + let memo = self.execute(db, _claim_guard, zalsa_local, Some(old_memo)); let changed_at = memo.revisions.changed_at; // Always assume that a provisional value has changed. diff --git a/src/function/sync.rs b/src/function/sync.rs index 4256b90b3..293b5c1bc 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -44,7 +44,7 @@ impl SyncTable { } } - fn make_transfer_target(&self, key_index: Id) -> Option { + fn make_transfer_target(&self, key_index: Id, zalsa: &Zalsa) -> Option { let mut read = self.syncs.lock(); read.get_mut(&key_index).map(|state| { state.anyone_waiting = true; @@ -52,96 +52,101 @@ impl SyncTable { match state.id { OwnerId::Thread(thread_id) => thread_id, - OwnerId::Transferred => { - panic!("Can't transfer ownership to a query that has been transferred") - } + OwnerId::Transferred => zalsa + .runtime() + .resolved_transferred_thread_id(DatabaseKeyIndex::new( + self.ingredient, + key_index, + )) + .unwrap(), } }) } - fn remove_from_map_and_unblock_queries(&self, zalsa: &Zalsa, key_index: Id) { - let mut syncs = self.syncs.lock(); - - let SyncState { - anyone_waiting, - is_transfer_target, - .. 
- } = syncs.remove(&key_index).expect("key claimed twice?"); - - // if !anyone_waiting { - // return; - // } - - let database_key = DatabaseKeyIndex::new(self.ingredient, key_index); - let wait_result = if thread::panicking() { - tracing::info!("Unblocking queries blocked on {database_key:?} after a panick"); - WaitResult::Panicked - } else { - WaitResult::Completed - }; - - zalsa - .runtime() - .unblock_queries_blocked_on(database_key, wait_result); - - // if !is_transfer_target { - // return; - // } - - let transferred_dependents = zalsa.runtime().take_transferred_dependents(database_key); - - drop(syncs); - - for dependent in transferred_dependents { - let ingredient = zalsa.lookup_ingredient(dependent.ingredient_index()); - ingredient - .sync_table() - .remove_from_map_and_unblock_queries(zalsa, dependent.key_index()); - } - } - pub(crate) fn try_claim<'me>( &'me self, zalsa: &'me Zalsa, key_index: Id, - reentry: bool, + allow_reentry: bool, ) -> ClaimResult<'me> { let mut write = self.syncs.lock(); match write.entry(key_index) { - std::collections::hash_map::Entry::Occupied(occupied_entry) => { - let &mut SyncState { - ref mut id, - ref mut anyone_waiting, - ref mut is_transfer_target, - } = occupied_entry.into_mut(); + std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + let id = occupied_entry.get().id; let id = match id { - OwnerId::Thread(id) => *id, + OwnerId::Thread(id) => id, OwnerId::Transferred => { - match zalsa.runtime().transfered_thread_id( - DatabaseKeyIndex::new(self.ingredient, key_index), - reentry, - ) { - Ok(owner_thread_id) => { - if reentry { - *id = OwnerId::Thread(owner_thread_id); - *is_transfer_target = false; - - return ClaimResult::Claimed(ClaimGuard { + let current_id = thread::current().id(); + let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); + match zalsa + .runtime() + .block_on_transferred(database_key_index, current_id) + { + Ok((current_owner, owning_thread_id)) => { + let SyncState 
{ id, .. } = occupied_entry.into_mut(); + + return if !allow_reentry { + tracing::debug!("Claiming {database_key_index:?} results in a cycle because re-entrant lock is not allowed"); + ClaimResult::Cycle(true) + } else { + tracing::debug!("Reentrant lock {database_key_index:?}"); + *id = OwnerId::Thread(current_id); + + zalsa.runtime().remove_transferred(database_key_index); + + if owning_thread_id != current_id { + zalsa.runtime().unblock_queries_blocked_on( + database_key_index, + WaitResult::Completed, + ); + zalsa.runtime().resume_transferred_queries( + database_key_index, + WaitResult::Completed, + ); + } + + ClaimResult::Claimed(ClaimGuard { key_index, zalsa, sync_table: self, - defused: false, - }); - } else { - return ClaimResult::Cycle(true); - } + mode: ReleaseMode::TransferTo(current_owner), + }) + }; + } + // Lock is owned by another thread, wait for it to be released. + Err(Some(thread_id)) => { + tracing::debug!("Waiting for transfered lock {database_key_index:?} to be released by thread {thread_id:?}"); + thread_id + } + // Lock was transferred but is no more. Replace the entry. + Err(None) => { + tracing::debug!( + "Claiming previously transferred lock {database_key_index:?}" + ); + + // Lock was transferred but it has since then been released. + occupied_entry.insert(SyncState { + id: OwnerId::Thread(thread::current().id()), + anyone_waiting: false, + is_transfer_target: false, + }); + return ClaimResult::Claimed(ClaimGuard { + key_index, + zalsa, + sync_table: self, + mode: ReleaseMode::Default, + }); } - Err(thread_id) => thread_id, } } }; + let &mut SyncState { + ref mut anyone_waiting, + .. + } = occupied_entry.into_mut(); + // NB: `Ordering::Relaxed` is sufficient here, // as there are no loads that are "gated" on this // value. 
Everything that is written is also protected @@ -168,7 +173,7 @@ impl SyncTable { key_index, zalsa, sync_table: self, - defused: false, + mode: ReleaseMode::Default, }) } } @@ -185,8 +190,8 @@ enum OwnerId { } impl OwnerId { - const fn is_transferred(&self) -> bool { - matches!(self, OwnerId::Transferred) + const fn is_thread(&self) -> bool { + matches!(self, OwnerId::Thread(_)) } } @@ -197,38 +202,84 @@ pub struct ClaimGuard<'me> { key_index: Id, zalsa: &'me Zalsa, sync_table: &'me SyncTable, - defused: bool, + mode: ReleaseMode, } -impl ClaimGuard<'_> { - pub(crate) fn transfer_to(mut self, new_owner: DatabaseKeyIndex) { - // TODO: If new_owner is already transferred, redirect to its owner instead. +impl<'me> ClaimGuard<'me> { + pub(crate) const fn zalsa(&self) -> &'me Zalsa { + self.zalsa + } + + pub(crate) const fn database_key_index(&self) -> DatabaseKeyIndex { + DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index) + } + + pub(crate) fn set_release_mode(&mut self, mode: ReleaseMode) { + self.mode = mode; + } + + fn release_default(&self) { + let mut syncs = self.sync_table.syncs.lock(); + let state = syncs.remove(&self.key_index).expect("key claimed twice?"); + + let database_key_index = self.database_key_index(); + tracing::debug!("release_and_unblock({database_key_index:?})"); + + let wait_result = if thread::panicking() { + tracing::info!("Unblocking queries blocked on {database_key_index:?} after a panic"); + WaitResult::Panicked + } else { + WaitResult::Completed + }; - let self_key = DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index); - tracing::debug!("Transferring ownership of {self_key:?} to {new_owner:?}",); + let SyncState { + anyone_waiting, + is_transfer_target, + ..
+ } = state; + + if !anyone_waiting { + return; + } + + let runtime = self.zalsa.runtime(); + runtime.unblock_queries_blocked_on(database_key_index, wait_result); + + if is_transfer_target { + tracing::debug!("unblock transferred queries owned by {database_key_index:?}"); + runtime.unblock_transferred_queries(database_key_index, wait_result); + } + } + + #[cold] + pub(crate) fn transfer(&self, new_owner: DatabaseKeyIndex) { + let self_key = self.database_key_index(); let owner_ingredient = self.zalsa.lookup_ingredient(new_owner.ingredient_index()); // Get the owning thread of `new_owner`. let owner_sync_table = owner_ingredient.sync_table(); let owner_thread_id = owner_sync_table - .make_transfer_target(new_owner.key_index()) + .make_transfer_target(new_owner.key_index(), self.zalsa) .expect("new owner to be a locked query"); - let mut syncs = self.sync_table.syncs.lock(); + tracing::debug!( + "Transferring ownership of {self_key:?} to {new_owner:?} ({owner_thread_id:?})" + ); - // FIXME: We need to update the sync tables here? No we don't, they're still transferred. - self.zalsa - .runtime() - .transfer_lock(self_key, new_owner, owner_thread_id); + let mut syncs = self.sync_table.syncs.lock(); - tracing::debug!("Acquired lock on syncs"); + self.zalsa.runtime().transfer_lock( + self_key, + thread::current().id(), + new_owner, + owner_thread_id, + ); let SyncState { anyone_waiting, id, .. } = syncs.get_mut(&self.key_index).expect("key claimed twice?"); - // Transfer ownership *id = OwnerId::Transferred; // TODO: Do we need to wake up any threads that are awaiting any of the dependents to update the dependency graph -> I think so. 
@@ -246,16 +297,19 @@ impl ClaimGuard<'_> { *anyone_waiting = false; tracing::debug!("Transfer ownership completed"); - - self.defused = true; } } impl Drop for ClaimGuard<'_> { fn drop(&mut self) { - if !self.defused { - self.sync_table - .remove_from_map_and_unblock_queries(self.zalsa, self.key_index); + // TODO: What to do if thread panics? Always force release? + match self.mode { + ReleaseMode::Default => { + self.release_default(); + } + ReleaseMode::TransferTo(new_owner) => { + self.transfer(new_owner); + } } } } @@ -265,3 +319,30 @@ impl std::fmt::Debug for SyncTable { f.debug_struct("SyncTable").finish() } } + +#[derive(Copy, Clone, Debug, Default)] +pub(crate) enum ReleaseMode { + /// The default release mode. + /// + /// Releases the lock of the current query for claims that are not transferred. Queries whose ownership + was transferred to this query will be transitively unlocked. + /// + /// If this lock is owned by another query (because it was transferred), then releasing is a no-op. + #[default] + Default, + + /// Transfers the ownership of the lock to the specified query. + /// + /// All waiting queries will be awakened so that they can retry and block on the new owner thread. + /// The new owner thread (or any thread it blocks on) will be able to acquire the lock (reentrant).
+ TransferTo(DatabaseKeyIndex), +} + +impl std::fmt::Debug for ClaimGuard<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ClaimGuard") + .field("key_index", &self.key_index) + .field("mode", &self.mode) + .finish_non_exhaustive() + } +} diff --git a/src/ingredient.rs b/src/ingredient.rs index ae30c362e..6ffd0d3a0 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -317,6 +317,7 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) write!(fmt, "{debug_name}({id:?})") } +#[derive(Debug)] pub enum WaitForResult<'me> { Running(Running<'me>), Available(ClaimGuard<'me>), diff --git a/src/key.rs b/src/key.rs index 82d922565..364015756 100644 --- a/src/key.rs +++ b/src/key.rs @@ -18,7 +18,7 @@ pub struct DatabaseKeyIndex { impl DatabaseKeyIndex { #[inline] - pub(crate) fn new(ingredient_index: IngredientIndex, key_index: Id) -> Self { + pub(crate) const fn new(ingredient_index: IngredientIndex, key_index: Id) -> Self { Self { key_index, ingredient_index, diff --git a/src/runtime.rs b/src/runtime.rs index a79e3402b..070698b43 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,5 +1,3 @@ -use smallvec::SmallVec; - use self::dependency_graph::DependencyGraph; use crate::durability::Durability; use crate::function::SyncGuard; @@ -270,34 +268,67 @@ impl Runtime { .unblock_runtimes_blocked_on(database_key, wait_result); } - pub(super) fn transfered_thread_id( + #[cold] + pub(crate) fn unblock_transferred_queries( + &self, + database_key: DatabaseKeyIndex, + wait_result: WaitResult, + ) { + self.dependency_graph + .lock() + .unblock_transferred_queries(database_key, wait_result); + } + + #[cold] + pub(crate) fn resume_transferred_queries( + &self, + database_key: DatabaseKeyIndex, + wait_result: WaitResult, + ) { + self.dependency_graph + .lock() + .resume_transferred_dependents(database_key, wait_result); + } + + pub(super) fn block_on_transferred( &self, query: DatabaseKeyIndex, - reentry: 
bool, - ) -> Result { + thread_id: ThreadId, + ) -> Result<(DatabaseKeyIndex, ThreadId), Option> { + self.dependency_graph + .lock() + .block_on_transferred(query, thread_id) + } + + pub(super) fn remove_transferred(&self, database_key: DatabaseKeyIndex) { self.dependency_graph .lock() - .transfered_thread_id(query, reentry) + .remove_transferred(database_key); } - pub(super) fn take_transferred_dependents( + pub(super) fn resolved_transferred_thread_id( &self, query: DatabaseKeyIndex, - ) -> SmallVec<[DatabaseKeyIndex; 4]> { + ) -> Option { self.dependency_graph .lock() - .take_transferred_dependents(query) + .resolved_transferred_id(query) + .map(|(id, _)| id) } pub(super) fn transfer_lock( &self, query: DatabaseKeyIndex, + current_thread: ThreadId, new_owner: DatabaseKeyIndex, - owning_thread: ThreadId, + new_owner_thread: ThreadId, ) { - self.dependency_graph - .lock() - .transfer_lock(query, new_owner, owning_thread); + self.dependency_graph.lock().transfer_lock( + query, + current_thread, + new_owner, + new_owner_thread, + ); } #[cfg(feature = "persistence")] diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 51acc2d3f..549305fa5 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -3,10 +3,12 @@ use std::pin::Pin; use rustc_hash::FxHashMap; use smallvec::SmallVec; +#[cfg(debug_assertions)] +use crate::hash::FxHashSet; use crate::key::DatabaseKeyIndex; use crate::runtime::dependency_graph::edge::EdgeCondvar; use crate::runtime::WaitResult; -use crate::sync::thread::{self, ThreadId}; +use crate::sync::thread::ThreadId; use crate::sync::MutexGuard; #[derive(Debug, Default)] @@ -138,87 +140,163 @@ impl DependencyGraph { } } - pub(super) fn take_transferred_dependents( + pub(super) fn unblock_transferred_queries( &mut self, - query: DatabaseKeyIndex, - ) -> SmallVec<[DatabaseKeyIndex; 4]> { - self.transfered_dependents - .remove(&query) - .unwrap_or_default() + database_key: DatabaseKeyIndex, + 
wait_result: WaitResult, + ) { + tracing::debug!("unblock_transferred_queries({database_key:?})"); + // If `database_key` is `c` and it has been transferred to `b` earlier, remove its entry. + if let Some((_, owner)) = self.transfered.remove(&database_key) { + let owner_dependents = self.transfered_dependents.get_mut(&owner).unwrap(); + let index = owner_dependents + .iter() + .position(|&x| x == database_key) + .unwrap(); + owner_dependents.swap_remove(index); + } + + let queries = self + .transfered_dependents + .remove(&database_key) + .unwrap_or_default(); + + for query in queries { + let (_, owner) = self.transfered.remove(&query).unwrap(); + debug_assert_eq!(owner, database_key); + + // Unblock transitively. + self.unblock_transferred_queries(query, wait_result); + + self.unblock_runtimes_blocked_on(query, wait_result); + } } - pub(super) fn transfered_thread_id( + /// Returns `Ok(thread_id)` if `database_key_index` is a query whose lock ownership has been transferred to `thread_id` (potentially over multiple steps) + /// and the lock was claimed. Returns `Err(Some(thread_id))` if the lock was not claimed. + /// + /// Returns `Err(None)` if `database_key_index` hasn't been transferred or its owning lock has since then been removed.
+ pub(super) fn block_on_transferred( &mut self, database_key_index: DatabaseKeyIndex, - claim: bool, - ) -> Result { - let (thread_id, parent) = self - .transfered - .get(&database_key_index) - .expect("transfered thread id not found"); - - let current_id = thread::current().id(); - if *thread_id == thread::current().id() || self.depends_on(*thread_id, current_id) { - if claim { - if let Some(dependents) = self.transfered_dependents.get_mut(parent) { - if let Some(index) = - dependents.iter().position(|key| *key == database_key_index) - { - tracing::debug!( - "Remove transfered dependent {:?} from {:?}", - database_key_index, - parent - ); - dependents.swap_remove(index); - } - } - } - Ok(*thread_id) + current_id: ThreadId, + ) -> Result<(DatabaseKeyIndex, ThreadId), Option> { + let owner_thread = self.resolved_transferred_id(database_key_index); + + let Some((thread_id, owner_key)) = owner_thread else { + return Err(None); + }; + + if thread_id == current_id || self.depends_on(thread_id, current_id) { + Ok((owner_key, thread_id)) } else { - Err(*thread_id) + Err(Some(thread_id)) + } + } + + pub(super) fn remove_transferred(&mut self, database_key: DatabaseKeyIndex) { + if let Some((_, owner)) = self.transfered.remove(&database_key) { + let dependents = self.transfered_dependents.get_mut(&owner).unwrap(); + let index = dependents.iter().position(|h| *h == database_key).unwrap(); + dependents.swap_remove(index); + } + } + + pub(super) fn resolved_transferred_id( + &self, + database_key: DatabaseKeyIndex, + ) -> Option<(ThreadId, DatabaseKeyIndex)> { + let mut owner_thread = None; + let mut owner_key = database_key; + + while let Some((next_thread, next_key)) = self.transfered.get(&owner_key) { + owner_thread = Some(*next_thread); + owner_key = *next_key; } + + owner_thread.map(|thread| (thread, owner_key)) } pub(super) fn transfer_lock( &mut self, query: DatabaseKeyIndex, + current_thread: ThreadId, new_owner: DatabaseKeyIndex, - owning_thread: ThreadId, + 
new_owner_thread: ThreadId, ) { + // if let Some((_, owner)) = self.transfered.remove(&new_owner) { + // let old_dependents = self.transfered_dependents.get_mut(&owner).unwrap(); + // let index = old_dependents.iter().position(|key| *key == query).unwrap(); + // old_dependents.swap_remove(index); + // } + + let mut owner_changed = current_thread != new_owner_thread; + + // TODO: Skip unblocks for transitive queries if the old owner is the same as the new owner? match self.transfered.entry(query) { std::collections::hash_map::Entry::Vacant(entry) => { - entry.insert((owning_thread, new_owner)); - } - std::collections::hash_map::Entry::Occupied(entry) => { - // This sucks, because we no longer know which sub locks we transferred in a previous iteration. - // - *entry.get_mut() = (owning_thread, new_owner); + // Transfer `c -> b` and there's no existing entry for `c`. + entry.insert((new_owner_thread, new_owner)); } - }; + std::collections::hash_map::Entry::Occupied(mut entry) => { + // `Transfer `c -> b` after a previous `c -> d` mapping. + // Update the owner and remove the query from the old owner's dependents. + let old_owner = entry.get().1; - let transitive_dependents = self - .transfered_dependents - .remove(&query) - .unwrap_or_default(); + owner_changed = true; + let old_dependents = self.transfered_dependents.get_mut(&old_owner).unwrap(); + let index = old_dependents.iter().position(|key| *key == query).unwrap(); + old_dependents.swap_remove(index); - tracing::debug!( - "transitive_dependents of query {query:?}: {:?}", - transitive_dependents - ); + entry.insert((new_owner_thread, new_owner)); + } + }; - let all_dependents = self.transfered_dependents.entry(query).or_default(); + // Register `c` as a dependent of `b`. 
+ let all_dependents = self.transfered_dependents.entry(new_owner).or_default(); + assert!(!all_dependents.contains(&query)); + assert!(!all_dependents.contains(&new_owner)); all_dependents.push(query); - for entry in &transitive_dependents { - tracing::debug!("Transferring transitive dependent {entry:?} to {new_owner:?}"); - *self.transfered.get_mut(entry).unwrap() = (owning_thread, new_owner); - all_dependents.push(*entry); + if owner_changed { + self.resume_transferred_dependents(query, WaitResult::Completed); } - tracing::debug!("all dependents after transfer: {:?}", all_dependents); + } + + pub(super) fn resume_transferred_dependents( + &mut self, + query: DatabaseKeyIndex, + wait_result: WaitResult, + ) { + tracing::debug!("Resuming transitive dependents of query {query:?}"); + let Some(queries) = self.transfered_dependents.get(&query) else { + return; + }; + + #[cfg(debug_assertions)] + let mut stack = FxHashSet::default(); + #[cfg(debug_assertions)] + stack.insert(query); - tracing::debug!("Unblocking transitive dependents of query {query:?}"); - for dependent in transitive_dependents { - self.unblock_runtimes_blocked_on(dependent, WaitResult::Completed); + let mut queue: SmallVec<[_; 4]> = + queries.into_iter().map(|nested| (*nested, query)).collect(); + + while let Some((nested, parent)) = queue.pop() { + debug_assert_eq!(self.transfered.get(&nested).unwrap().1, parent); + + #[cfg(debug_assertions)] + if !stack.insert(nested) { + panic!("Encountered cycle while resuming the transferred dependents. between {nested:?} and {parent:?}. 
Current state of dependency graph: {self:#?}") + } + queue.extend( + self.transfered_dependents + .get(&nested) + .into_iter() + .flatten() + .map(|inner| (*inner, nested)), + ); + + self.unblock_runtimes_blocked_on(nested, wait_result); } } From 45fefe9fd577d8a8e0144bbef0a721240a50c753 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sun, 5 Oct 2025 20:06:39 +0200 Subject: [PATCH 07/45] More progress --- src/function.rs | 4 +- src/function/execute.rs | 60 ++++++++---- src/function/maybe_changed_after.rs | 31 +++--- src/function/memo.rs | 2 +- src/function/sync.rs | 143 +++++++++++++--------------- src/ingredient.rs | 7 +- src/runtime.rs | 123 +++++++++++++++++++----- src/runtime/dependency_graph.rs | 86 ++++++++++++----- 8 files changed, 299 insertions(+), 157 deletions(-) diff --git a/src/function.rs b/src/function.rs index 7572e3dfa..14acdff6d 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, SyncGuard, SyncTable}; +pub(crate) use sync::{ClaimGuard, SyncGuard, SyncState, SyncTable}; use std::any::Any; use std::fmt; @@ -412,7 +412,7 @@ where fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { match self.sync_table.try_claim(zalsa, key_index, false) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), - ClaimResult::Cycle(bool) => WaitForResult::Cycle(bool), + ClaimResult::Cycle { with, nested } => WaitForResult::Cycle { with, nested }, ClaimResult::Claimed(guard) => WaitForResult::Available(guard), } } diff --git a/src/function/execute.rs b/src/function/execute.rs index e0ef90410..7343268b3 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -1,7 +1,7 @@ use smallvec::SmallVec; use crate::active_query::CompletedQuery; -use crate::cycle::{CycleRecoveryStrategy, IterationCount}; +use crate::cycle::{CycleHeads, CycleRecoveryStrategy, IterationCount}; use 
crate::function::memo::Memo; use crate::function::sync::ReleaseMode; use crate::function::{ClaimGuard, Configuration, IngredientImpl}; @@ -229,24 +229,26 @@ where } } - let outer_cycle = cycle_heads - .iter() - .filter(|head| head.database_key_index != database_key_index) - .find_map(|head| { - let head_ingredient = - zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + let outer_cycle = outer_cycle(zalsa, &cycle_heads, database_key_index); - let result = - head_ingredient.wait_for(zalsa, head.database_key_index.key_index()); - tracing::debug!( - "Wait for result for {:?}: {result:?} {:?}", - head.database_key_index, - result - ); + // let outer_cycle = cycle_heads + // .iter() + // .filter(|head| head.database_key_index != database_key_index) + // .find_map(|head| { + // let head_ingredient = + // zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + // let result = + // head_ingredient.wait_for(zalsa, head.database_key_index.key_index()); + // tracing::debug!( + // "Wait for result for {:?}: {result:?} {:?}", + // head.database_key_index, + // result + // ); - let is_outer_cycle = matches!(result, WaitForResult::Cycle(false)); - is_outer_cycle.then_some(head.database_key_index) - }); + // let is_outer_cycle = matches!(result, WaitForResult::Cycle(false)); + // is_outer_cycle.then_some(head.database_key_index) + // }); // Did the new result we got depend on our own provisional value, in a cycle? 
if !cycle_heads.contains(&database_key_index) { @@ -543,3 +545,27 @@ impl Drop for ClearCycleHeadIfPanicking<'_, C> { } } } + +fn outer_cycle( + zalsa: &Zalsa, + cycle_heads: &CycleHeads, + current_key: DatabaseKeyIndex, +) -> Option { + let candidates: SmallVec<[_; 4]> = cycle_heads + .iter() + .filter(|head| head.database_key_index != current_key) + .filter_map(|head| { + let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + match ingredient.wait_for(zalsa, head.database_key_index.key_index()) { + WaitForResult::Cycle { + with, + nested: false, + } => Some((head.database_key_index, with)), + _ => None, + } + }) + .collect(); + + // Do we need to pass the thread id here to account for a potential re-entrance? + zalsa.runtime().transfer_target(&candidates) +} diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index ace8d19f1..7f0b2dea4 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -414,6 +414,7 @@ where // // If we don't account for the iteration, then `a` (from iteration 0) will be finalized // because its cycle head `b` is now finalized, but `b` never pulled `a` in the last iteration. + // FIXME: Do we still need this? 
if iteration != cycle_head.iteration_count.load() { return false; } @@ -479,21 +480,23 @@ where return false; } } - TryClaimHeadsResult::Running(running) => { - running.block_on(&mut cycle_heads_iter); - } - TryClaimHeadsResult::Available(available_cycle_head) => { - // Check the cycle heads recursively - // if available_cycle_head.is_nested(zalsa) { - // available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); - // } else { - // return false; - // } - return false; - } - TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { + _ => { return false; - } + } // TryClaimHeadsResult::Running(running) => { + // running.block_on(&mut cycle_heads_iter); + // } + // TryClaimHeadsResult::Available(available_cycle_head) => { + // // Check the cycle heads recursively + // // if available_cycle_head.is_nested(zalsa) { + // // available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); + // // } else { + // // return false; + // // } + // return false; + // } + // TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { + // return false; + // } } } diff --git a/src/function/memo.rs b/src/function/memo.rs index 566f56321..9d1e22f75 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -682,7 +682,7 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { .. } => { match ingredient.wait_for(self.zalsa, head_key_index) { - WaitForResult::Cycle(..) => { + WaitForResult::Cycle { .. } => { // We hit a cycle blocking on the cycle head; this means this query actively // participates in the cycle and some other query is blocked on this thread. 
crate::tracing::debug!( diff --git a/src/function/sync.rs b/src/function/sync.rs index 293b5c1bc..91e88f75e 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -1,7 +1,7 @@ use rustc_hash::FxHashMap; use crate::key::DatabaseKeyIndex; -use crate::runtime::{BlockResult, Running, WaitResult}; +use crate::runtime::{BlockResult, ClaimTransferredResult, Running, WaitResult}; use crate::sync::thread::{self, ThreadId}; use crate::sync::Mutex; use crate::zalsa::Zalsa; @@ -20,7 +20,7 @@ pub(crate) enum ClaimResult<'a> { /// Can't claim the query because it is running on an other thread. Running(Running<'a>), /// Claiming the query results in a cycle. - Cycle(bool), + Cycle { with: ThreadId, nested: bool }, /// Successfully claimed the query. Claimed(ClaimGuard<'a>), } @@ -44,25 +44,6 @@ impl SyncTable { } } - fn make_transfer_target(&self, key_index: Id, zalsa: &Zalsa) -> Option { - let mut read = self.syncs.lock(); - read.get_mut(&key_index).map(|state| { - state.anyone_waiting = true; - state.is_transfer_target = true; - - match state.id { - OwnerId::Thread(thread_id) => thread_id, - OwnerId::Transferred => zalsa - .runtime() - .resolved_transferred_thread_id(DatabaseKeyIndex::new( - self.ingredient, - key_index, - )) - .unwrap(), - } - }) - } - pub(crate) fn try_claim<'me>( &'me self, zalsa: &'me Zalsa, @@ -79,66 +60,51 @@ impl SyncTable { OwnerId::Transferred => { let current_id = thread::current().id(); let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); - match zalsa + return match zalsa .runtime() - .block_on_transferred(database_key_index, current_id) + .claim_transferred(database_key_index, allow_reentry) { - Ok((current_owner, owning_thread_id)) => { + ClaimTransferredResult::OtherThread(other_thread) => { + occupied_entry.get_mut().anyone_waiting = true; + let thread_id = other_thread.id(); + + match other_thread.block(write) { + BlockResult::Cycle => ClaimResult::Cycle { + with: thread_id, + nested: false, + }, + 
BlockResult::Running(running) => ClaimResult::Running(running), + } + } + ClaimTransferredResult::Claimed { current_owner } => { let SyncState { id, .. } = occupied_entry.into_mut(); - return if !allow_reentry { - tracing::debug!("Claiming {database_key_index:?} results in a cycle because re-entrant lock is not allowed"); - ClaimResult::Cycle(true) - } else { - tracing::debug!("Reentrant lock {database_key_index:?}"); - *id = OwnerId::Thread(current_id); - - zalsa.runtime().remove_transferred(database_key_index); - - if owning_thread_id != current_id { - zalsa.runtime().unblock_queries_blocked_on( - database_key_index, - WaitResult::Completed, - ); - zalsa.runtime().resume_transferred_queries( - database_key_index, - WaitResult::Completed, - ); - } - - ClaimResult::Claimed(ClaimGuard { - key_index, - zalsa, - sync_table: self, - mode: ReleaseMode::TransferTo(current_owner), - }) - }; + *id = OwnerId::Thread(current_id); + + ClaimResult::Claimed(ClaimGuard { + key_index, + zalsa, + sync_table: self, + mode: ReleaseMode::TransferTo(current_owner), + }) } - // Lock is owned by another thread, wait for it to be released. - Err(Some(thread_id)) => { - tracing::debug!("Waiting for transfered lock {database_key_index:?} to be released by thread {thread_id:?}"); - thread_id + ClaimTransferredResult::Cycle { with, nested } => { + ClaimResult::Cycle { nested, with } } - // Lock was transferred but is no more. Replace the entry. - Err(None) => { - tracing::debug!( - "Claiming previously transferred lock {database_key_index:?}" - ); - - // Lock was transferred but it has since then been released. 
+ ClaimTransferredResult::Released => { occupied_entry.insert(SyncState { id: OwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, }); - return ClaimResult::Claimed(ClaimGuard { + ClaimResult::Claimed(ClaimGuard { key_index, zalsa, sync_table: self, mode: ReleaseMode::Default, - }); + }) } - } + }; } }; @@ -160,7 +126,10 @@ impl SyncTable { write, ) { BlockResult::Running(blocked_on) => ClaimResult::Running(blocked_on), - BlockResult::Cycle => ClaimResult::Cycle(false), + BlockResult::Cycle => ClaimResult::Cycle { + nested: false, + with: id, + }, } } std::collections::hash_map::Entry::Vacant(vacant_entry) => { @@ -178,6 +147,25 @@ impl SyncTable { } } } + + fn make_transfer_target(&self, key_index: Id, zalsa: &Zalsa) -> Option { + let mut syncs = self.syncs.lock(); + syncs.get_mut(&key_index).map(|state| { + state.anyone_waiting = true; + state.is_transfer_target = true; + + match state.id { + OwnerId::Thread(thread_id) => thread_id, + OwnerId::Transferred => zalsa + .runtime() + .resolved_transferred_thread_id(DatabaseKeyIndex::new( + self.ingredient, + key_index, + )) + .unwrap(), + } + }) + } } #[derive(Copy, Clone, Debug)] @@ -218,20 +206,13 @@ impl<'me> ClaimGuard<'me> { self.mode = mode; } - fn release_default(&self) { + fn release_default(&self, wait_result: WaitResult) { let mut syncs = self.sync_table.syncs.lock(); let state = syncs.remove(&self.key_index).expect("key claimed twice?"); let database_key_index = self.database_key_index(); tracing::debug!("release_and_unblock({database_key_index:?})"); - let wait_result = if thread::panicking() { - tracing::info!("Unblocking queries blocked on {database_key_index:?} after a panick"); - WaitResult::Panicked - } else { - WaitResult::Completed - }; - let SyncState { anyone_waiting, is_transfer_target, @@ -243,12 +224,13 @@ impl<'me> ClaimGuard<'me> { } let runtime = self.zalsa.runtime(); - runtime.unblock_queries_blocked_on(database_key_index, wait_result); if 
is_transfer_target { tracing::debug!("unblock transferred queries owned by {database_key_index:?}"); runtime.unblock_transferred_queries(database_key_index, wait_result); } + + runtime.unblock_queries_blocked_on(database_key_index, wait_result); } #[cold] @@ -302,10 +284,19 @@ impl<'me> ClaimGuard<'me> { impl Drop for ClaimGuard<'_> { fn drop(&mut self) { + let wait_result = if thread::panicking() { + WaitResult::Panicked + } else { + WaitResult::Completed + }; + // TODO, what to do if thread panics? Always force release? match self.mode { ReleaseMode::Default => { - self.release_default(); + self.release_default(wait_result); + } + _ if matches!(wait_result, WaitResult::Panicked) => { + self.release_default(wait_result); } ReleaseMode::TransferTo(new_owner) => { self.transfer(new_owner); diff --git a/src/ingredient.rs b/src/ingredient.rs index 6ffd0d3a0..bb3f2fc95 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -321,12 +321,15 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) pub enum WaitForResult<'me> { Running(Running<'me>), Available(ClaimGuard<'me>), - Cycle(bool), + Cycle { + with: crate::sync::thread::ThreadId, + nested: bool, + }, } impl WaitForResult<'_> { pub const fn is_cycle(&self) -> bool { - matches!(self, WaitForResult::Cycle(_)) + matches!(self, WaitForResult::Cycle { .. }) } pub const fn is_running(&self) -> bool { diff --git a/src/runtime.rs b/src/runtime.rs index 070698b43..22ec6a08b 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -58,6 +58,60 @@ pub(crate) enum BlockResult<'me> { Cycle, } +pub(crate) enum ClaimTransferredResult<'me> { + /// The transferred query has been successfully claimed. + Claimed { current_owner: DatabaseKeyIndex }, + + /// The query is running on another thread. + OtherThread(OtherThread<'me>), + + /// Blocking resulted in a cycle. 
+ /// + /// The lock is hold by the current thread or there's another thread that is waiting on the current thread, + /// and blocking this thread on the other thread would result in a deadlock/cycle. + Cycle { with: ThreadId, nested: bool }, + + /// Query is no longer a transferred query. + Released, +} + +pub(super) struct OtherThread<'me> { + dg: crate::sync::MutexGuard<'me, DependencyGraph>, + database_key: DatabaseKeyIndex, + other_id: ThreadId, +} + +impl<'me> OtherThread<'me> { + pub(super) fn id(&self) -> ThreadId { + self.other_id + } + + pub(super) fn block(self, query_mutex_guard: SyncGuard<'me>) -> BlockResult<'me> { + let thread_id = thread::current().id(); + // Cycle in the same thread. + if thread_id == self.other_id { + return BlockResult::Cycle; + } + + if self.dg.depends_on(self.other_id, thread_id) { + crate::tracing::debug!( + "block_on: cycle detected for {:?} in thread {thread_id:?} on {:?}", + self.database_key, + self.other_id + ); + return BlockResult::Cycle; + } + + BlockResult::Running(Running(Box::new(BlockedOnInner { + dg: self.dg, + query_mutex_guard, + database_key: self.database_key, + other_id: self.other_id, + thread_id, + }))) + } +} + pub struct Running<'me>(Box>); struct BlockedOnInner<'me> { @@ -279,31 +333,48 @@ impl Runtime { .unblock_transferred_queries(database_key, wait_result); } - #[cold] - pub(crate) fn resume_transferred_queries( - &self, - database_key: DatabaseKeyIndex, - wait_result: WaitResult, - ) { - self.dependency_graph - .lock() - .resume_transferred_dependents(database_key, wait_result); - } - - pub(super) fn block_on_transferred( + pub(super) fn claim_transferred( &self, query: DatabaseKeyIndex, - thread_id: ThreadId, - ) -> Result<(DatabaseKeyIndex, ThreadId), Option> { - self.dependency_graph - .lock() - .block_on_transferred(query, thread_id) - } + allow_reentry: bool, + ) -> ClaimTransferredResult<'_> { + let mut dg = self.dependency_graph.lock(); + let thread_id = thread::current().id(); - pub(super) fn 
remove_transferred(&self, database_key: DatabaseKeyIndex) { - self.dependency_graph - .lock() - .remove_transferred(database_key); + match dg.block_on_transferred(query, thread_id) { + Ok((current_owner, owning_thread_id)) => { + return if !allow_reentry { + tracing::debug!("Claiming {query:?} results in a cycle because re-entrant lock is not allowed"); + ClaimTransferredResult::Cycle { + with: owning_thread_id, + nested: true, + } + } else { + tracing::debug!("Reentrant lock {query:?}"); + dg.remove_transferred(query); + + // This seems wrong? + // if owning_thread_id != current_id { + dg.unblock_runtimes_blocked_on(query, WaitResult::Completed); + dg.resume_transferred_dependents(query, WaitResult::Completed); + + ClaimTransferredResult::Claimed { current_owner } + }; + } + // Lock is owned by another thread, wait for it to be released. + Err(Some(thread_id)) => { + tracing::debug!( + "Waiting for transfered lock {query:?} to be released by thread {thread_id:?}" + ); + ClaimTransferredResult::OtherThread(OtherThread { + dg, + database_key: query, + other_id: thread_id, + }) + } + // Lock was transferred but is no more. Replace the entry. + Err(None) => ClaimTransferredResult::Released, + } } pub(super) fn resolved_transferred_thread_id( @@ -331,6 +402,14 @@ impl Runtime { ); } + pub(crate) fn transfer_target( + &self, + candidates: &[(DatabaseKeyIndex, ThreadId)], + ) -> Option { + let dependency_graph = self.dependency_graph.lock(); + dependency_graph.transfer_target(candidates) + } + #[cfg(feature = "persistence")] pub(crate) fn deserialize_from(&mut self, other: &mut Runtime) { // The only field that is serialized is `revisions`. 
diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 549305fa5..6da1adaa6 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -1,7 +1,7 @@ use std::pin::Pin; use rustc_hash::FxHashMap; -use smallvec::SmallVec; +use smallvec::{smallvec, SmallVec}; #[cfg(debug_assertions)] use crate::hash::FxHashSet; @@ -145,8 +145,8 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { - tracing::debug!("unblock_transferred_queries({database_key:?}"); // If `database_key` is `c` and it has been transfered to `b` earlier, remove its entry. + tracing::debug!("unblock_transferred_queries({database_key:?}"); if let Some((_, owner)) = self.transfered.remove(&database_key) { let owner_dependents = self.transfered_dependents.get_mut(&owner).unwrap(); let index = owner_dependents @@ -156,18 +156,22 @@ impl DependencyGraph { owner_dependents.swap_remove(index); } - let queries = self - .transfered_dependents - .remove(&database_key) - .unwrap_or_default(); + let mut unblocked: SmallVec<[_; 4]> = SmallVec::new(); + let mut queue: SmallVec<[_; 4]> = smallvec![database_key]; + + while let Some(current) = queue.pop() { + self.transfered.remove(¤t); + let transitive = self + .transfered_dependents + .remove(¤t) + .unwrap_or_default(); - for query in queries { - let (_, owner) = self.transfered.remove(&query).unwrap(); - debug_assert_eq!(owner, database_key); + queue.extend(transitive); - // Unblock transitively. 
- self.unblock_transferred_queries(query, wait_result); + unblocked.push(current); + } + for query in unblocked { self.unblock_runtimes_blocked_on(query, wait_result); } } @@ -206,15 +210,18 @@ impl DependencyGraph { &self, database_key: DatabaseKeyIndex, ) -> Option<(ThreadId, DatabaseKeyIndex)> { - let mut owner_thread = None; - let mut owner_key = database_key; + let Some(&(mut resolved_thread, owner)) = self.transfered.get(&database_key) else { + return None; + }; + + let mut current_owner = owner; - while let Some((next_thread, next_key)) = self.transfered.get(&owner_key) { - owner_thread = Some(*next_thread); - owner_key = *next_key; + while let Some(&(next_thread, next_key)) = self.transfered.get(¤t_owner) { + resolved_thread = next_thread; + current_owner = next_key; } - owner_thread.map(|thread| (thread, owner_key)) + Some((resolved_thread, owner)) } pub(super) fn transfer_lock( @@ -224,12 +231,6 @@ impl DependencyGraph { new_owner: DatabaseKeyIndex, new_owner_thread: ThreadId, ) { - // if let Some((_, owner)) = self.transfered.remove(&new_owner) { - // let old_dependents = self.transfered_dependents.get_mut(&owner).unwrap(); - // let index = old_dependents.iter().position(|key| *key == query).unwrap(); - // old_dependents.swap_remove(index); - // } - let mut owner_changed = current_thread != new_owner_thread; // TODO: Skip unblocks for transitive queries if the old owner is the same as the new owner? @@ -263,6 +264,45 @@ impl DependencyGraph { } } + pub(super) fn transfer_target( + &self, + candidates: &[(DatabaseKeyIndex, ThreadId)], + ) -> Option { + if candidates.is_empty() { + return None; + } + + if let &[(key, _)] = candidates { + return Some(key); + } + + let mut possible_tranfer_targets: Vec<_> = candidates + .iter() + .filter_map(|&(key, thread)| { + // Ensure that transferring to this other thread won't introduce any cyclic wait dependency (where `thread` is blocked on `other_thread` and the other way round).) 
+ let depends_on_another = candidates.iter().any(|&(_, other_thread)| { + other_thread != thread && self.depends_on(thread, other_thread) + }); + + (!depends_on_another).then_some(key) + }) + .collect(); + + if possible_tranfer_targets.is_empty() { + panic!( + "No possible transfer targets found for query {:?}", + candidates + ); + } else if let &[target] = &*possible_tranfer_targets { + Some(target) + } else { + possible_tranfer_targets + .into_iter() + .min_by_key(|target| (target.ingredient_index(), target.key_index())) + .map(|target| target) + } + } + pub(super) fn resume_transferred_dependents( &mut self, query: DatabaseKeyIndex, From 86e8e3c2c01e31dca2ed8882a170166db91ed343 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Mon, 6 Oct 2025 13:54:07 +0200 Subject: [PATCH 08/45] Fix most parallel tests --- src/function.rs | 2 +- src/function/execute.rs | 6 ++- src/function/sync.rs | 94 +++++++++++++++++++++++++-------- src/runtime.rs | 30 ++++++----- src/runtime/dependency_graph.rs | 65 +++++++++++++++-------- 5 files changed, 139 insertions(+), 58 deletions(-) diff --git a/src/function.rs b/src/function.rs index 14acdff6d..d6e983a1b 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, SyncGuard, SyncState, SyncTable}; +pub(crate) use sync::{ClaimGuard, SyncGuard, SyncTable}; use std::any::Any; use std::fmt; diff --git a/src/function/execute.rs b/src/function/execute.rs index 7343268b3..eb9eaa4d3 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -252,8 +252,10 @@ where // Did the new result we got depend on our own provisional value, in a cycle? 
if !cycle_heads.contains(&database_key_index) { - if let Some(new_owner) = outer_cycle { - claim_guard.set_release_mode(ReleaseMode::TransferTo(new_owner)); + if let Some(outer) = outer_cycle { + claim_guard.set_release_mode(ReleaseMode::TransferTo(outer)); + } else { + claim_guard.set_release_mode(ReleaseMode::SelfOnly); } completed_query.revisions.set_cycle_heads(cycle_heads); diff --git a/src/function/sync.rs b/src/function/sync.rs index 91e88f75e..7eb4aee39 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -34,6 +34,7 @@ pub(crate) struct SyncState { anyone_waiting: bool, is_transfer_target: bool, + claimed_twice: bool, } impl SyncTable { @@ -64,7 +65,7 @@ impl SyncTable { .runtime() .claim_transferred(database_key_index, allow_reentry) { - ClaimTransferredResult::OtherThread(other_thread) => { + ClaimTransferredResult::ClaimedBy(other_thread) => { occupied_entry.get_mut().anyone_waiting = true; let thread_id = other_thread.id(); @@ -76,16 +77,27 @@ impl SyncTable { BlockResult::Running(running) => ClaimResult::Running(running), } } - ClaimTransferredResult::Claimed { current_owner } => { - let SyncState { id, .. } = occupied_entry.into_mut(); + ClaimTransferredResult::Reentrant => { + let SyncState { + id, claimed_twice, .. + } = occupied_entry.into_mut(); + + if *claimed_twice { + // TODO: Is this thread id correct? 
+ return ClaimResult::Cycle { + with: current_id, + nested: false, + }; + } *id = OwnerId::Thread(current_id); + *claimed_twice = true; ClaimResult::Claimed(ClaimGuard { key_index, zalsa, sync_table: self, - mode: ReleaseMode::TransferTo(current_owner), + mode: ReleaseMode::SelfOnly, }) } ClaimTransferredResult::Cycle { with, nested } => { @@ -96,6 +108,7 @@ impl SyncTable { id: OwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, + claimed_twice: false, }); ClaimResult::Claimed(ClaimGuard { key_index, @@ -137,6 +150,7 @@ impl SyncTable { id: OwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, + claimed_twice: false, }); ClaimResult::Claimed(ClaimGuard { key_index, @@ -148,7 +162,12 @@ impl SyncTable { } } - fn make_transfer_target(&self, key_index: Id, zalsa: &Zalsa) -> Option { + fn make_transfer_target( + &self, + key_index: Id, + zalsa: &Zalsa, + ignore: DatabaseKeyIndex, + ) -> Option { let mut syncs = self.syncs.lock(); syncs.get_mut(&key_index).map(|state| { state.anyone_waiting = true; @@ -158,10 +177,10 @@ impl SyncTable { OwnerId::Thread(thread_id) => thread_id, OwnerId::Transferred => zalsa .runtime() - .resolved_transferred_thread_id(DatabaseKeyIndex::new( - self.ingredient, - key_index, - )) + .resolved_transferred_thread_id( + DatabaseKeyIndex::new(self.ingredient, key_index), + ignore, + ) .unwrap(), } }) @@ -210,21 +229,30 @@ impl<'me> ClaimGuard<'me> { let mut syncs = self.sync_table.syncs.lock(); let state = syncs.remove(&self.key_index).expect("key claimed twice?"); + self.release(wait_result, state); + } + + fn release(&self, wait_result: WaitResult, state: SyncState) { let database_key_index = self.database_key_index(); tracing::debug!("release_and_unblock({database_key_index:?})"); let SyncState { anyone_waiting, is_transfer_target, + claimed_twice, .. 
} = state; + let runtime = self.zalsa.runtime(); + + if claimed_twice { + runtime.remove_transferred(database_key_index); + } + if !anyone_waiting { return; } - let runtime = self.zalsa.runtime(); - if is_transfer_target { tracing::debug!("unblock transferred queries owned by {database_key_index:?}"); runtime.unblock_transferred_queries(database_key_index, wait_result); @@ -233,16 +261,35 @@ impl<'me> ClaimGuard<'me> { runtime.unblock_queries_blocked_on(database_key_index, wait_result); } + #[cold] + fn release_self(&self) { + tracing::debug!("release_self"); + let mut syncs = self.sync_table.syncs.lock(); + let std::collections::hash_map::Entry::Occupied(mut state) = syncs.entry(self.key_index) + else { + panic!("key claimed twice?"); + }; + + if state.get().claimed_twice { + state.get_mut().claimed_twice = false; + state.get_mut().id = OwnerId::Transferred; + } else { + self.release(WaitResult::Completed, state.remove()); + } + } + #[cold] pub(crate) fn transfer(&self, new_owner: DatabaseKeyIndex) { let self_key = self.database_key_index(); + let runtime = self.zalsa.runtime(); + let owner_ingredient = self.zalsa.lookup_ingredient(new_owner.ingredient_index()); // Get the owning thread of `new_owner`. let owner_sync_table = owner_ingredient.sync_table(); let owner_thread_id = owner_sync_table - .make_transfer_target(new_owner.key_index(), self.zalsa) + .make_transfer_target(new_owner.key_index(), self.zalsa, self_key) .expect("new owner to be a locked query"); tracing::debug!( @@ -251,18 +298,17 @@ impl<'me> ClaimGuard<'me> { let mut syncs = self.sync_table.syncs.lock(); - self.zalsa.runtime().transfer_lock( - self_key, - thread::current().id(), - new_owner, - owner_thread_id, - ); + runtime.transfer_lock(self_key, thread::current().id(), new_owner, owner_thread_id); let SyncState { - anyone_waiting, id, .. + anyone_waiting, + id, + claimed_twice, + .. 
} = syncs.get_mut(&self.key_index).expect("key claimed twice?"); *id = OwnerId::Transferred; + *claimed_twice = false; // TODO: Do we need to wake up any threads that are awaiting any of the dependents to update the dependency graph -> I think so. if *anyone_waiting { @@ -271,9 +317,7 @@ impl<'me> ClaimGuard<'me> { ); // Wake up all threads that were waiting on the query to complete so that they'll retry and block on the new owner. let database_key = DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index); - self.zalsa - .runtime() - .unblock_queries_blocked_on(database_key, WaitResult::Completed); + runtime.unblock_queries_blocked_on(database_key, WaitResult::Completed); } *anyone_waiting = false; @@ -296,8 +340,12 @@ impl Drop for ClaimGuard<'_> { self.release_default(wait_result); } _ if matches!(wait_result, WaitResult::Panicked) => { + tracing::debug!("Release after panicked"); self.release_default(wait_result); } + ReleaseMode::SelfOnly => { + self.release_self(); + } ReleaseMode::TransferTo(new_owner) => { self.transfer(new_owner); } @@ -322,6 +370,8 @@ pub(crate) enum ReleaseMode { #[default] Default, + SelfOnly, + /// Transfers the ownership of the lock to the specified query. /// /// All waiting queries will be awakened so that they can retry and block on the new owner thread. diff --git a/src/runtime.rs b/src/runtime.rs index 22ec6a08b..855da283e 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -60,10 +60,10 @@ pub(crate) enum BlockResult<'me> { pub(crate) enum ClaimTransferredResult<'me> { /// The transferred query has been successfully claimed. - Claimed { current_owner: DatabaseKeyIndex }, + Reentrant, /// The query is running on another thread. - OtherThread(OtherThread<'me>), + ClaimedBy(OtherThread<'me>), /// Blocking resulted in a cycle. 
/// @@ -342,23 +342,23 @@ impl Runtime { let thread_id = thread::current().id(); match dg.block_on_transferred(query, thread_id) { - Ok((current_owner, owning_thread_id)) => { + Ok(_) => { return if !allow_reentry { tracing::debug!("Claiming {query:?} results in a cycle because re-entrant lock is not allowed"); ClaimTransferredResult::Cycle { - with: owning_thread_id, + with: thread_id, nested: true, } } else { tracing::debug!("Reentrant lock {query:?}"); - dg.remove_transferred(query); + // dg.remove_transferred(query); - // This seems wrong? - // if owning_thread_id != current_id { - dg.unblock_runtimes_blocked_on(query, WaitResult::Completed); - dg.resume_transferred_dependents(query, WaitResult::Completed); + // // This seems wrong? + // // if owning_thread_id != current_id { + // dg.unblock_runtimes_blocked_on(query, WaitResult::Completed); + // dg.resume_transferred_dependents(query, WaitResult::Completed); - ClaimTransferredResult::Claimed { current_owner } + ClaimTransferredResult::Reentrant }; } // Lock is owned by another thread, wait for it to be released. 
@@ -366,7 +366,7 @@ impl Runtime { tracing::debug!( "Waiting for transfered lock {query:?} to be released by thread {thread_id:?}" ); - ClaimTransferredResult::OtherThread(OtherThread { + ClaimTransferredResult::ClaimedBy(OtherThread { dg, database_key: query, other_id: thread_id, @@ -377,13 +377,19 @@ impl Runtime { } } + #[cold] + pub(super) fn remove_transferred(&self, query: DatabaseKeyIndex) { + self.dependency_graph.lock().remove_transferred(query); + } + pub(super) fn resolved_transferred_thread_id( &self, query: DatabaseKeyIndex, + ignore: DatabaseKeyIndex, ) -> Option { self.dependency_graph .lock() - .resolved_transferred_id(query) + .resolved_transferred_id(query, Some(ignore)) .map(|(id, _)| id) } diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 6da1adaa6..11212026f 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -184,15 +184,15 @@ impl DependencyGraph { &mut self, database_key_index: DatabaseKeyIndex, current_id: ThreadId, - ) -> Result<(DatabaseKeyIndex, ThreadId), Option> { - let owner_thread = self.resolved_transferred_id(database_key_index); + ) -> Result> { + let owner_thread = self.resolved_transferred_id(database_key_index, None); let Some((thread_id, owner_key)) = owner_thread else { return Err(None); }; if thread_id == current_id || self.depends_on(thread_id, current_id) { - Ok((owner_key, thread_id)) + Ok(owner_key) } else { Err(Some(thread_id)) } @@ -209,6 +209,7 @@ impl DependencyGraph { pub(super) fn resolved_transferred_id( &self, database_key: DatabaseKeyIndex, + ignore: Option, ) -> Option<(ThreadId, DatabaseKeyIndex)> { let Some(&(mut resolved_thread, owner)) = self.transfered.get(&database_key) else { return None; @@ -217,6 +218,9 @@ impl DependencyGraph { let mut current_owner = owner; while let Some(&(next_thread, next_key)) = self.transfered.get(¤t_owner) { + if Some(next_key) == ignore { + break; + } resolved_thread = next_thread; current_owner = next_key; } 
@@ -233,6 +237,33 @@ impl DependencyGraph { ) { let mut owner_changed = current_thread != new_owner_thread; + // If we have `c -> a -> d` and we now insert a mapping `d -> c`, then remove the last segment (`a -> d`) + // to avoid cycles. + // A cycle between transfers can occur when a later iteration has a different outer most query than + // a previous iteration. The second iteration then hits `cycle_initial` for a different head, (e.g. for `c` where it previously was `d`). + let mut last_segment = self.transfered.entry(new_owner); + + while let std::collections::hash_map::Entry::Occupied(entry) = last_segment { + let next_target = entry.get().1; + if next_target == query { + tracing::debug!( + "Remove mapping from {:?} to {:?} to prevent a cycle", + entry.key(), + query + ); + let old_dependents = self.transfered_dependents.get_mut(&query).unwrap(); + let index = old_dependents + .iter() + .position(|key| key == entry.key()) + .unwrap(); + old_dependents.swap_remove(index); + entry.remove(); + break; + } + + last_segment = self.transfered.entry(next_target); + } + // TODO: Skip unblocks for transitive queries if the old owner is the same as the new owner? match self.transfered.entry(query) { std::collections::hash_map::Entry::Vacant(entry) => { @@ -272,11 +303,7 @@ impl DependencyGraph { return None; } - if let &[(key, _)] = candidates { - return Some(key); - } - - let mut possible_tranfer_targets: Vec<_> = candidates + let possible_tranfer_targets: Vec<_> = candidates .iter() .filter_map(|&(key, thread)| { // Ensure that transferring to this other thread won't introduce any cyclic wait dependency (where `thread` is blocked on `other_thread` and the other way round).) 
@@ -288,19 +315,15 @@ impl DependencyGraph { }) .collect(); - if possible_tranfer_targets.is_empty() { - panic!( - "No possible transfer targets found for query {:?}", - candidates - ); - } else if let &[target] = &*possible_tranfer_targets { - Some(target) - } else { - possible_tranfer_targets - .into_iter() - .min_by_key(|target| (target.ingredient_index(), target.key_index())) - .map(|target| target) - } + tracing::debug!("Possible transfer targets: {:?}", possible_tranfer_targets); + + let selection = possible_tranfer_targets + .into_iter() + .min_by_key(|target| (target.ingredient_index(), target.key_index())) + .map(|target| target); + + tracing::debug!("Selected transfer target: {selection:?}"); + selection } pub(super) fn resume_transferred_dependents( From 65d973f55398fc0c95d4fdd1682829250f6a5fc7 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Mon, 6 Oct 2025 19:20:08 +0200 Subject: [PATCH 09/45] More bugfixes --- src/runtime/dependency_graph.rs | 34 ++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 11212026f..dde40655b 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -237,13 +237,23 @@ impl DependencyGraph { ) { let mut owner_changed = current_thread != new_owner_thread; - // If we have `c -> a -> d` and we now insert a mapping `d -> c`, then remove the last segment (`a -> d`) - // to avoid cycles. + // If we have `c -> a -> d` and we now insert a mapping `d -> c`, rewrite the mapping to + // `d -> c -> a` to avoid cycles. + // + // A more complex is `e -> c -> a -> d -> b` where we now transfer `d -> c`. Respine + // ``` + // e -> c -> a -> b + // d / + // ``` + // + // The first part here only takes care of removing `d` form ` a -> d -> b` (so that it becomes `a -> b`). + // The `d -> c` mapping is inserted by the `match` statement below. 
+ // // A cycle between transfers can occur when a later iteration has a different outer most query than // a previous iteration. The second iteration then hits `cycle_initial` for a different head, (e.g. for `c` where it previously was `d`). let mut last_segment = self.transfered.entry(new_owner); - while let std::collections::hash_map::Entry::Occupied(entry) = last_segment { + while let std::collections::hash_map::Entry::Occupied(mut entry) = last_segment { let next_target = entry.get().1; if next_target == query { tracing::debug!( @@ -251,13 +261,31 @@ impl DependencyGraph { entry.key(), query ); + + // Remove `b` from the dependents of `d` and remove the mapping from `a -> d`. let old_dependents = self.transfered_dependents.get_mut(&query).unwrap(); let index = old_dependents .iter() .position(|key| key == entry.key()) .unwrap(); old_dependents.swap_remove(index); + // `a` in `a -> d` + let previous_source = *entry.key(); entry.remove(); + + // If there's a `d -> b` mapping, remove `d` from `b`'s dependents and connect `a` with `b` + if let Some(next_next) = self.transfered.remove(&query) { + // connect `a` with `b` (okay to use `insert` because we removed the `a` mapping before). 
+ self.transfered.insert(previous_source, next_next); + let next_next_dependents = + self.transfered_dependents.get_mut(&next_next.1).unwrap(); + let query_index = next_next_dependents + .iter() + .position(|key| *key == query) + .unwrap(); + next_next_dependents[query_index] = previous_source; + } + break; } From 3efde9148a17a7eb58dc3f1514103e9540579584 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Mon, 6 Oct 2025 19:29:40 +0200 Subject: [PATCH 10/45] Short circuit in some cases --- src/function/sync.rs | 3 +++ src/runtime/dependency_graph.rs | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/function/sync.rs b/src/function/sync.rs index 7eb4aee39..781872983 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -225,6 +225,7 @@ impl<'me> ClaimGuard<'me> { self.mode = mode; } + #[inline(always)] fn release_default(&self, wait_result: WaitResult) { let mut syncs = self.sync_table.syncs.lock(); let state = syncs.remove(&self.key_index).expect("key claimed twice?"); @@ -232,6 +233,7 @@ impl<'me> ClaimGuard<'me> { self.release(wait_result, state); } + #[inline(always)] fn release(&self, wait_result: WaitResult, state: SyncState) { let database_key_index = self.database_key_index(); tracing::debug!("release_and_unblock({database_key_index:?})"); @@ -327,6 +329,7 @@ impl<'me> ClaimGuard<'me> { } impl Drop for ClaimGuard<'_> { + #[inline] fn drop(&mut self) { let wait_result = if thread::panicking() { WaitResult::Panicked diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index dde40655b..0c55b0bb7 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -235,6 +235,10 @@ impl DependencyGraph { new_owner: DatabaseKeyIndex, new_owner_thread: ThreadId, ) { + if self.transfered.get(&query) == Some(&(new_owner_thread, new_owner)) { + return; + } + let mut owner_changed = current_thread != new_owner_thread; // If we have `c -> a -> d` and we now insert a mapping `d -> c`, rewrite 
the mapping to @@ -253,7 +257,7 @@ impl DependencyGraph { // a previous iteration. The second iteration then hits `cycle_initial` for a different head, (e.g. for `c` where it previously was `d`). let mut last_segment = self.transfered.entry(new_owner); - while let std::collections::hash_map::Entry::Occupied(mut entry) = last_segment { + while let std::collections::hash_map::Entry::Occupied(entry) = last_segment { let next_target = entry.get().1; if next_target == query { tracing::debug!( From 995acd7d10ace3f3c3dd14ece3f0b4c66ebeaf57 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Mon, 6 Oct 2025 19:58:57 +0200 Subject: [PATCH 11/45] Short circuit in drop --- src/function/sync.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/function/sync.rs b/src/function/sync.rs index 781872983..ee5e095d7 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -245,16 +245,16 @@ impl<'me> ClaimGuard<'me> { .. } = state; + if !anyone_waiting { + return; + } + let runtime = self.zalsa.runtime(); if claimed_twice { runtime.remove_transferred(database_key_index); } - if !anyone_waiting { - return; - } - if is_transfer_target { tracing::debug!("unblock transferred queries owned by {database_key_index:?}"); runtime.unblock_transferred_queries(database_key_index, wait_result); From 445f8122f744904815f286b15b3bb77c75544cad Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Mon, 6 Oct 2025 20:23:01 +0200 Subject: [PATCH 12/45] Delete some unused code --- src/cycle.rs | 12 --- src/function.rs | 6 +- src/function/execute.rs | 29 +------ src/function/fetch.rs | 5 +- src/function/maybe_changed_after.rs | 17 +--- src/function/memo.rs | 123 ++-------------------------- src/function/sync.rs | 47 +++-------- src/ingredient.rs | 4 +- src/runtime.rs | 15 +--- src/runtime/dependency_graph.rs | 27 ++++-- src/zalsa_local.rs | 29 +------ 11 files changed, 48 insertions(+), 266 deletions(-) diff --git a/src/cycle.rs b/src/cycle.rs index 0cf54336d..5202e2d02 100644 
--- a/src/cycle.rs +++ b/src/cycle.rs @@ -407,22 +407,10 @@ pub enum ProvisionalStatus { Provisional { iteration: IterationCount, verified_at: Revision, - nested: bool, }, Final { iteration: IterationCount, verified_at: Revision, - nested: bool, }, FallbackImmediate, } - -impl ProvisionalStatus { - pub(crate) fn nested(&self) -> bool { - match self { - ProvisionalStatus::Provisional { nested, .. } => *nested, - ProvisionalStatus::Final { nested, .. } => *nested, - ProvisionalStatus::FallbackImmediate => false, - } - } -} diff --git a/src/function.rs b/src/function.rs index d6e983a1b..9db1f0e37 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, SyncGuard, SyncTable}; +pub(crate) use sync::{ClaimGuard, SyncGuard, SyncOwnerId, SyncTable}; use std::any::Any; use std::fmt; @@ -349,14 +349,12 @@ where ProvisionalStatus::Final { iteration, verified_at: memo.verified_at.load(), - nested: memo.revisions.is_nested_cycle(), } } } else { ProvisionalStatus::Provisional { iteration, verified_at: memo.verified_at.load(), - nested: memo.revisions.is_nested_cycle(), } }) } @@ -413,7 +411,7 @@ where match self.sync_table.try_claim(zalsa, key_index, false) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), ClaimResult::Cycle { with, nested } => WaitForResult::Cycle { with, nested }, - ClaimResult::Claimed(guard) => WaitForResult::Available(guard), + ClaimResult::Claimed(_) => WaitForResult::Available, } } diff --git a/src/function/execute.rs b/src/function/execute.rs index eb9eaa4d3..2ff5fe81f 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -167,10 +167,7 @@ where && old_memo.cycle_heads().contains(&database_key_index) { previous_memo = Some(old_memo); - - if old_memo.revisions.is_nested_cycle() { - iteration_count = old_memo.revisions.iteration(); - } + iteration_count = old_memo.revisions.iteration(); } } @@ -231,25 
+228,6 @@ where let outer_cycle = outer_cycle(zalsa, &cycle_heads, database_key_index); - // let outer_cycle = cycle_heads - // .iter() - // .filter(|head| head.database_key_index != database_key_index) - // .find_map(|head| { - // let head_ingredient = - // zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - - // let result = - // head_ingredient.wait_for(zalsa, head.database_key_index.key_index()); - // tracing::debug!( - // "Wait for result for {:?}: {result:?} {:?}", - // head.database_key_index, - // result - // ); - - // let is_outer_cycle = matches!(result, WaitForResult::Cycle(false)); - // is_outer_cycle.then_some(head.database_key_index) - // }); - // Did the new result we got depend on our own provisional value, in a cycle? if !cycle_heads.contains(&database_key_index) { if let Some(outer) = outer_cycle { @@ -331,12 +309,11 @@ where completed_query .revisions .set_cycle_converged(this_converged); - completed_query.revisions.mark_nested_cycle(); if let Some(outer_cycle) = outer_cycle { tracing::debug!( - "Detected nested cycle {database_key_index:?}, iterate it as part of the outer cycle {outer_cycle:?}" - ); + "Detected nested cycle {database_key_index:?}, iterate it as part of the outer cycle {outer_cycle:?}" + ); completed_query.revisions.set_cycle_heads(cycle_heads); claim_guard.set_release_mode(ReleaseMode::TransferTo(outer_cycle)); diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 57ca81063..d6afb819c 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -258,16 +258,13 @@ where if can_shallow_update.yes() { self.update_shallow(zalsa, database_key_index, memo, can_shallow_update); - if C::CYCLE_STRATEGY == CycleRecoveryStrategy::Fixpoint - && memo.revisions.is_nested_cycle() - { + if C::CYCLE_STRATEGY == CycleRecoveryStrategy::Fixpoint { // This feels strange. I feel like we need to preserve the cycle heads. Let's say a cycle head only sometimes participates in the cycle. 
// This doesn't mean that the value becomes final because of it. The query might as well be cyclic in the next iteration but // we then never re-executed that query because it was marked as `verified_final`. memo.revisions .cycle_heads() .clear_except(database_key_index); - memo.revisions.reset_nested_cycle(); } crate::tracing::debug!( diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 7f0b2dea4..c174d68cc 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -400,7 +400,6 @@ where ProvisionalStatus::Final { iteration, verified_at, - nested: _, } => { // Only consider the cycle head if it is from the same revision as the memo if verified_at != memo_verified_at { @@ -482,21 +481,7 @@ where } _ => { return false; - } // TryClaimHeadsResult::Running(running) => { - // running.block_on(&mut cycle_heads_iter); - // } - // TryClaimHeadsResult::Available(available_cycle_head) => { - // // Check the cycle heads recursively - // // if available_cycle_head.is_nested(zalsa) { - // // available_cycle_head.queue_cycle_heads(&mut cycle_heads_iter); - // // } else { - // // return false; - // // } - // return false; - // } - // TryClaimHeadsResult::Finalized | TryClaimHeadsResult::Running(_) => { - // return false; - // } + } } } diff --git a/src/function/memo.rs b/src/function/memo.rs index 9d1e22f75..8ca012357 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -6,7 +6,7 @@ use std::ptr::NonNull; use smallvec::SmallVec; use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount, ProvisionalStatus}; -use crate::function::{ClaimGuard, Configuration, IngredientImpl}; +use crate::function::{Configuration, IngredientImpl}; use crate::ingredient::{Ingredient, WaitForResult}; use crate::key::DatabaseKeyIndex; use crate::revision::AtomicRevision; @@ -200,15 +200,8 @@ impl<'db, C: Configuration> Memo<'db, C> { TryClaimHeadsResult::Finalized => { all_cycles = false; } - 
TryClaimHeadsResult::Available(available) => { - // if available.is_nested(zalsa) { - // // This is a nested cycle. The lock of nested cycles is released - // // when there query completes. But we need to recurse - // // TODO: What about cycle initial values. Do we need to reset nested? - // available.queue_cycle_heads(&mut cycle_heads); - // } else { + TryClaimHeadsResult::Available => { all_cycles = false; - // } } TryClaimHeadsResult::Running(running) => { all_cycles = false; @@ -221,38 +214,6 @@ impl<'db, C: Configuration> Memo<'db, C> { } } - /// Tries to claim all cycle heads to see if they're finalized or available. - /// - /// Unlike `block_on_heads`, this code does not block on any cycle head. Instead it returns `false` if - /// claiming all cycle heads failed because one of them is running on another thread. - pub(super) fn try_claim_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool { - let _entered = crate::tracing::debug_span!("try_claim_heads").entered(); - - let cycle_heads = self.revisions.cycle_heads(); - if cycle_heads.is_empty() { - return true; - } - - let mut cycle_heads = - TryClaimCycleHeadsIter::new(zalsa, zalsa_local, self.revisions.cycle_heads()); - - while let Some(claim_result) = cycle_heads.next() { - match claim_result { - TryClaimHeadsResult::Cycle { .. } | TryClaimHeadsResult::Finalized => {} - TryClaimHeadsResult::Available(available) => { - if available.is_nested(zalsa) { - available.queue_cycle_heads(&mut cycle_heads); - } - } - TryClaimHeadsResult::Running(_) => { - return false; - } - } - } - - true - } - /// Cycle heads that should be propagated to dependent queries. 
#[inline(always)] pub(super) fn cycle_heads(&self) -> &CycleHeads { @@ -263,53 +224,6 @@ impl<'db, C: Configuration> Memo<'db, C> { } } - // pub(super) fn root_cycle_heads( - // &self, - // zalsa: &Zalsa, - // database_key_index: DatabaseKeyIndex, - // ) -> impl Iterator { - // let mut queue: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]> = self - // .cycle_heads() - // .iter() - // .filter(|head| head.database_key_index != database_key_index) - // .map(|head| (head.database_key_index, head.iteration_count.load())) - // .collect(); - - // let mut visited: FxHashSet<_> = queue.iter().copied().collect(); - // let mut roots: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]> = SmallVec::new(); - - // while let Some((next_key, next_iteration_count)) = queue.pop() { - // let ingredient = zalsa.lookup_ingredient(next_key.ingredient_index()); - // let nested = match ingredient.provisional_status(zalsa, next_key.key_index()) { - // Some( - // ProvisionalStatus::Final { nested, .. } - // | ProvisionalStatus::Provisional { nested, .. }, - // ) => nested, - // None | Some(ProvisionalStatus::FallbackImmediate) => false, - // }; - - // if nested { - // // If this is a nested cycle head, keep following its cycle heads until we find a root. - // queue.extend( - // ingredient - // .cycle_heads(zalsa, next_key.key_index()) - // // TODO: Do we need to include the removed heads here? - // // I think so - // .iter() - // .filter_map(|head| { - // let entry = (head.database_key_index, head.iteration_count.load()); - // visited.insert(entry).then_some(entry) - // }), - // ); - // continue; - // } - - // roots.push((next_key, next_iteration_count)); - // } - - // roots.into_iter() - // } - /// Mark memo as having been verified in the `revision_now`, which should /// be the current revision. 
/// The caller is responsible to update the memo's `accumulated` state if their accumulated @@ -528,7 +442,7 @@ pub(super) enum TryClaimHeadsResult<'me> { Finalized, /// The cycle head is not finalized, but it can be claimed. - Available(AvailableCycleHead<'me>), + Available, /// The cycle head is currently executed on another thread. Running(RunningCycleHead<'me>), @@ -551,28 +465,6 @@ impl<'a> RunningCycleHead<'a> { } } -pub(super) struct AvailableCycleHead<'me> { - database_key_index: DatabaseKeyIndex, - _guard: ClaimGuard<'me>, - ingredient: &'me dyn Ingredient, -} - -impl<'a> AvailableCycleHead<'a> { - pub(super) fn is_nested(&self, zalsa: &Zalsa) -> bool { - self.ingredient - .provisional_status(zalsa, self.database_key_index.key_index()) - .is_some_and(|status| status.nested()) - } - - pub(super) fn queue_cycle_heads(&self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { - let nested_heads = self - .ingredient - .cycle_heads(cycle_heads.zalsa, self.database_key_index.key_index()); - - cycle_heads.queue_ingredient_heads(nested_heads); - } -} - /// Iterator to try claiming the transitive cycle heads of a memo. 
pub(super) struct TryClaimCycleHeadsIter<'a> { zalsa: &'a Zalsa, @@ -664,7 +556,6 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { .unwrap_or(ProvisionalStatus::Provisional { iteration: IterationCount::initial(), verified_at: Revision::start(), - nested: false, }); match cycle_head_kind { @@ -704,14 +595,10 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { ingredient, })) } - WaitForResult::Available(guard) => { + WaitForResult::Available => { crate::tracing::debug!("Query {head_database_key:?} is available",); - Some(TryClaimHeadsResult::Available(AvailableCycleHead { - _guard: guard, - ingredient, - database_key_index: head_database_key, - })) + Some(TryClaimHeadsResult::Available) } } } diff --git a/src/function/sync.rs b/src/function/sync.rs index ee5e095d7..e80bff222 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -27,7 +27,7 @@ pub(crate) enum ClaimResult<'a> { pub(crate) struct SyncState { /// The thread id that is owning this query (actively executing it or iterating it as part of a larger cycle). - id: OwnerId, + id: SyncOwnerId, /// Set to true if any other queries are blocked, /// waiting for this query to complete. 
@@ -57,8 +57,8 @@ impl SyncTable { let id = occupied_entry.get().id; let id = match id { - OwnerId::Thread(id) => id, - OwnerId::Transferred => { + SyncOwnerId::Thread(id) => id, + SyncOwnerId::Transferred => { let current_id = thread::current().id(); let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); return match zalsa @@ -90,7 +90,7 @@ impl SyncTable { }; } - *id = OwnerId::Thread(current_id); + *id = SyncOwnerId::Thread(current_id); *claimed_twice = true; ClaimResult::Claimed(ClaimGuard { @@ -105,7 +105,7 @@ impl SyncTable { } ClaimTransferredResult::Released => { occupied_entry.insert(SyncState { - id: OwnerId::Thread(thread::current().id()), + id: SyncOwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, claimed_twice: false, @@ -147,7 +147,7 @@ impl SyncTable { } std::collections::hash_map::Entry::Vacant(vacant_entry) => { vacant_entry.insert(SyncState { - id: OwnerId::Thread(thread::current().id()), + id: SyncOwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, claimed_twice: false, @@ -162,33 +162,19 @@ impl SyncTable { } } - fn make_transfer_target( - &self, - key_index: Id, - zalsa: &Zalsa, - ignore: DatabaseKeyIndex, - ) -> Option { + fn make_transfer_target(&self, key_index: Id) -> Option { let mut syncs = self.syncs.lock(); syncs.get_mut(&key_index).map(|state| { state.anyone_waiting = true; state.is_transfer_target = true; - match state.id { - OwnerId::Thread(thread_id) => thread_id, - OwnerId::Transferred => zalsa - .runtime() - .resolved_transferred_thread_id( - DatabaseKeyIndex::new(self.ingredient, key_index), - ignore, - ) - .unwrap(), - } + state.id }) } } #[derive(Copy, Clone, Debug)] -enum OwnerId { +pub(crate) enum SyncOwnerId { /// Entry is owned by this thread Thread(thread::ThreadId), /// Entry has been transferred and is owned by another thread. 
@@ -196,12 +182,6 @@ enum OwnerId { Transferred, } -impl OwnerId { - const fn is_thread(&self) -> bool { - matches!(self, OwnerId::Thread(_)) - } -} - /// Marks an active 'claim' in the synchronization map. The claim is /// released when this value is dropped. #[must_use] @@ -274,7 +254,7 @@ impl<'me> ClaimGuard<'me> { if state.get().claimed_twice { state.get_mut().claimed_twice = false; - state.get_mut().id = OwnerId::Transferred; + state.get_mut().id = SyncOwnerId::Transferred; } else { self.release(WaitResult::Completed, state.remove()); } @@ -284,14 +264,12 @@ impl<'me> ClaimGuard<'me> { pub(crate) fn transfer(&self, new_owner: DatabaseKeyIndex) { let self_key = self.database_key_index(); - let runtime = self.zalsa.runtime(); - let owner_ingredient = self.zalsa.lookup_ingredient(new_owner.ingredient_index()); // Get the owning thread of `new_owner`. let owner_sync_table = owner_ingredient.sync_table(); let owner_thread_id = owner_sync_table - .make_transfer_target(new_owner.key_index(), self.zalsa, self_key) + .make_transfer_target(new_owner.key_index()) .expect("new owner to be a locked query"); tracing::debug!( @@ -300,6 +278,7 @@ impl<'me> ClaimGuard<'me> { let mut syncs = self.sync_table.syncs.lock(); + let runtime = self.zalsa.runtime(); runtime.transfer_lock(self_key, thread::current().id(), new_owner, owner_thread_id); let SyncState { @@ -309,7 +288,7 @@ impl<'me> ClaimGuard<'me> { .. } = syncs.get_mut(&self.key_index).expect("key claimed twice?"); - *id = OwnerId::Transferred; + *id = SyncOwnerId::Transferred; *claimed_twice = false; // TODO: Do we need to wake up any threads that are awaiting any of the dependents to update the dependency graph -> I think so. 
diff --git a/src/ingredient.rs b/src/ingredient.rs index bb3f2fc95..97907c515 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -3,7 +3,7 @@ use std::fmt; use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount, ProvisionalStatus}; use crate::database::RawDatabase; -use crate::function::{ClaimGuard, VerifyCycleHeads, VerifyResult}; +use crate::function::{VerifyCycleHeads, VerifyResult}; use crate::hash::{FxHashSet, FxIndexSet}; use crate::runtime::Running; use crate::sync::Arc; @@ -320,7 +320,7 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) #[derive(Debug)] pub enum WaitForResult<'me> { Running(Running<'me>), - Available(ClaimGuard<'me>), + Available, Cycle { with: crate::sync::thread::ThreadId, nested: bool, diff --git a/src/runtime.rs b/src/runtime.rs index 855da283e..c1f66149e 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,6 +1,6 @@ use self::dependency_graph::DependencyGraph; use crate::durability::Durability; -use crate::function::SyncGuard; +use crate::function::{SyncGuard, SyncOwnerId}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::thread::{self, ThreadId}; @@ -382,23 +382,12 @@ impl Runtime { self.dependency_graph.lock().remove_transferred(query); } - pub(super) fn resolved_transferred_thread_id( - &self, - query: DatabaseKeyIndex, - ignore: DatabaseKeyIndex, - ) -> Option { - self.dependency_graph - .lock() - .resolved_transferred_id(query, Some(ignore)) - .map(|(id, _)| id) - } - pub(super) fn transfer_lock( &self, query: DatabaseKeyIndex, current_thread: ThreadId, new_owner: DatabaseKeyIndex, - new_owner_thread: ThreadId, + new_owner_thread: SyncOwnerId, ) { self.dependency_graph.lock().transfer_lock( query, diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 0c55b0bb7..8481591cd 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -3,6 +3,7 @@ use std::pin::Pin; use 
rustc_hash::FxHashMap; use smallvec::{smallvec, SmallVec}; +use crate::function::SyncOwnerId; #[cfg(debug_assertions)] use crate::hash::FxHashSet; use crate::key::DatabaseKeyIndex; @@ -211,9 +212,7 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, ignore: Option, ) -> Option<(ThreadId, DatabaseKeyIndex)> { - let Some(&(mut resolved_thread, owner)) = self.transfered.get(&database_key) else { - return None; - }; + let &(mut resolved_thread, owner) = self.transfered.get(&database_key)?; let mut current_owner = owner; @@ -233,14 +232,21 @@ impl DependencyGraph { query: DatabaseKeyIndex, current_thread: ThreadId, new_owner: DatabaseKeyIndex, - new_owner_thread: ThreadId, + new_owner_thread: SyncOwnerId, ) { - if self.transfered.get(&query) == Some(&(new_owner_thread, new_owner)) { - return; - } + let new_owner_thread = match new_owner_thread { + SyncOwnerId::Thread(thread) => thread, + SyncOwnerId::Transferred => { + self.resolved_transferred_id(new_owner, Some(query)) + .unwrap() + .0 + } + }; let mut owner_changed = current_thread != new_owner_thread; + // TODO: Can we move this into the occupied branch? It's pointless to run this check if there's no existing mapping. + // If we have `c -> a -> d` and we now insert a mapping `d -> c`, rewrite the mapping to // `d -> c -> a` to avoid cycles. // @@ -303,6 +309,10 @@ impl DependencyGraph { entry.insert((new_owner_thread, new_owner)); } std::collections::hash_map::Entry::Occupied(mut entry) => { + if entry.get() == &(new_owner_thread, new_owner) { + return; + } + // `Transfer `c -> b` after a previous `c -> d` mapping. // Update the owner and remove the query from the old owner's dependents. 
let old_owner = entry.get().1; @@ -351,8 +361,7 @@ impl DependencyGraph { let selection = possible_tranfer_targets .into_iter() - .min_by_key(|target| (target.ingredient_index(), target.key_index())) - .map(|target| target); + .min_by_key(|target| (target.ingredient_index(), target.key_index())); tracing::debug!("Selected transfer target: {selection:?}"); selection diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index c4ee34316..57427f3d6 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -15,7 +15,7 @@ use crate::cycle::{empty_cycle_heads, AtomicIterationCount, CycleHeads, Iteratio use crate::durability::Durability; use crate::key::DatabaseKeyIndex; use crate::runtime::Stamp; -use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::atomic::AtomicBool; use crate::table::{PageIndex, Slot, Table}; use crate::tracked_struct::{Disambiguator, Identity, IdentityHash}; use crate::zalsa::{IngredientIndex, Zalsa}; @@ -515,7 +515,6 @@ impl QueryRevisionsExtra { cycle_heads, tracked_struct_ids, iteration: iteration.into(), - nested_cycle: false.into(), cycle_converged: converged, })) }; @@ -567,12 +566,6 @@ struct QueryRevisionsExtraInner { iteration: AtomicIterationCount, cycle_converged: bool, - - #[cfg_attr( - feature = "persistence", - serde(with = "crate::zalsa_local::persistence::atomic_bool") - )] - nested_cycle: AtomicBool, } impl QueryRevisionsExtraInner { @@ -585,7 +578,6 @@ impl QueryRevisionsExtraInner { cycle_heads, iteration: _, cycle_converged: _, - nested_cycle: _, } = self; #[cfg(feature = "accumulator")] @@ -682,25 +674,6 @@ impl QueryRevisions { } } - pub(crate) fn is_nested_cycle(&self) -> bool { - match &self.extra.0 { - Some(extra) => extra.nested_cycle.load(Ordering::Relaxed), - None => false, - } - } - - pub(crate) fn reset_nested_cycle(&self) { - if let Some(extra) = &self.extra.0 { - extra.nested_cycle.store(false, Ordering::Release) - } - } - - pub(crate) fn mark_nested_cycle(&mut self) { - if let Some(extra) = &mut 
self.extra.0 { - *extra.nested_cycle.get_mut() = true - } - } - pub(crate) fn iteration(&self) -> IterationCount { match &self.extra.0 { Some(extra) => extra.iteration.load(), From 824559a391a2d212608a75d98027486ba6e1a35d Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 8 Oct 2025 13:55:07 +0200 Subject: [PATCH 13/45] A working solution --- src/function/fetch.rs | 7 +- src/function/maybe_changed_after.rs | 5 +- src/function/memo.rs | 185 +++++++++++----------------- src/function/sync.rs | 11 -- src/runtime.rs | 4 - src/runtime/dependency_graph.rs | 178 ++++++++++++++++---------- src/zalsa_local.rs | 1 + 7 files changed, 187 insertions(+), 204 deletions(-) diff --git a/src/function/fetch.rs b/src/function/fetch.rs index d6afb819c..2bca25c0f 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -140,9 +140,10 @@ where let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); if let Some(memo) = memo { - // This isn't strictly necessary, but if this is a provisional memo for an inner cycle, - // await all outer cycle heads to give the thread driving it a chance to complete - // (we don't want multiple threads competing for the queries participating in the same cycle). + // TODO: Why is this now necessary? Is it because we wake up all other threads? + // and they could "steal" our claim? Is this an argument for implementing + // resume internally in DG instead of using unblock followed by another lock call. + // Do we need the same in maybe changed after? 
if memo.value.is_some() && memo.may_be_provisional() { memo.block_on_heads(zalsa, zalsa_local); } diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index c174d68cc..e434324f9 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -413,7 +413,6 @@ where // // If we don't account for the iteration, then `a` (from iteration 0) will be finalized // because its cycle head `b` is now finalized, but `b` never pulled `a` in the last iteration. - // FIXME: Do we still need this? if iteration != cycle_head.iteration_count.load() { return false; } @@ -462,9 +461,9 @@ where cycle_heads: &CycleHeads, verified_at: Revision, ) -> bool { - let mut cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); + let cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); - while let Some(cycle_head) = cycle_heads_iter.next() { + for cycle_head in cycle_heads_iter { match cycle_head { TryClaimHeadsResult::Cycle { head_iteration_count, diff --git a/src/function/memo.rs b/src/function/memo.rs index 8ca012357..c5c35c816 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -3,11 +3,11 @@ use std::fmt::{Debug, Formatter}; use std::mem::transmute; use std::ptr::NonNull; -use smallvec::SmallVec; - -use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount, ProvisionalStatus}; +use crate::cycle::{ + empty_cycle_heads, CycleHeads, CycleHeadsIterator, IterationCount, ProvisionalStatus, +}; use crate::function::{Configuration, IngredientImpl}; -use crate::ingredient::{Ingredient, WaitForResult}; +use crate::ingredient::WaitForResult; use crate::key::DatabaseKeyIndex; use crate::revision::AtomicRevision; use crate::runtime::Running; @@ -191,21 +191,36 @@ impl<'db, C: Configuration> Memo<'db, C> { heads: &CycleHeads, ) -> bool { let _entered = crate::tracing::debug_span!("block_on_heads").entered(); - let mut cycle_heads = TryClaimCycleHeadsIter::new(zalsa, 
zalsa_local, heads); + let cycle_heads = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, heads); let mut all_cycles = true; - while let Some(claim_result) = cycle_heads.next() { + for claim_result in cycle_heads { match claim_result { - TryClaimHeadsResult::Cycle { .. } => {} - TryClaimHeadsResult::Finalized => { - all_cycles = false; + TryClaimHeadsResult::Cycle { + current_iteration_count, + head_iteration_count, + .. + } => { + // We need to refetch if the head now has a new iteration count. + // This is to avoid a race between thread A and B: + // * thread A is in `blocks_on` (`retry_provisional`) for the memo `c`. It owns the lock for `e` + // * thread B owns `d` and calls `c`. `c` didn't depend on `e` in the first iteration. + // Thread B completes the first iteration (which bumps the iteration count on `c`). + // `c` now depends on E in the second iteration, introducing a new cycle head. + // Thread B transfers ownership of `c` to thread A (which awakes A). + // * Thread A now continues, there are no other cycle heads, so all queries result in a cycle. + // However, `d` has now a new iteration count, so it's important that we refetch `c`. + + if current_iteration_count != head_iteration_count { + all_cycles = false; + } } TryClaimHeadsResult::Available => { all_cycles = false; } TryClaimHeadsResult::Running(running) => { all_cycles = false; - running.block_on(&mut cycle_heads); + running.block_on(zalsa); } } } @@ -431,99 +446,49 @@ mod persistence { } pub(super) enum TryClaimHeadsResult<'me> { - /// Claiming every cycle head results in a cycle head. + /// Claiming the cycle head results in a cycle. Cycle { head_iteration_count: IterationCount, current_iteration_count: IterationCount, verified_at: Revision, }, - /// The cycle head has been finalized. - Finalized, - /// The cycle head is not finalized, but it can be claimed. Available, /// The cycle head is currently executed on another thread. 
- Running(RunningCycleHead<'me>), -} - -pub(super) struct RunningCycleHead<'me> { - inner: Running<'me>, - ingredient: &'me dyn Ingredient, -} - -impl<'a> RunningCycleHead<'a> { - pub(crate) fn block_on(self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) { - let database_key_index = self.inner.database_key(); - let key_index = database_key_index.key_index(); - self.inner.block_on(cycle_heads.zalsa); - - let nested_heads = self.ingredient.cycle_heads(cycle_heads.zalsa, key_index); - - cycle_heads.queue_ingredient_heads(nested_heads); - } + Running(Running<'me>), } /// Iterator to try claiming the transitive cycle heads of a memo. pub(super) struct TryClaimCycleHeadsIter<'a> { zalsa: &'a Zalsa, zalsa_local: &'a ZalsaLocal, - queue: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]>, - queued: SmallVec<[(DatabaseKeyIndex, IterationCount); 4]>, + cycle_heads: CycleHeadsIterator<'a>, } impl<'a> TryClaimCycleHeadsIter<'a> { pub(super) fn new( zalsa: &'a Zalsa, zalsa_local: &'a ZalsaLocal, - cycle_heads: &CycleHeads, + cycle_heads: &'a CycleHeads, ) -> Self { - let queue: SmallVec<_> = cycle_heads - .iter() - .map(|head| (head.database_key_index, head.iteration_count.load())) - .collect(); - let queued = queue.iter().copied().collect(); - Self { zalsa, zalsa_local, - queue, - queued, + cycle_heads: cycle_heads.iter(), } } - - fn queue_ingredient_heads(&mut self, cycle_heads: &CycleHeads) { - // Recursively wait for all cycle heads that this head depends on. It's important - // that we fetch those from the updated memo because the cycle heads can change - // between iterations and new cycle heads can be added if a query depeonds on - // some cycle heads depending on a specific condition being met - // (`a` calls `b` and `c` in iteration 0 but `c` and `d` in iteration 1 or later). - // IMPORTANT: It's critical that we get the cycle head from the latest memo - // here, in case the memo has become part of another cycle (we need to block on that too!). 
- self.queue.extend( - cycle_heads - .iter() - .map(|head| (head.database_key_index, head.iteration_count.load())) - .filter(|head| { - let already_checked = self.queued.contains(head); - - if already_checked { - false - } else { - self.queued.push(*head); - true - } - }), - ) - } } impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { type Item = TryClaimHeadsResult<'me>; fn next(&mut self) -> Option { - let (head_database_key, head_iteration_count) = self.queue.pop()?; + let head = self.cycle_heads.next()?; + + let head_database_key = head.database_key_index; + let head_iteration_count = head.iteration_count.load(); // The most common case is that the head is already in the query stack. So let's check that first. // SAFETY: We do not access the query stack reentrantly. @@ -551,56 +516,44 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { .zalsa .lookup_ingredient(head_database_key.ingredient_index()); - let cycle_head_kind = ingredient - .provisional_status(self.zalsa, head_key_index) - .unwrap_or(ProvisionalStatus::Provisional { - iteration: IterationCount::initial(), - verified_at: Revision::start(), - }); - - match cycle_head_kind { - ProvisionalStatus::Final { .. } | ProvisionalStatus::FallbackImmediate => { - // This cycle is already finalized, so we don't need to wait on it; - // keep looping through cycle heads. - crate::tracing::trace!( - "Dependent cycle head {head_database_key:?} has been finalized." - ); - Some(TryClaimHeadsResult::Finalized) - } - ProvisionalStatus::Provisional { - iteration, - verified_at, - .. - } => { - match ingredient.wait_for(self.zalsa, head_key_index) { - WaitForResult::Cycle { .. } => { - // We hit a cycle blocking on the cycle head; this means this query actively - // participates in the cycle and some other query is blocked on this thread. 
- crate::tracing::debug!( - "Waiting for {head_database_key:?} results in a cycle" - ); - Some(TryClaimHeadsResult::Cycle { - current_iteration_count: iteration, - head_iteration_count, - verified_at, - }) + match ingredient.wait_for(self.zalsa, head_key_index) { + WaitForResult::Cycle { .. } => { + // We hit a cycle blocking on the cycle head; this means this query actively + // participates in the cycle and some other query is blocked on this thread. + crate::tracing::debug!("Waiting for {head_database_key:?} results in a cycle"); + + let provisional_status = ingredient + .provisional_status(self.zalsa, head_key_index) + .expect("cycle head memo to exist"); + let (current_iteration_count, verified_at) = match provisional_status { + ProvisionalStatus::Provisional { + iteration, + verified_at, } - WaitForResult::Running(running) => { - crate::tracing::debug!( - "Ingredient {head_database_key:?} is running: {running:?}" - ); - - Some(TryClaimHeadsResult::Running(RunningCycleHead { - inner: running, - ingredient, - })) + | ProvisionalStatus::Final { + iteration, + verified_at, + } => (iteration, verified_at), + ProvisionalStatus::FallbackImmediate => { + (IterationCount::initial(), self.zalsa.current_revision()) } - WaitForResult::Available => { - crate::tracing::debug!("Query {head_database_key:?} is available",); + }; - Some(TryClaimHeadsResult::Available) - } - } + Some(TryClaimHeadsResult::Cycle { + current_iteration_count, + head_iteration_count, + verified_at, + }) + } + WaitForResult::Running(running) => { + crate::tracing::debug!("Ingredient {head_database_key:?} is running: {running:?}"); + + Some(TryClaimHeadsResult::Running(running)) + } + WaitForResult::Available => { + crate::tracing::debug!("Query {head_database_key:?} is available",); + + Some(TryClaimHeadsResult::Available) } } } diff --git a/src/function/sync.rs b/src/function/sync.rs index e80bff222..5206b98a1 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -290,17 +290,6 @@ 
impl<'me> ClaimGuard<'me> { *id = SyncOwnerId::Transferred; *claimed_twice = false; - - // TODO: Do we need to wake up any threads that are awaiting any of the dependents to update the dependency graph -> I think so. - if *anyone_waiting { - tracing::debug!( - "Wake up blocked threads after transferring ownership to {new_owner:?}" - ); - // Wake up all threads that were waiting on the query to complete so that they'll retry and block on the new owner. - let database_key = DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index); - runtime.unblock_queries_blocked_on(database_key, WaitResult::Completed); - } - *anyone_waiting = false; tracing::debug!("Transfer ownership completed"); diff --git a/src/runtime.rs b/src/runtime.rs index c1f66149e..47d560f2f 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -123,10 +123,6 @@ struct BlockedOnInner<'me> { } impl Running<'_> { - pub(crate) fn database_key(&self) -> DatabaseKeyIndex { - self.0.database_key - } - /// Blocks on the other thread to complete the computation. 
pub(crate) fn block_on(self, zalsa: &Zalsa) { let BlockedOnInner { diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 8481591cd..7e26a4e13 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -1,11 +1,8 @@ -use std::pin::Pin; - use rustc_hash::FxHashMap; use smallvec::{smallvec, SmallVec}; +use std::pin::Pin; use crate::function::SyncOwnerId; -#[cfg(debug_assertions)] -use crate::hash::FxHashSet; use crate::key::DatabaseKeyIndex; use crate::runtime::dependency_graph::edge::EdgeCondvar; use crate::runtime::WaitResult; @@ -31,11 +28,11 @@ pub(super) struct DependencyGraph { /// A `K -> Q` pair indicates that `K`'s lock is now owned by /// `Q` (The thread id of `Q` and its database key) - transfered: FxHashMap, + transferred: FxHashMap, /// A `K -> Qs` pair indicates that `K`'s lock is now owned by /// `Qs` (The thread id of `Qs` and their database keys) - transfered_dependents: FxHashMap>, + transferred_dependents: FxHashMap>, } impl DependencyGraph { @@ -43,8 +40,16 @@ impl DependencyGraph { /// /// (i.e., there is a path from `from_id` to `to_id` in the graph.) pub(super) fn depends_on(&self, from_id: ThreadId, to_id: ThreadId) -> bool { + Self::depends_on_impl(&self.edges, from_id, to_id) + } + + fn depends_on_impl( + edges: &FxHashMap, + from_id: ThreadId, + to_id: ThreadId, + ) -> bool { let mut p = from_id; - while let Some(q) = self.edges.get(&p).map(|edge| edge.blocked_on_id) { + while let Some(q) = edges.get(&p).map(|edge| edge.blocked_on_id) { if q == to_id { return true; } @@ -141,6 +146,19 @@ impl DependencyGraph { } } + /// Unblock the runtime with the given id with the given wait-result. + /// This will cause it resume execution (though it will have to grab + /// the lock on this data structure first, to recover the wait result). 
+ fn unblock_runtime(&mut self, id: ThreadId, wait_result: WaitResult) { + tracing::debug!("Unblocking runtime {id:?} with wait result {wait_result:?}"); + let edge = self.edges.remove(&id).expect("not blocked"); + self.wait_results.insert(id, wait_result); + + // Now that we have inserted the `wait_results`, + // notify the thread. + edge.notify(); + } + pub(super) fn unblock_transferred_queries( &mut self, database_key: DatabaseKeyIndex, @@ -148,8 +166,8 @@ impl DependencyGraph { ) { // If `database_key` is `c` and it has been transfered to `b` earlier, remove its entry. tracing::debug!("unblock_transferred_queries({database_key:?}"); - if let Some((_, owner)) = self.transfered.remove(&database_key) { - let owner_dependents = self.transfered_dependents.get_mut(&owner).unwrap(); + if let Some((_, owner)) = self.transferred.remove(&database_key) { + let owner_dependents = self.transferred_dependents.get_mut(&owner).unwrap(); let index = owner_dependents .iter() .position(|&x| x == database_key) @@ -161,9 +179,9 @@ impl DependencyGraph { let mut queue: SmallVec<[_; 4]> = smallvec![database_key]; while let Some(current) = queue.pop() { - self.transfered.remove(¤t); + self.transferred.remove(¤t); let transitive = self - .transfered_dependents + .transferred_dependents .remove(¤t) .unwrap_or_default(); @@ -200,8 +218,8 @@ impl DependencyGraph { } pub(super) fn remove_transferred(&mut self, database_key: DatabaseKeyIndex) { - if let Some((_, owner)) = self.transfered.remove(&database_key) { - let dependents = self.transfered_dependents.get_mut(&owner).unwrap(); + if let Some((_, owner)) = self.transferred.remove(&database_key) { + let dependents = self.transferred_dependents.get_mut(&owner).unwrap(); let index = dependents.iter().position(|h| *h == database_key).unwrap(); dependents.swap_remove(index); } @@ -212,11 +230,11 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, ignore: Option, ) -> Option<(ThreadId, DatabaseKeyIndex)> { - let &(mut resolved_thread, 
owner) = self.transfered.get(&database_key)?; + let &(mut resolved_thread, owner) = self.transferred.get(&database_key)?; let mut current_owner = owner; - while let Some(&(next_thread, next_key)) = self.transfered.get(¤t_owner) { + while let Some(&(next_thread, next_key)) = self.transferred.get(¤t_owner) { if Some(next_key) == ignore { break; } @@ -261,7 +279,7 @@ impl DependencyGraph { // // A cycle between transfers can occur when a later iteration has a different outer most query than // a previous iteration. The second iteration then hits `cycle_initial` for a different head, (e.g. for `c` where it previously was `d`). - let mut last_segment = self.transfered.entry(new_owner); + let mut last_segment = self.transferred.entry(new_owner); while let std::collections::hash_map::Entry::Occupied(entry) = last_segment { let next_target = entry.get().1; @@ -273,7 +291,7 @@ impl DependencyGraph { ); // Remove `b` from the dependents of `d` and remove the mapping from `a -> d`. - let old_dependents = self.transfered_dependents.get_mut(&query).unwrap(); + let old_dependents = self.transferred_dependents.get_mut(&query).unwrap(); let index = old_dependents .iter() .position(|key| key == entry.key()) @@ -284,11 +302,11 @@ impl DependencyGraph { entry.remove(); // If there's a `d -> b` mapping, remove `d` from `b`'s dependents and connect `a` with `b` - if let Some(next_next) = self.transfered.remove(&query) { + if let Some(next_next) = self.transferred.remove(&query) { // connect `a` with `b` (okay to use `insert` because we removed the `a` mapping before). 
- self.transfered.insert(previous_source, next_next); + self.transferred.insert(previous_source, next_next); let next_next_dependents = - self.transfered_dependents.get_mut(&next_next.1).unwrap(); + self.transferred_dependents.get_mut(&next_next.1).unwrap(); let query_index = next_next_dependents .iter() .position(|key| *key == query) @@ -299,11 +317,11 @@ impl DependencyGraph { break; } - last_segment = self.transfered.entry(next_target); + last_segment = self.transferred.entry(next_target); } // TODO: Skip unblocks for transitive queries if the old owner is the same as the new owner? - match self.transfered.entry(query) { + match self.transferred.entry(query) { std::collections::hash_map::Entry::Vacant(entry) => { // Transfer `c -> b` and there's no existing entry for `c`. entry.insert((new_owner_thread, new_owner)); @@ -318,7 +336,7 @@ impl DependencyGraph { let old_owner = entry.get().1; owner_changed = true; - let old_dependents = self.transfered_dependents.get_mut(&old_owner).unwrap(); + let old_dependents = self.transferred_dependents.get_mut(&old_owner).unwrap(); let index = old_dependents.iter().position(|key| *key == query).unwrap(); old_dependents.swap_remove(index); @@ -327,13 +345,15 @@ impl DependencyGraph { }; // Register `c` as a dependent of `b`. 
- let all_dependents = self.transfered_dependents.entry(new_owner).or_default(); + let all_dependents = self.transferred_dependents.entry(new_owner).or_default(); assert!(!all_dependents.contains(&query)); assert!(!all_dependents.contains(&new_owner)); all_dependents.push(query); + tracing::debug!("Wake up blocked threads after transferring ownership to {new_owner:?}"); if owner_changed { - self.resume_transferred_dependents(query, WaitResult::Completed); + self.unblock_transfer_target(query, new_owner_thread); + self.update_transferred_edges(query, new_owner_thread); } } @@ -345,7 +365,7 @@ impl DependencyGraph { return None; } - let possible_tranfer_targets: Vec<_> = candidates + let possible_transfer_targets: Vec<_> = candidates .iter() .filter_map(|&(key, thread)| { // Ensure that transferring to this other thread won't introduce any cyclic wait dependency (where `thread` is blocked on `other_thread` and the other way round).) @@ -357,9 +377,9 @@ impl DependencyGraph { }) .collect(); - tracing::debug!("Possible transfer targets: {:?}", possible_tranfer_targets); + tracing::debug!("Possible transfer targets: {:?}", possible_transfer_targets); - let selection = possible_tranfer_targets + let selection = possible_transfer_targets .into_iter() .min_by_key(|target| (target.ingredient_index(), target.key_index())); @@ -367,54 +387,79 @@ impl DependencyGraph { selection } - pub(super) fn resume_transferred_dependents( - &mut self, - query: DatabaseKeyIndex, - wait_result: WaitResult, - ) { - tracing::debug!("Resuming transitive dependents of query {query:?}"); - let Some(queries) = self.transfered_dependents.get(&query) else { - return; - }; + /// Finds the one query in the dependents of the `source_query` (the one that is transferred to a new owner) + /// on which the `new_owner_id` thread blocks on and unblocks it, to ensure progress. 
+ fn unblock_transfer_target(&mut self, source_query: DatabaseKeyIndex, new_owner_id: ThreadId) { + let mut queue: SmallVec<[_; 4]> = smallvec![source_query]; + + while let Some(current) = queue.pop() { + if let Some(dependents) = self.query_dependents.get_mut(¤t) { + for (i, id) in dependents.iter().enumerate() { + if *id == new_owner_id || Self::depends_on_impl(&self.edges, new_owner_id, *id) + { + let thread_id = dependents.swap_remove(i); + if dependents.is_empty() { + self.query_dependents.remove(¤t); + } + + self.unblock_runtime(thread_id, WaitResult::Completed); + + return; + } + } + }; - #[cfg(debug_assertions)] - let mut stack = FxHashSet::default(); - #[cfg(debug_assertions)] - stack.insert(query); + queue.extend( + self.transferred_dependents + .get(¤t) + .iter() + .copied() + .flatten() + .copied(), + ); + } + } - let mut queue: SmallVec<[_; 4]> = - queries.into_iter().map(|nested| (*nested, query)).collect(); + fn update_transferred_edges(&mut self, query: DatabaseKeyIndex, new_owner_thread: ThreadId) { + tracing::info!("Resuming transitive dependents of query {query:?}"); - while let Some((nested, parent)) = queue.pop() { - debug_assert_eq!(self.transfered.get(&nested).unwrap().1, parent); + let mut queue: SmallVec<[_; 4]> = smallvec![query]; - #[cfg(debug_assertions)] - if !stack.insert(nested) { - panic!("Encountered cycle while resuming the transferred dependents. between {nested:?} and {parent:?}. Current state of dependency graph: {self:#?}") - } + while let Some(query) = queue.pop() { queue.extend( - self.transfered_dependents - .get(&nested) - .into_iter() + self.transferred_dependents + .get(&query) + .iter() + .copied() .flatten() - .map(|inner| (*inner, nested)), + .copied(), ); - self.unblock_runtimes_blocked_on(nested, wait_result); - } - } + let Some(dependents) = self.query_dependents.get_mut(&query) else { + continue; + }; - /// Unblock the runtime with the given id with the given wait-result. 
- /// This will cause it resume execution (though it will have to grab - /// the lock on this data structure first, to recover the wait result). - fn unblock_runtime(&mut self, id: ThreadId, wait_result: WaitResult) { - tracing::debug!("Unblocking runtime {id:?} with wait result {wait_result:?}"); - let edge = self.edges.remove(&id).expect("not blocked"); - self.wait_results.insert(id, wait_result); + for dependent in dependents.iter_mut() { + let edge = self.edges.get_mut(dependent).unwrap(); - // Now that we have inserted the `wait_results`, - // notify the thread. - edge.notify(); + tracing::info!( + "Rewrite edge from {:?} to {new_owner_thread:?}", + edge.blocked_on_id + ); + edge.blocked_on_id = new_owner_thread; + } + + #[cfg(debug_assertions)] + { + for id in self.query_dependents.get(&query).into_iter().flatten() { + debug_assert!( + !self.depends_on(new_owner_thread, *id), + "Circular reference between blocked edges: {:#?}", + self.edges + ); + } + } + } } } @@ -443,7 +488,6 @@ mod edge { /// Signalled whenever a query with dependents completes. /// Allows those dependents to check if they are ready to unblock. 
- // condvar: unsafe<'stack_frame> Pin<&'stack_frame Condvar>, condvar: Pin<&'static EdgeCondvar>, } diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 57427f3d6..35f3915cb 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -1261,6 +1261,7 @@ pub(crate) mod persistence { } } + #[cfg(feature = "persistence")] pub(super) mod atomic_bool { use crate::sync::atomic::{AtomicBool, Ordering}; From f7f630e4f14ca2b183cde624f9450285979c06c2 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 8 Oct 2025 15:02:39 +0200 Subject: [PATCH 14/45] Simplify more --- src/function.rs | 4 +-- src/function/execute.rs | 40 ++++++++++++++-------- src/function/fetch.rs | 8 ++--- src/function/memo.rs | 2 +- src/function/sync.rs | 25 ++++---------- src/ingredient.rs | 7 ++-- src/runtime.rs | 23 +++---------- src/runtime/dependency_graph.rs | 30 ---------------- tests/parallel/cycle_a_t1_b_t2.rs | 2 +- tests/parallel/cycle_a_t1_b_t2_fallback.rs | 11 ++++-- tests/parallel/main.rs | 2 +- 11 files changed, 56 insertions(+), 98 deletions(-) diff --git a/src/function.rs b/src/function.rs index 9db1f0e37..b3dbb58ce 100644 --- a/src/function.rs +++ b/src/function.rs @@ -370,7 +370,7 @@ where .set_iteration_count(Self::database_key_index(self, input), iteration_count); } - fn set_cycle_finalized(&self, zalsa: &Zalsa, input: Id) { + fn finalize_cycle_head(&self, zalsa: &Zalsa, input: Id) { let Some(memo) = self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input)) else { @@ -410,7 +410,7 @@ where fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { match self.sync_table.try_claim(zalsa, key_index, false) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), - ClaimResult::Cycle { with, nested } => WaitForResult::Cycle { with, nested }, + ClaimResult::Cycle { inner: nested } => WaitForResult::Cycle { inner: nested }, ClaimResult::Claimed(_) => WaitForResult::Available, } } diff --git 
a/src/function/execute.rs b/src/function/execute.rs index 2ff5fe81f..51e10a07b 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -226,7 +226,7 @@ where } } - let outer_cycle = outer_cycle(zalsa, &cycle_heads, database_key_index); + let outer_cycle = outer_cycle(zalsa, zalsa_local, &cycle_heads, database_key_index); // Did the new result we got depend on our own provisional value, in a cycle? if !cycle_heads.contains(&database_key_index) { @@ -354,7 +354,7 @@ where let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - ingredient.set_cycle_finalized(zalsa, head.database_key_index.key_index()); + ingredient.finalize_cycle_head(zalsa, head.database_key_index.key_index()); } *completed_query.revisions.verified_final.get_mut() = true; @@ -527,24 +527,34 @@ impl Drop for ClearCycleHeadIfPanicking<'_, C> { fn outer_cycle( zalsa: &Zalsa, + zalsa_local: &ZalsaLocal, cycle_heads: &CycleHeads, current_key: DatabaseKeyIndex, ) -> Option { - let candidates: SmallVec<[_; 4]> = cycle_heads + cycle_heads .iter() .filter(|head| head.database_key_index != current_key) - .filter_map(|head| { - let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - match ingredient.wait_for(zalsa, head.database_key_index.key_index()) { - WaitForResult::Cycle { - with, - nested: false, - } => Some((head.database_key_index, with)), - _ => None, + .find(|head| { + // SAFETY: We don't call into with_query_stack recursively + let is_on_stack = unsafe { + zalsa_local.with_query_stack_unchecked(|stack| { + stack + .iter() + .rev() + .any(|query| query.database_key_index == head.database_key_index) + }) + }; + + if is_on_stack { + return true; } - }) - .collect(); - // Do we need to pass the thread id here to account for a potential re-entrance? 
- zalsa.runtime().transfer_target(&candidates) + let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + matches!( + ingredient.wait_for(zalsa, head.database_key_index.key_index()), + WaitForResult::Cycle { inner: false } + ) + }) + .map(|head| head.database_key_index) } diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 2bca25c0f..dc29f0f51 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -137,17 +137,17 @@ where ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); + // TOOO: Try to restrict this to `FALLBACK_IMMEDIATE`. + // There's an issue that `cycle_nested_deep_conditional_changed` hangs + // if I remove this but why? Should this be handled in `maybe_changed_after` instead? let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); if let Some(memo) = memo { - // TODO: Why is this now necessary? Is it because we wake up all other threads? - // and they could "steal" our claim? Is this an argument for implementing - // resume internally in DG instead of using unblock followed by another lock call. - // Do we need the same in maybe changed after? if memo.value.is_some() && memo.may_be_provisional() { memo.block_on_heads(zalsa, zalsa_local); } } + return None; } ClaimResult::Cycle { .. 
} => { diff --git a/src/function/memo.rs b/src/function/memo.rs index c5c35c816..7883e7b58 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -182,7 +182,7 @@ impl<'db, C: Configuration> Memo<'db, C> { return true; } - return block_on_heads_cold(zalsa, zalsa_local, self.cycle_heads()); + return block_on_heads_cold(zalsa, zalsa_local, cycle_heads); #[inline(never)] fn block_on_heads_cold( diff --git a/src/function/sync.rs b/src/function/sync.rs index 5206b98a1..d90933c6e 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -2,7 +2,7 @@ use rustc_hash::FxHashMap; use crate::key::DatabaseKeyIndex; use crate::runtime::{BlockResult, ClaimTransferredResult, Running, WaitResult}; -use crate::sync::thread::{self, ThreadId}; +use crate::sync::thread::{self}; use crate::sync::Mutex; use crate::zalsa::Zalsa; use crate::{Id, IngredientIndex}; @@ -20,7 +20,7 @@ pub(crate) enum ClaimResult<'a> { /// Can't claim the query because it is running on an other thread. Running(Running<'a>), /// Claiming the query results in a cycle. - Cycle { with: ThreadId, nested: bool }, + Cycle { inner: bool }, /// Successfully claimed the query. Claimed(ClaimGuard<'a>), } @@ -67,13 +67,9 @@ impl SyncTable { { ClaimTransferredResult::ClaimedBy(other_thread) => { occupied_entry.get_mut().anyone_waiting = true; - let thread_id = other_thread.id(); match other_thread.block(write) { - BlockResult::Cycle => ClaimResult::Cycle { - with: thread_id, - nested: false, - }, + BlockResult::Cycle => ClaimResult::Cycle { inner: false }, BlockResult::Running(running) => ClaimResult::Running(running), } } @@ -83,11 +79,7 @@ impl SyncTable { } = occupied_entry.into_mut(); if *claimed_twice { - // TODO: Is this thread id correct? 
- return ClaimResult::Cycle { - with: current_id, - nested: false, - }; + return ClaimResult::Cycle { inner: false }; } *id = SyncOwnerId::Thread(current_id); @@ -100,8 +92,8 @@ impl SyncTable { mode: ReleaseMode::SelfOnly, }) } - ClaimTransferredResult::Cycle { with, nested } => { - ClaimResult::Cycle { nested, with } + ClaimTransferredResult::Cycle { inner: nested } => { + ClaimResult::Cycle { inner: nested } } ClaimTransferredResult::Released => { occupied_entry.insert(SyncState { @@ -139,10 +131,7 @@ impl SyncTable { write, ) { BlockResult::Running(blocked_on) => ClaimResult::Running(blocked_on), - BlockResult::Cycle => ClaimResult::Cycle { - nested: false, - with: id, - }, + BlockResult::Cycle => ClaimResult::Cycle { inner: false }, } } std::collections::hash_map::Entry::Vacant(vacant_entry) => { diff --git a/src/ingredient.rs b/src/ingredient.rs index 97907c515..4737193be 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -175,7 +175,7 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { unreachable!("increment_iteration_count should only be called on cycle heads and only functions can be cycle heads"); } - fn set_cycle_finalized(&self, _zalsa: &Zalsa, _input: Id) { + fn finalize_cycle_head(&self, _zalsa: &Zalsa, _input: Id) { unreachable!("finalize_cycle_head should only be called on cycle heads and only functions can be cycle heads"); } @@ -321,10 +321,7 @@ pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) pub enum WaitForResult<'me> { Running(Running<'me>), Available, - Cycle { - with: crate::sync::thread::ThreadId, - nested: bool, - }, + Cycle { inner: bool }, } impl WaitForResult<'_> { diff --git a/src/runtime.rs b/src/runtime.rs index 47d560f2f..b4b0660b9 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -69,7 +69,7 @@ pub(crate) enum ClaimTransferredResult<'me> { /// /// The lock is hold by the current thread or there's another thread that is waiting on the current thread, /// and blocking this thread on 
the other thread would result in a deadlock/cycle. - Cycle { with: ThreadId, nested: bool }, + Cycle { inner: bool }, /// Query is no longer a transferred query. Released, @@ -82,10 +82,6 @@ pub(super) struct OtherThread<'me> { } impl<'me> OtherThread<'me> { - pub(super) fn id(&self) -> ThreadId { - self.other_id - } - pub(super) fn block(self, query_mutex_guard: SyncGuard<'me>) -> BlockResult<'me> { let thread_id = thread::current().id(); // Cycle in the same thread. @@ -339,12 +335,9 @@ impl Runtime { match dg.block_on_transferred(query, thread_id) { Ok(_) => { - return if !allow_reentry { + if !allow_reentry { tracing::debug!("Claiming {query:?} results in a cycle because re-entrant lock is not allowed"); - ClaimTransferredResult::Cycle { - with: thread_id, - nested: true, - } + ClaimTransferredResult::Cycle { inner: true } } else { tracing::debug!("Reentrant lock {query:?}"); // dg.remove_transferred(query); @@ -355,7 +348,7 @@ impl Runtime { // dg.resume_transferred_dependents(query, WaitResult::Completed); ClaimTransferredResult::Reentrant - }; + } } // Lock is owned by another thread, wait for it to be released. Err(Some(thread_id)) => { @@ -393,14 +386,6 @@ impl Runtime { ); } - pub(crate) fn transfer_target( - &self, - candidates: &[(DatabaseKeyIndex, ThreadId)], - ) -> Option { - let dependency_graph = self.dependency_graph.lock(); - dependency_graph.transfer_target(candidates) - } - #[cfg(feature = "persistence")] pub(crate) fn deserialize_from(&mut self, other: &mut Runtime) { // The only field that is serialized is `revisions`. 
diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 7e26a4e13..a5d9a38e4 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -357,36 +357,6 @@ impl DependencyGraph { } } - pub(super) fn transfer_target( - &self, - candidates: &[(DatabaseKeyIndex, ThreadId)], - ) -> Option { - if candidates.is_empty() { - return None; - } - - let possible_transfer_targets: Vec<_> = candidates - .iter() - .filter_map(|&(key, thread)| { - // Ensure that transferring to this other thread won't introduce any cyclic wait dependency (where `thread` is blocked on `other_thread` and the other way round).) - let depends_on_another = candidates.iter().any(|&(_, other_thread)| { - other_thread != thread && self.depends_on(thread, other_thread) - }); - - (!depends_on_another).then_some(key) - }) - .collect(); - - tracing::debug!("Possible transfer targets: {:?}", possible_transfer_targets); - - let selection = possible_transfer_targets - .into_iter() - .min_by_key(|target| (target.ingredient_index(), target.key_index())); - - tracing::debug!("Selected transfer target: {selection:?}"); - selection - } - /// Finds the one query in the dependents of the `source_query` (the one that is transferred to a new owner) /// on which the `new_owner_id` thread blocks on and unblocks it, to ensure progress. 
fn unblock_transfer_target(&mut self, source_query: DatabaseKeyIndex, new_owner_id: ThreadId) { diff --git a/tests/parallel/cycle_a_t1_b_t2.rs b/tests/parallel/cycle_a_t1_b_t2.rs index d9d5ca365..ad21b7963 100644 --- a/tests/parallel/cycle_a_t1_b_t2.rs +++ b/tests/parallel/cycle_a_t1_b_t2.rs @@ -62,7 +62,7 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue { #[test_log::test] fn the_test() { crate::sync::check(|| { - tracing::debug!("New run"); + tracing::debug!("Starting new run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); diff --git a/tests/parallel/cycle_a_t1_b_t2_fallback.rs b/tests/parallel/cycle_a_t1_b_t2_fallback.rs index 8005a9c23..b2d6631cc 100644 --- a/tests/parallel/cycle_a_t1_b_t2_fallback.rs +++ b/tests/parallel/cycle_a_t1_b_t2_fallback.rs @@ -55,11 +55,18 @@ fn the_test() { use crate::Knobs; crate::sync::check(|| { + tracing::debug!("Starting new run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); - let t1 = thread::spawn(move || query_a(&db_t1)); - let t2 = thread::spawn(move || query_b(&db_t2)); + let t1 = thread::spawn(move || { + let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); + query_a(&db_t1) + }); + let t2 = thread::spawn(move || { + let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); + query_b(&db_t2) + }); let (r_t1, r_t2) = (t1.join(), t2.join()); diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs index a764a864c..6c450faa1 100644 --- a/tests/parallel/main.rs +++ b/tests/parallel/main.rs @@ -33,7 +33,7 @@ pub(crate) mod sync { pub use shuttle::thread; pub fn check(f: impl Fn() + Send + Sync + 'static) { - shuttle::check_pct(f, 1000, 50); + shuttle::check_pct(f, 10000, 50); } } From 307aae50d21a2addbdc73fa16cce5d200c06becc Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 8 Oct 2025 16:27:27 +0200 Subject: [PATCH 15/45] Avoid repeated query lookups in `transfer_lock` --- src/function/execute.rs | 3 +- src/function/fetch.rs | 
15 ++-- src/function/sync.rs | 1 + src/runtime/dependency_graph.rs | 131 ++++++++++++++++---------------- 4 files changed, 76 insertions(+), 74 deletions(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index 51e10a07b..ebd0ae2e4 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -8,6 +8,7 @@ use crate::function::{ClaimGuard, Configuration, IngredientImpl}; use crate::ingredient::WaitForResult; use crate::plumbing::ZalsaLocal; use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::tracing; use crate::tracked_struct::Identity; use crate::zalsa::{MemoIngredientIndex, Zalsa}; use crate::zalsa_local::{ActiveQueryGuard, QueryRevisions}; @@ -367,7 +368,7 @@ where // `iteration_count` can't overflow as we check it against `MAX_ITERATIONS` // which is less than `u32::MAX`. iteration_count = iteration_count.increment().unwrap_or_else(|| { - tracing::warn!("{database_key_index:?}: execute: too many cycle iterations"); + ::tracing::warn!("{database_key_index:?}: execute: too many cycle iterations"); panic!("{database_key_index:?}: execute: too many cycle iterations") }); diff --git a/src/function/fetch.rs b/src/function/fetch.rs index dc29f0f51..91c5138c4 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -137,14 +137,13 @@ where ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); - // TOOO: Try to restrict this to `FALLBACK_IMMEDIATE`. - // There's an issue that `cycle_nested_deep_conditional_changed` hangs - // if I remove this but why? Should this be handled in `maybe_changed_after` instead? 
- let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); - - if let Some(memo) = memo { - if memo.value.is_some() && memo.may_be_provisional() { - memo.block_on_heads(zalsa, zalsa_local); + if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate { + let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); + + if let Some(memo) = memo { + if memo.value.is_some() && memo.may_be_provisional() { + memo.block_on_heads(zalsa, zalsa_local); + } } } diff --git a/src/function/sync.rs b/src/function/sync.rs index d90933c6e..b577d91a8 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -4,6 +4,7 @@ use crate::key::DatabaseKeyIndex; use crate::runtime::{BlockResult, ClaimTransferredResult, Running, WaitResult}; use crate::sync::thread::{self}; use crate::sync::Mutex; +use crate::tracing; use crate::zalsa::Zalsa; use crate::{Id, IngredientIndex}; diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index a5d9a38e4..a8e0a6c58 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -8,6 +8,7 @@ use crate::runtime::dependency_graph::edge::EdgeCondvar; use crate::runtime::WaitResult; use crate::sync::thread::ThreadId; use crate::sync::MutexGuard; +use crate::tracing; #[derive(Debug, Default)] pub(super) struct DependencyGraph { @@ -261,97 +262,97 @@ impl DependencyGraph { } }; - let mut owner_changed = current_thread != new_owner_thread; - - // TODO: Can we move this into the occupied branch? It's pointless to run this check if there's no existing mapping. - - // If we have `c -> a -> d` and we now insert a mapping `d -> c`, rewrite the mapping to - // `d -> c -> a` to avoid cycles. - // - // A more complex is `e -> c -> a -> d -> b` where we now transfer `d -> c`. Respine - // ``` - // e -> c -> a -> b - // d / - // ``` - // - // The first part here only takes care of removing `d` form ` a -> d -> b` (so that it becomes `a -> b`). 
- // The `d -> c` mapping is inserted by the `match` statement below. - // - // A cycle between transfers can occur when a later iteration has a different outer most query than - // a previous iteration. The second iteration then hits `cycle_initial` for a different head, (e.g. for `c` where it previously was `d`). - let mut last_segment = self.transferred.entry(new_owner); - - while let std::collections::hash_map::Entry::Occupied(entry) = last_segment { - let next_target = entry.get().1; - if next_target == query { - tracing::debug!( - "Remove mapping from {:?} to {:?} to prevent a cycle", - entry.key(), - query - ); - - // Remove `b` from the dependents of `d` and remove the mapping from `a -> d`. - let old_dependents = self.transferred_dependents.get_mut(&query).unwrap(); - let index = old_dependents - .iter() - .position(|key| key == entry.key()) - .unwrap(); - old_dependents.swap_remove(index); - // `a` in `a -> d` - let previous_source = *entry.key(); - entry.remove(); - - // If there's a `d -> b` mapping, remove `d` from `b`'s dependents and connect `a` with `b` - if let Some(next_next) = self.transferred.remove(&query) { - // connect `a` with `b` (okay to use `insert` because we removed the `a` mapping before). - self.transferred.insert(previous_source, next_next); - let next_next_dependents = - self.transferred_dependents.get_mut(&next_next.1).unwrap(); - let query_index = next_next_dependents - .iter() - .position(|key| *key == query) - .unwrap(); - next_next_dependents[query_index] = previous_source; - } + let mut thread_changed = current_thread != new_owner_thread; - break; - } - - last_segment = self.transferred.entry(next_target); - } - - // TODO: Skip unblocks for transitive queries if the old owner is the same as the new owner? match self.transferred.entry(query) { std::collections::hash_map::Entry::Vacant(entry) => { // Transfer `c -> b` and there's no existing entry for `c`. 
entry.insert((new_owner_thread, new_owner)); } std::collections::hash_map::Entry::Occupied(mut entry) => { + // If we transfer to the same owner as before, return immediately. if entry.get() == &(new_owner_thread, new_owner) { return; } // `Transfer `c -> b` after a previous `c -> d` mapping. // Update the owner and remove the query from the old owner's dependents. - let old_owner = entry.get().1; + let (old_owner_thread, old_owner) = *entry.get(); + + // We simply assume here that the thread has changed because we'd have to walk the entire + // transferred chaine of `old_owner` to know if the thread has changed. This won't safe us much + // compared to just updating all dependent threads. + thread_changed = true; - owner_changed = true; + // For the example below, remove `d` from `b`'s dependents.` let old_dependents = self.transferred_dependents.get_mut(&old_owner).unwrap(); let index = old_dependents.iter().position(|key| *key == query).unwrap(); old_dependents.swap_remove(index); entry.insert((new_owner_thread, new_owner)); + + // If we have `c -> a -> d` and we now insert a mapping `d -> c`, rewrite the mapping to + // `d -> c -> a` to avoid cycles. + // + // Or, starting with `e -> c -> a -> d -> b` insert `d -> c`. We need to respine the tree to + // ``` + // e -> c -> a -> b + // d / + // ``` + // + // + // A cycle between transfers can occur when a later iteration has a different outer most query than + // a previous iteration. The second iteration then hits `cycle_initial` for a different head, (e.g. for `c` where it previously was `d`). 
+ let mut last_segment = self.transferred.entry(new_owner); + + while let std::collections::hash_map::Entry::Occupied(mut entry) = last_segment { + let source = *entry.key(); + let next_target = entry.get().1; + + // If it's `a -> d`, remove `a -> d` and insert an edge from `a -> b` + if next_target == query { + tracing::trace!( + "Remap edge {source:?} -> {next_target:?} to {source:?} -> {old_owner:?} to prevent a cycle", + ); + + // Remove `a` from the dependents of `d` and remove the mapping from `a -> d`. + let query_dependents = self.transferred_dependents.get_mut(&query).unwrap(); + let index = query_dependents + .iter() + .copied() + .position(|key| key == source) + .unwrap(); + query_dependents.swap_remove(index); + + // if the old mapping was `c -> d` and we now insert `d -> c`, remove `d -> c` + if old_owner == new_owner { + entry.remove(); + } else { + // otherwise (when `d` pointed to some other query, e.g. `b` in the example), + // add an edge from `a` to `b` + entry.insert((old_owner_thread, old_owner)); + + let old_owner_dependents = + self.transferred_dependents.get_mut(&old_owner).unwrap(); + old_owner_dependents.push(source); + } + + break; + } + + last_segment = self.transferred.entry(next_target); + } } }; // Register `c` as a dependent of `b`. 
let all_dependents = self.transferred_dependents.entry(new_owner).or_default(); - assert!(!all_dependents.contains(&query)); - assert!(!all_dependents.contains(&new_owner)); + debug_assert!(!all_dependents.contains(&query)); + debug_assert!(!all_dependents.contains(&new_owner)); all_dependents.push(query); - tracing::debug!("Wake up blocked threads after transferring ownership to {new_owner:?}"); - if owner_changed { + if thread_changed { + tracing::debug!("Unblocking new owner of transfer target {new_owner:?}"); self.unblock_transfer_target(query, new_owner_thread); self.update_transferred_edges(query, new_owner_thread); } From 9c7982115e995ff26859cb2672dc8d7155c603a0 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 8 Oct 2025 16:46:06 +0200 Subject: [PATCH 16/45] Use recursion for unblocking --- src/runtime/dependency_graph.rs | 55 ++++++++++++++------------------- 1 file changed, 23 insertions(+), 32 deletions(-) diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index a8e0a6c58..9f6da8a97 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -165,8 +165,22 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { - // If `database_key` is `c` and it has been transfered to `b` earlier, remove its entry. - tracing::debug!("unblock_transferred_queries({database_key:?}"); + fn unblock_recursive( + me: &mut DependencyGraph, + query: DatabaseKeyIndex, + wait_result: WaitResult, + ) { + me.transferred.remove(&query); + if let Some(transitive) = me.transferred_dependents.remove(&query) { + for query in transitive { + me.unblock_runtimes_blocked_on(query, wait_result); + unblock_recursive(me, query, wait_result); + } + } + } + + // If `database_key` is `c` and it has been transferred to `b` earlier, remove its entry. 
+ tracing::trace!("unblock_transferred_queries({database_key:?}"); if let Some((_, owner)) = self.transferred.remove(&database_key) { let owner_dependents = self.transferred_dependents.get_mut(&owner).unwrap(); let index = owner_dependents @@ -176,24 +190,7 @@ impl DependencyGraph { owner_dependents.swap_remove(index); } - let mut unblocked: SmallVec<[_; 4]> = SmallVec::new(); - let mut queue: SmallVec<[_; 4]> = smallvec![database_key]; - - while let Some(current) = queue.pop() { - self.transferred.remove(¤t); - let transitive = self - .transferred_dependents - .remove(¤t) - .unwrap_or_default(); - - queue.extend(transitive); - - unblocked.push(current); - } - - for query in unblocked { - self.unblock_runtimes_blocked_on(query, wait_result); - } + unblock_recursive(self, database_key, wait_result); } /// Returns `Ok(thread_id)` if `database_key_index` is a query who's lock ownership has been transferred to `thread_id` (potentially over multiple steps) @@ -392,7 +389,7 @@ impl DependencyGraph { } fn update_transferred_edges(&mut self, query: DatabaseKeyIndex, new_owner_thread: ThreadId) { - tracing::info!("Resuming transitive dependents of query {query:?}"); + tracing::trace!("Resuming transitive dependents of query {query:?}"); let mut queue: SmallVec<[_; 4]> = smallvec![query]; @@ -418,17 +415,11 @@ impl DependencyGraph { edge.blocked_on_id ); edge.blocked_on_id = new_owner_thread; - } - - #[cfg(debug_assertions)] - { - for id in self.query_dependents.get(&query).into_iter().flatten() { - debug_assert!( - !self.depends_on(new_owner_thread, *id), - "Circular reference between blocked edges: {:#?}", - self.edges - ); - } + debug_assert!( + !DependencyGraph::depends_on_impl(&self.edges, new_owner_thread, *dependent), + "Circular reference between blocked edges: {:#?}", + self.edges + ); } } } From 4dc09397b4beb3617f5c60ca736bed059bec1afa Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 8 Oct 2025 19:03:17 +0200 Subject: [PATCH 17/45] Fix hang in 
`maybe_changed_after` --- src/function/fetch.rs | 29 ---------------------------- src/function/maybe_changed_after.rs | 30 +++++++++++++++++++---------- src/function/memo.rs | 8 ++++---- src/function/sync.rs | 1 + src/tracing.rs | 8 +++++++- 5 files changed, 32 insertions(+), 44 deletions(-) diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 91c5138c4..5dd645006 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -200,35 +200,6 @@ where // still valid for the current revision. return unsafe { Some(self.extend_memo_lifetime(old_memo)) }; } - - // If this is a provisional memo from the same revision, await all its cycle heads because - // we need to ensure that only one thread is iterating on a cycle at a given time. - // For example, if we have a nested cycle like so: - // ``` - // a -> b -> c -> b - // -> a - // - // d -> b - // ``` - // thread 1 calls `a` and `a` completes the inner cycle `b -> c` but hasn't finished the outer cycle `a` yet. - // thread 2 now calls `b`. We don't want that thread 2 iterates `b` while thread 1 is iterating `a` at the same time - // because it can result in thread b overriding provisional memos that thread a has accessed already and still relies upon. - // - // By waiting, we ensure that thread 1 completes a (based on a provisional value for `b`) and `b` - // becomes the new outer cycle, which thread 2 drives to completion. - if old_memo.may_be_provisional() - && old_memo.verified_at.load() == zalsa.current_revision() - { - // Try to claim all cycle heads of the provisional memo. If we can't because - // some head is running on another thread, drop our claim guard to give that thread - // a chance to take ownership of this query and complete it as part of its fixpoint iteration. - // We will then block on the cycle head and retry once all cycle heads completed. 
- // if !old_memo.try_claim_heads(zalsa, zalsa_local) { - // drop(claim_guard); - // old_memo.block_on_heads(zalsa, zalsa_local); - // return None; - // } - } } } diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index e434324f9..4a723703b 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -84,9 +84,12 @@ where loop { let database_key_index = self.database_key_index(id); - crate::tracing::debug!( - "{database_key_index:?}: maybe_changed_after(revision = {revision:?})" - ); + let _span = crate::tracing::info_span!( + "maybe_changed_after", + ?revision, + query=?database_key_index + ) + .entered(); // Check if we have a verified version: this is the hot path. let memo_guard = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); @@ -141,7 +144,7 @@ where ) -> Option { let database_key_index = self.database_key_index(key_index); - let _claim_guard = match self.sync_table.try_claim(zalsa, key_index, true) { + let _claim_guard = match self.sync_table.try_claim(zalsa, key_index, false) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); @@ -175,10 +178,8 @@ where // If `validate_maybe_provisional` returns `true`, but only because all cycle heads are from the same iteration, // carry over the cycle heads so that the caller verifies them. 
- if old_memo.may_be_provisional() { - for head in old_memo.cycle_heads() { - cycle_heads.insert_head(head.database_key_index); - } + for head in old_memo.cycle_heads() { + cycle_heads.insert_head(head.database_key_index); } return Some(if old_memo.revisions.changed_at > revision { @@ -467,7 +468,7 @@ where match cycle_head { TryClaimHeadsResult::Cycle { head_iteration_count, - current_iteration_count, + memo_iteration_count: current_iteration_count, verified_at: head_verified_at, } => { if head_verified_at != verified_at { @@ -487,7 +488,7 @@ where true } - crate::tracing::trace!( + crate::tracing::info!( "{database_key_index:?}: validate_same_iteration(memo = {memo:#?})", memo = memo.tracing_debug() ); @@ -506,6 +507,15 @@ where return false; } + // Always return `false` if this is a cycle initial memo (or the last provisional memo in an iteration) + // as this value has obviously not finished computing yet. + if cycle_heads + .iter() + .all(|head| head.database_key_index == database_key_index) + { + return false; + } + validate_same_iteration_cold(zalsa, zalsa_local, cycle_heads, verified_at) } diff --git a/src/function/memo.rs b/src/function/memo.rs index 7883e7b58..f547d9074 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -197,7 +197,7 @@ impl<'db, C: Configuration> Memo<'db, C> { for claim_result in cycle_heads { match claim_result { TryClaimHeadsResult::Cycle { - current_iteration_count, + memo_iteration_count: current_iteration_count, head_iteration_count, .. } => { @@ -449,7 +449,7 @@ pub(super) enum TryClaimHeadsResult<'me> { /// Claiming the cycle head results in a cycle. 
Cycle { head_iteration_count: IterationCount, - current_iteration_count: IterationCount, + memo_iteration_count: IterationCount, verified_at: Revision, }, @@ -506,7 +506,7 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { ); return Some(TryClaimHeadsResult::Cycle { head_iteration_count, - current_iteration_count, + memo_iteration_count: current_iteration_count, verified_at: self.zalsa.current_revision(), }); } @@ -540,7 +540,7 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { }; Some(TryClaimHeadsResult::Cycle { - current_iteration_count, + memo_iteration_count: current_iteration_count, head_iteration_count, verified_at, }) diff --git a/src/function/sync.rs b/src/function/sync.rs index b577d91a8..342b54e22 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -62,6 +62,7 @@ impl SyncTable { SyncOwnerId::Transferred => { let current_id = thread::current().id(); let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); + return match zalsa .runtime() .claim_transferred(database_key_index, allow_reentry) diff --git a/src/tracing.rs b/src/tracing.rs index 47f95d00e..e3e475987 100644 --- a/src/tracing.rs +++ b/src/tracing.rs @@ -25,6 +25,12 @@ macro_rules! debug_span { }; } +macro_rules! info_span { + ($($x:tt)*) => { + crate::tracing::span!(INFO, $($x)*) + }; +} + macro_rules! event { ($level:ident, $($x:tt)*) => {{ let event = { @@ -51,4 +57,4 @@ macro_rules! 
span { }}; } -pub(crate) use {debug, debug_span, event, info, span, trace}; +pub(crate) use {debug, debug_span, event, info, info_span, span, trace}; From 2273fc77e3925dce719fe50b1c0f1fc01c7cd464 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Wed, 8 Oct 2025 19:23:06 +0200 Subject: [PATCH 18/45] Move claiming of transferred memos into a separate function --- src/function/sync.rs | 125 ++++++++++++++++++++++++------------------- 1 file changed, 71 insertions(+), 54 deletions(-) diff --git a/src/function/sync.rs b/src/function/sync.rs index 342b54e22..23e6b91e1 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -1,7 +1,8 @@ use rustc_hash::FxHashMap; +use std::collections::hash_map::OccupiedEntry; use crate::key::DatabaseKeyIndex; -use crate::runtime::{BlockResult, ClaimTransferredResult, Running, WaitResult}; +use crate::runtime::{BlockResult, ClaimTransferredResult, OtherThread, Running, WaitResult}; use crate::sync::thread::{self}; use crate::sync::Mutex; use crate::tracing; @@ -54,64 +55,23 @@ impl SyncTable { ) -> ClaimResult<'me> { let mut write = self.syncs.lock(); match write.entry(key_index) { - std::collections::hash_map::Entry::Occupied(mut occupied_entry) => { + std::collections::hash_map::Entry::Occupied(occupied_entry) => { let id = occupied_entry.get().id; let id = match id { SyncOwnerId::Thread(id) => id, SyncOwnerId::Transferred => { - let current_id = thread::current().id(); - let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); - - return match zalsa - .runtime() - .claim_transferred(database_key_index, allow_reentry) - { - ClaimTransferredResult::ClaimedBy(other_thread) => { - occupied_entry.get_mut().anyone_waiting = true; - - match other_thread.block(write) { - BlockResult::Cycle => ClaimResult::Cycle { inner: false }, - BlockResult::Running(running) => ClaimResult::Running(running), - } - } - ClaimTransferredResult::Reentrant => { - let SyncState { - id, claimed_twice, .. 
- } = occupied_entry.into_mut(); - - if *claimed_twice { - return ClaimResult::Cycle { inner: false }; - } - - *id = SyncOwnerId::Thread(current_id); - *claimed_twice = true; - - ClaimResult::Claimed(ClaimGuard { - key_index, - zalsa, - sync_table: self, - mode: ReleaseMode::SelfOnly, - }) - } - ClaimTransferredResult::Cycle { inner: nested } => { - ClaimResult::Cycle { inner: nested } - } - ClaimTransferredResult::Released => { - occupied_entry.insert(SyncState { - id: SyncOwnerId::Thread(thread::current().id()), - anyone_waiting: false, - is_transfer_target: false, - claimed_twice: false, - }); - ClaimResult::Claimed(ClaimGuard { - key_index, - zalsa, - sync_table: self, - mode: ReleaseMode::Default, - }) - } - }; + return match self.try_claim_transferred( + zalsa, + occupied_entry, + allow_reentry, + ) { + Ok(claimed) => claimed, + Err(other_thread) => match other_thread.block(write) { + BlockResult::Cycle => ClaimResult::Cycle { inner: false }, + BlockResult::Running(running) => ClaimResult::Running(running), + }, + } } }; @@ -153,6 +113,63 @@ impl SyncTable { } } + #[cold] + fn try_claim_transferred<'me>( + &'me self, + zalsa: &'me Zalsa, + mut entry: OccupiedEntry, + allow_reentry: bool, + ) -> Result, OtherThread<'me>> { + let key_index = *entry.key(); + let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); + + match zalsa + .runtime() + .claim_transferred(database_key_index, allow_reentry) + { + ClaimTransferredResult::ClaimedBy(other_thread) => { + entry.get_mut().anyone_waiting = true; + Err(other_thread) + } + ClaimTransferredResult::Reentrant => { + let SyncState { + id, claimed_twice, .. 
+ } = entry.into_mut(); + + if *claimed_twice { + return Ok(ClaimResult::Cycle { inner: false }); + } + + *id = SyncOwnerId::Thread(thread::current().id()); + *claimed_twice = true; + + Ok(ClaimResult::Claimed(ClaimGuard { + key_index, + zalsa, + sync_table: self, + mode: ReleaseMode::SelfOnly, + })) + } + ClaimTransferredResult::Cycle { inner: nested } => { + Ok(ClaimResult::Cycle { inner: nested }) + } + ClaimTransferredResult::Released => { + entry.insert(SyncState { + id: SyncOwnerId::Thread(thread::current().id()), + anyone_waiting: false, + is_transfer_target: false, + claimed_twice: false, + }); + Ok(ClaimResult::Claimed(ClaimGuard { + key_index, + zalsa, + sync_table: self, + mode: ReleaseMode::Default, + })) + } + } + } + fn make_transfer_target(&self, key_index: Id) -> Option { let mut syncs = self.syncs.lock(); syncs.get_mut(&key_index).map(|state| { From 8d5dab7e25b96c9032178e40e74a76658926ef62 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 09:38:14 +0200 Subject: [PATCH 19/45] More aggressive use of attributes --- src/function/sync.rs | 4 ++++ src/runtime.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/function/sync.rs b/src/function/sync.rs index 23e6b91e1..fc949aa7f 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -47,6 +47,7 @@ impl SyncTable { } } + #[inline] pub(crate) fn try_claim<'me>( &'me self, zalsa: &'me Zalsa, @@ -114,6 +115,7 @@ impl SyncTable { } #[cold] + #[inline(never)] fn try_claim_transferred<'me>( &'me self, zalsa: &'me Zalsa, @@ -252,6 +254,7 @@ impl<'me> ClaimGuard<'me> { } #[cold] + #[inline(never)] fn release_self(&self) { tracing::debug!("release_self"); let mut syncs = self.sync_table.syncs.lock(); @@ -269,6 +272,7 @@ impl<'me> ClaimGuard<'me> { } #[cold] + #[inline(never)] pub(crate) fn transfer(&self, new_owner: DatabaseKeyIndex) { let self_key = self.database_key_index(); diff --git a/src/runtime.rs b/src/runtime.rs index b4b0660b9..55d3857d9 100644 --- 
a/src/runtime.rs +++ b/src/runtime.rs @@ -256,7 +256,7 @@ impl Runtime { let r_old = self.current_revision(); let r_new = r_old.next(); self.revisions[0] = r_new; - crate::tracing::debug!("new_revision: {r_old:?} -> {r_new:?}"); + crate::tracing::info!("new_revision: {r_old:?} -> {r_new:?}"); r_new } From 7f3d2eeb8f291a0d853c13e93be85cd8babcdcc5 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 10:09:29 +0200 Subject: [PATCH 20/45] Make re-entrant a const parameter --- src/function.rs | 2 +- src/function/fetch.rs | 2 +- src/function/maybe_changed_after.rs | 2 +- src/function/sync.rs | 15 +++++---------- src/runtime.rs | 11 ++--------- src/zalsa_local.rs | 19 ------------------- 6 files changed, 10 insertions(+), 41 deletions(-) diff --git a/src/function.rs b/src/function.rs index b3dbb58ce..9ae01992c 100644 --- a/src/function.rs +++ b/src/function.rs @@ -408,7 +408,7 @@ where /// * [`WaitResult::Cycle`] Claiming the `key_index` results in a cycle because it's on the current's thread query stack or /// running on another thread that is blocked on this thread. fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { - match self.sync_table.try_claim(zalsa, key_index, false) { + match self.sync_table.try_claim::(zalsa, key_index) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), ClaimResult::Cycle { inner: nested } => WaitForResult::Cycle { inner: nested }, ClaimResult::Claimed(_) => WaitForResult::Available, diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 5dd645006..1f2cf8ff1 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -132,7 +132,7 @@ where ) -> Option<&'db Memo<'db, C>> { let database_key_index = self.database_key_index(id); // Try to claim this query: if someone else has claimed it already, go back and start again. 
- let claim_guard = match self.sync_table.try_claim(zalsa, id, true) { + let claim_guard = match self.sync_table.try_claim::(zalsa, id) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 4a723703b..7d03e6e7a 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -144,7 +144,7 @@ where ) -> Option { let database_key_index = self.database_key_index(key_index); - let _claim_guard = match self.sync_table.try_claim(zalsa, key_index, false) { + let _claim_guard = match self.sync_table.try_claim::(zalsa, key_index) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); diff --git a/src/function/sync.rs b/src/function/sync.rs index fc949aa7f..fc67ccd3a 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -48,11 +48,10 @@ impl SyncTable { } #[inline] - pub(crate) fn try_claim<'me>( + pub(crate) fn try_claim<'me, const REENTRANT: bool>( &'me self, zalsa: &'me Zalsa, key_index: Id, - allow_reentry: bool, ) -> ClaimResult<'me> { let mut write = self.syncs.lock(); match write.entry(key_index) { @@ -62,11 +61,8 @@ impl SyncTable { let id = match id { SyncOwnerId::Thread(id) => id, SyncOwnerId::Transferred => { - return match self.try_claim_transferred( - zalsa, - occupied_entry, - allow_reentry, - ) { + return match self.try_claim_transferred::(zalsa, occupied_entry) + { Ok(claimed) => claimed, Err(other_thread) => match other_thread.block(write) { BlockResult::Cycle => ClaimResult::Cycle { inner: false }, @@ -116,18 +112,17 @@ impl SyncTable { #[cold] #[inline(never)] - fn try_claim_transferred<'me>( + fn try_claim_transferred<'me, const REENTRANT: bool>( &'me self, zalsa: &'me Zalsa, mut entry: OccupiedEntry, - allow_reentry: bool, ) -> Result, OtherThread<'me>> { let key_index = *entry.key(); let database_key_index = 
DatabaseKeyIndex::new(self.ingredient, key_index); match zalsa .runtime() - .claim_transferred(database_key_index, allow_reentry) + .claim_transferred::(database_key_index) { ClaimTransferredResult::ClaimedBy(other_thread) => { entry.get_mut().anyone_waiting = true; diff --git a/src/runtime.rs b/src/runtime.rs index 55d3857d9..6b6b6c9f7 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -325,27 +325,20 @@ impl Runtime { .unblock_transferred_queries(database_key, wait_result); } - pub(super) fn claim_transferred( + pub(super) fn claim_transferred( &self, query: DatabaseKeyIndex, - allow_reentry: bool, ) -> ClaimTransferredResult<'_> { let mut dg = self.dependency_graph.lock(); let thread_id = thread::current().id(); match dg.block_on_transferred(query, thread_id) { Ok(_) => { - if !allow_reentry { + if !REENTRANT { tracing::debug!("Claiming {query:?} results in a cycle because re-entrant lock is not allowed"); ClaimTransferredResult::Cycle { inner: true } } else { tracing::debug!("Reentrant lock {query:?}"); - // dg.remove_transferred(query); - - // // This seems wrong? 
- // // if owning_thread_id != current_id { - // dg.unblock_runtimes_blocked_on(query, WaitResult::Completed); - // dg.resume_transferred_dependents(query, WaitResult::Completed); ClaimTransferredResult::Reentrant } diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 35f3915cb..c191c1dd2 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -1260,23 +1260,4 @@ pub(crate) mod persistence { serde::Deserialize::deserialize(deserializer).map(AtomicBool::new) } } - - #[cfg(feature = "persistence")] - pub(super) mod atomic_bool { - use crate::sync::atomic::{AtomicBool, Ordering}; - - pub fn serialize(value: &AtomicBool, serializer: S) -> Result - where - S: serde::Serializer, - { - serde::Serialize::serialize(&value.load(Ordering::Relaxed), serializer) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - serde::Deserialize::deserialize(deserializer).map(AtomicBool::new) - } - } } From 85ef5163a25e7eea188dc0f06bb3255d4966b0a7 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 11:05:04 +0200 Subject: [PATCH 21/45] Smaller clean-ups --- src/active_query.rs | 3 +- src/cycle.rs | 48 ++++++++-- src/function.rs | 2 +- src/function/execute.rs | 14 +-- src/function/fetch.rs | 15 +-- src/function/maybe_changed_after.rs | 142 ++++++++++++++-------------- src/function/memo.rs | 22 +---- src/function/sync.rs | 17 +--- src/ingredient.rs | 10 -- src/runtime/dependency_graph.rs | 11 +-- src/tracing.rs | 2 + src/zalsa_local.rs | 17 ++-- tests/backtrace.rs | 6 +- tests/cycle.rs | 3 +- 14 files changed, 147 insertions(+), 165 deletions(-) diff --git a/src/active_query.rs b/src/active_query.rs index 11cf5d2eb..d830fece1 100644 --- a/src/active_query.rs +++ b/src/active_query.rs @@ -225,7 +225,6 @@ impl ActiveQuery { active_tracked_structs, mem::take(cycle_heads), iteration_count, - false, ); let revisions = QueryRevisions { @@ -518,7 +517,7 @@ impl fmt::Display for Backtrace { } write!( fmt, - "{:?} -> {}", + 
"{:?} -> iteration = {}", head.database_key_index, head.iteration_count )?; } diff --git a/src/cycle.rs b/src/cycle.rs index 5202e2d02..42e131e3f 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -102,6 +102,14 @@ pub enum CycleRecoveryStrategy { pub struct CycleHead { pub(crate) database_key_index: DatabaseKeyIndex, pub(crate) iteration_count: AtomicIterationCount, + + /// Marks a cycle head as removed within its `CycleHeads` container. + /// + /// Cycle heads are marked as removed when the memo from the last iteration (a provisional memo) + /// is used as the initial value for the next iteration. It's necessary to remove all but its own + /// head from the `CycleHeads` container, because the query might now depend on fewer cycles + /// (in case of conditional dependencies). However, we can't actually remove the cycle head + /// within `fetch_cold_cycle` because we only have a readonly memo. That's what `removed` is used for. #[cfg_attr(feature = "persistence", serde(skip))] removed: AtomicBool, } @@ -130,6 +138,11 @@ impl IterationCount { self.0 == 0 } + /// Iteration count reserved for panicked cycles. + /// + /// Using a special iteration count ensures that `validate_same_iteration` and `validate_provisional` + /// return `false` for queries depending on this panicked cycle, because the iteration count is guaranteed + /// to be different (which isn't guaranteed if the panicked memo uses [`Self::initial`]). 
pub(crate) const fn panicked() -> Self { Self(u8::MAX) } @@ -150,7 +163,7 @@ impl IterationCount { impl std::fmt::Display for IterationCount { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "iteration={}", self.0) + self.0.fmt(f) } } @@ -162,6 +175,10 @@ impl AtomicIterationCount { IterationCount(self.0.load(Ordering::Relaxed)) } + pub(crate) fn load_mut(&mut self) -> IterationCount { + IterationCount(*self.0.get_mut()) + } + pub(crate) fn store(&self, value: IterationCount) { self.0.store(value.0, Ordering::Release); } @@ -214,10 +231,13 @@ impl CycleHeads { self.0.is_empty() } - pub(crate) fn initial(database_key_index: DatabaseKeyIndex) -> Self { + pub(crate) fn initial( + database_key_index: DatabaseKeyIndex, + iteration_count: IterationCount, + ) -> Self { Self(thin_vec![CycleHead { database_key_index, - iteration_count: IterationCount::initial().into(), + iteration_count: iteration_count.into(), removed: false.into() }]) } @@ -233,17 +253,24 @@ impl CycleHeads { .any(|head| head.database_key_index == *value && !head.removed.load(Ordering::Relaxed)) } - pub(crate) fn clear_except(&self, except: DatabaseKeyIndex) { + /// Removes all cycle heads except `except` by marking them as removed. + /// + /// Note that the heads aren't actually removed. They're only marked as removed and will be + /// skipped when iterating. This is because we might not have a mutable reference. + pub(crate) fn remove_all_except(&self, except: DatabaseKeyIndex) { for head in self.0.iter() { if head.database_key_index == except { continue; } - // TODO: verify ordering head.removed.store(true, Ordering::Release); } } + /// Updates the iteration count for the head `cycle_head_index` to `new_iteration_count`. + /// + /// Unlike [`update_iteration_count`], this method takes a `&mut self` reference. It should + /// be preferred if possible, as it avoids atomic operations. 
pub(crate) fn update_iteration_count_mut( &mut self, cycle_head_index: DatabaseKeyIndex, @@ -258,6 +285,9 @@ impl CycleHeads { } } + /// Updates the iteration count for the head `cycle_head_index` to `new_iteration_count`. + /// + /// Unlike [`update_iteration_count_mut`], this method takes a `&self` reference. pub(crate) fn update_iteration_count( &self, cycle_head_index: DatabaseKeyIndex, @@ -282,6 +312,8 @@ impl CycleHeads { } pub(crate) fn insert(&mut self, head: &CycleHead) -> bool { + debug_assert!(!head.removed.load(Ordering::Relaxed)); + if let Some(existing) = self .0 .iter_mut() @@ -294,12 +326,9 @@ impl CycleHeads { true } else { - let existing_count = existing.iteration_count.load(); + let existing_count = existing.iteration_count.load_mut(); let head_count = head.iteration_count.load(); - // It's now possible that a query can depend on different iteration counts of the same query - // This because some queries (inner) read the provisional value of the last iteration - // while outer queries read the value from the last iteration (which is i+1 if the head didn't converge). 
assert_eq!( existing_count, head_count, "Can't merge cycle heads {:?} with different iteration counts ({existing_count:?}, {head_count:?})", @@ -309,7 +338,6 @@ impl CycleHeads { false } } else { - debug_assert!(!head.removed.load(Ordering::Relaxed)); self.0.push(head.clone()); true } diff --git a/src/function.rs b/src/function.rs index 9ae01992c..9e5e3551f 100644 --- a/src/function.rs +++ b/src/function.rs @@ -410,7 +410,7 @@ where fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { match self.sync_table.try_claim::(zalsa, key_index) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), - ClaimResult::Cycle { inner: nested } => WaitForResult::Cycle { inner: nested }, + ClaimResult::Cycle { inner } => WaitForResult::Cycle { inner }, ClaimResult::Claimed(_) => WaitForResult::Available, } } diff --git a/src/function/execute.rs b/src/function/execute.rs index ebd0ae2e4..44a475d9b 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -67,8 +67,6 @@ where if let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() { // Did the new result we got depend on our own provisional value, in a cycle? if cycle_heads.contains(&database_key_index) { - let id = database_key_index.key_index(); - // Ignore the computed value, leave the fallback value there. 
let memo = self .get_memo_from_table_for(zalsa, id, memo_ingredient_index) @@ -126,18 +124,16 @@ where self.diff_outputs(zalsa, database_key_index, old_memo, &completed_query); } - let new_memo = self.insert_memo( + self.insert_memo( zalsa, - database_key_index.key_index(), + id, Memo::new( Some(new_value), zalsa.current_revision(), completed_query.revisions, ), memo_ingredient_index, - ); - - new_memo + ) } fn execute_maybe_iterate<'db>( @@ -512,9 +508,7 @@ impl<'a, C: Configuration> ClearCycleHeadIfPanicking<'a, C> { impl Drop for ClearCycleHeadIfPanicking<'_, C> { fn drop(&mut self) { if std::thread::panicking() { - let mut revisions = - QueryRevisions::fixpoint_initial(self.ingredient.database_key_index(self.id)); - revisions.update_iteration_count_mut( + let revisions = QueryRevisions::fixpoint_initial( self.ingredient.database_key_index(self.id), IterationCount::panicked(), ); diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 1f2cf8ff1..43a5352f1 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -141,7 +141,7 @@ where let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); if let Some(memo) = memo { - if memo.value.is_some() && memo.may_be_provisional() { + if memo.value.is_some() { memo.block_on_heads(zalsa, zalsa_local); } } @@ -230,12 +230,9 @@ where self.update_shallow(zalsa, database_key_index, memo, can_shallow_update); if C::CYCLE_STRATEGY == CycleRecoveryStrategy::Fixpoint { - // This feels strange. I feel like we need to preserve the cycle heads. Let's say a cycle head only sometimes participates in the cycle. - // This doesn't mean that the value becomes final because of it. The query might as well be cyclic in the next iteration but - // we then never re-executed that query because it was marked as `verified_final`. 
memo.revisions .cycle_heads() - .clear_except(database_key_index); + .remove_all_except(database_key_index); } crate::tracing::debug!( @@ -267,7 +264,8 @@ where "hit cycle at {database_key_index:#?}, \ inserting and returning fixpoint initial value" ); - let revisions = QueryRevisions::fixpoint_initial(database_key_index); + let revisions = + QueryRevisions::fixpoint_initial(database_key_index, IterationCount::initial()); let initial_value = C::cycle_initial(db, C::id_to_input(zalsa, id)); self.insert_memo( zalsa, @@ -286,7 +284,10 @@ where let mut completed_query = active_query.pop(); completed_query .revisions - .set_cycle_heads(CycleHeads::initial(database_key_index)); + .set_cycle_heads(CycleHeads::initial( + database_key_index, + IterationCount::initial(), + )); // We need this for `cycle_heads()` to work. We will unset this in the outer `execute()`. *completed_query.revisions.verified_final.get_mut() = false; self.insert_memo( diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 7d03e6e7a..5795178d4 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -84,12 +84,9 @@ where loop { let database_key_index = self.database_key_index(id); - let _span = crate::tracing::info_span!( - "maybe_changed_after", - ?revision, - query=?database_key_index - ) - .entered(); + crate::tracing::debug!( + "{database_key_index:?}: maybe_changed_after(revision = {revision:?})" + ); // Check if we have a verified version: this is the hot path. 
let memo_guard = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); @@ -144,7 +141,7 @@ where ) -> Option { let database_key_index = self.database_key_index(key_index); - let _claim_guard = match self.sync_table.try_claim::(zalsa, key_index) { + let claim_guard = match self.sync_table.try_claim::(zalsa, key_index) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); @@ -228,7 +225,7 @@ where // `in_cycle` tracks if the enclosing query is in a cycle. `deep_verify.cycle_heads` tracks // if **this query** encountered a cycle (which means there's some provisional value somewhere floating around). if old_memo.value.is_some() && !cycle_heads.has_any() { - let memo = self.execute(db, _claim_guard, zalsa_local, Some(old_memo)); + let memo = self.execute(db, claim_guard, zalsa_local, Some(old_memo)); let changed_at = memo.revisions.changed_at; // Always assume that a provisional value has changed. @@ -366,28 +363,57 @@ where database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, ) -> bool { - !memo.may_be_provisional() - || self.validate_provisional(zalsa, database_key_index, memo) - || self.validate_same_iteration(zalsa, zalsa_local, database_key_index, memo) + if !memo.may_be_provisional() { + return true; + } + + let cycle_heads = memo.cycle_heads(); + + if cycle_heads.is_empty() { + return true; + } + + // Always return `false` if this is a cycle initial memo (or the last provisional memo in an iteration) + // as this value has obviously not finished computing yet. 
+ if cycle_heads + .iter() + .all(|head| head.database_key_index == database_key_index) + { + return false; + } + + crate::tracing::trace!( + "{database_key_index:?}: validate_may_be_provisional(memo = {memo:#?})", + memo = memo.tracing_debug() + ); + + let verified_at = memo.verified_at.load(); + + self.validate_provisional(zalsa, database_key_index, memo, verified_at, cycle_heads) + || self.validate_same_iteration( + zalsa, + zalsa_local, + database_key_index, + verified_at, + cycle_heads, + ) } /// Check if this memo's cycle heads have all been finalized. If so, mark it verified final and /// return true, if not return false. - #[inline] fn validate_provisional( &self, zalsa: &Zalsa, database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, + memo_verified_at: Revision, + cycle_heads: &CycleHeads, ) -> bool { crate::tracing::trace!( - "{database_key_index:?}: validate_provisional(memo = {memo:#?})", - memo = memo.tracing_debug() + "{database_key_index:?}: validate_provisional({database_key_index:?})", ); - let memo_verified_at = memo.verified_at.load(); - - for cycle_head in memo.revisions.cycle_heads() { + for cycle_head in cycle_heads { // Test if our cycle heads (with the same revision) are now finalized. let Some(kind) = zalsa .lookup_ingredient(cycle_head.database_key_index.ingredient_index()) @@ -446,77 +472,47 @@ where /// If this is a provisional memo, validate that it was cached in the same iteration of the /// same cycle(s) that we are still executing. If so, it is valid for reuse. This avoids /// runaway re-execution of the same queries within a fixpoint iteration. 
- #[inline] fn validate_same_iteration( &self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal, database_key_index: DatabaseKeyIndex, - memo: &Memo<'_, C>, + memo_verified_at: Revision, + cycle_heads: &CycleHeads, ) -> bool { - #[cold] - #[inline(never)] - fn validate_same_iteration_cold( - zalsa: &Zalsa, - zalsa_local: &ZalsaLocal, - cycle_heads: &CycleHeads, - verified_at: Revision, - ) -> bool { - let cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); - - for cycle_head in cycle_heads_iter { - match cycle_head { - TryClaimHeadsResult::Cycle { - head_iteration_count, - memo_iteration_count: current_iteration_count, - verified_at: head_verified_at, - } => { - if head_verified_at != verified_at { - return false; - } - - if head_iteration_count != current_iteration_count { - return false; - } - } - _ => { - return false; - } - } - } - - true - } - - crate::tracing::info!( - "{database_key_index:?}: validate_same_iteration(memo = {memo:#?})", - memo = memo.tracing_debug() - ); - - let cycle_heads = memo.revisions.cycle_heads(); - if cycle_heads.is_empty() { - return true; - } - - let verified_at = memo.verified_at.load(); + crate::tracing::trace!("validate_same_iteration({database_key_index:?})",); // This is an optimization to avoid unnecessary re-execution within the same revision. // Don't apply it when verifying memos from past revisions. We want them to re-execute // to verify their cycle heads and all participating queries. - if verified_at != zalsa.current_revision() { + if memo_verified_at != zalsa.current_revision() { return false; } - // Always return `false` if this is a cycle initial memo (or the last provisional memo in an iteration) - // as this value has obviously not finished computing yet. 
- if cycle_heads - .iter() - .all(|head| head.database_key_index == database_key_index) - { - return false; + let cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); + + for cycle_head in cycle_heads_iter { + match cycle_head { + TryClaimHeadsResult::Cycle { + head_iteration_count, + memo_iteration_count: current_iteration_count, + verified_at: head_verified_at, + } => { + if head_verified_at != memo_verified_at { + return false; + } + + if head_iteration_count != current_iteration_count { + return false; + } + } + _ => { + return false; + } + } } - validate_same_iteration_cold(zalsa, zalsa_local, cycle_heads, verified_at) + true } /// VerifyResult::Unchanged if the memo's value and `changed_at` time is up-to-date in the diff --git a/src/function/memo.rs b/src/function/memo.rs index f547d9074..fe91f6ba7 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -145,14 +145,6 @@ impl<'db, C: Configuration> Memo<'db, C> { zalsa_local: &ZalsaLocal, database_key_index: DatabaseKeyIndex, ) -> bool { - if self.revisions.cycle_heads().is_empty() { - return false; - } - - if !self.may_be_provisional() { - return false; - }; - if self.block_on_heads(zalsa, zalsa_local) { // If we get here, we are a provisional value of // the cycle head (either initial value, or from a later iteration) and should be @@ -177,7 +169,7 @@ impl<'db, C: Configuration> Memo<'db, C> { // IMPORTANT: If you make changes to this function, make sure to run `cycle_nested_deep` with // shuttle with at least 10k iterations. 
- let cycle_heads = self.revisions.cycle_heads(); + let cycle_heads = self.cycle_heads(); if cycle_heads.is_empty() { return true; } @@ -501,7 +493,7 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { .map(|query| query.iteration_count()) }) } { - crate::tracing::debug!( + crate::tracing::trace!( "Waiting for {head_database_key:?} results in a cycle (because it is already in the query stack)" ); return Some(TryClaimHeadsResult::Cycle { @@ -520,7 +512,7 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { WaitForResult::Cycle { .. } => { // We hit a cycle blocking on the cycle head; this means this query actively // participates in the cycle and some other query is blocked on this thread. - crate::tracing::debug!("Waiting for {head_database_key:?} results in a cycle"); + crate::tracing::trace!("Waiting for {head_database_key:?} results in a cycle"); let provisional_status = ingredient .provisional_status(self.zalsa, head_key_index) @@ -546,15 +538,11 @@ impl<'me> Iterator for TryClaimCycleHeadsIter<'me> { }) } WaitForResult::Running(running) => { - crate::tracing::debug!("Ingredient {head_database_key:?} is running: {running:?}"); + crate::tracing::trace!("Ingredient {head_database_key:?} is running: {running:?}"); Some(TryClaimHeadsResult::Running(running)) } - WaitForResult::Available => { - crate::tracing::debug!("Query {head_database_key:?} is available",); - - Some(TryClaimHeadsResult::Available) - } + WaitForResult::Available => Some(TryClaimHeadsResult::Available), } } } diff --git a/src/function/sync.rs b/src/function/sync.rs index fc67ccd3a..b1df19d91 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -56,9 +56,7 @@ impl SyncTable { let mut write = self.syncs.lock(); match write.entry(key_index) { std::collections::hash_map::Entry::Occupied(occupied_entry) => { - let id = occupied_entry.get().id; - - let id = match id { + let id = match occupied_entry.get().id { SyncOwnerId::Thread(id) => id, SyncOwnerId::Transferred => { return match 
self.try_claim_transferred::(zalsa, occupied_entry) @@ -132,10 +130,7 @@ impl SyncTable { let SyncState { id, claimed_twice, .. } = entry.into_mut(); - - if *claimed_twice { - return Ok(ClaimResult::Cycle { inner: false }); - } + debug_assert!(!*claimed_twice); *id = SyncOwnerId::Thread(thread::current().id()); *claimed_twice = true; @@ -221,7 +216,6 @@ impl<'me> ClaimGuard<'me> { #[inline(always)] fn release(&self, wait_result: WaitResult, state: SyncState) { let database_key_index = self.database_key_index(); - tracing::debug!("release_and_unblock({database_key_index:?})"); let SyncState { anyone_waiting, @@ -241,7 +235,6 @@ impl<'me> ClaimGuard<'me> { } if is_transfer_target { - tracing::debug!("unblock transferred queries owned by {database_key_index:?}"); runtime.unblock_transferred_queries(database_key_index, wait_result); } @@ -251,7 +244,6 @@ impl<'me> ClaimGuard<'me> { #[cold] #[inline(never)] fn release_self(&self) { - tracing::debug!("release_self"); let mut syncs = self.sync_table.syncs.lock(); let std::collections::hash_map::Entry::Occupied(mut state) = syncs.entry(self.key_index) else { @@ -298,8 +290,6 @@ impl<'me> ClaimGuard<'me> { *id = SyncOwnerId::Transferred; *claimed_twice = false; *anyone_waiting = false; - - tracing::debug!("Transfer ownership completed"); } } @@ -312,13 +302,12 @@ impl Drop for ClaimGuard<'_> { WaitResult::Completed }; - // TODO, what to do if thread panics? Always force release? 
match self.mode { ReleaseMode::Default => { self.release_default(wait_result); } _ if matches!(wait_result, WaitResult::Panicked) => { - tracing::debug!("Release after panicked"); + tracing::debug!("Releasing `ClaimGuard` after panic"); self.release_default(wait_result); } ReleaseMode::SelfOnly => { diff --git a/src/ingredient.rs b/src/ingredient.rs index 4737193be..f58933371 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -323,13 +323,3 @@ pub enum WaitForResult<'me> { Available, Cycle { inner: bool }, } - -impl WaitForResult<'_> { - pub const fn is_cycle(&self) -> bool { - matches!(self, WaitForResult::Cycle { .. }) - } - - pub const fn is_running(&self) -> bool { - matches!(self, WaitForResult::Running(_)) - } -} diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 9f6da8a97..35cc2c650 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -1,6 +1,7 @@ +use std::pin::Pin; + use rustc_hash::FxHashMap; use smallvec::{smallvec, SmallVec}; -use std::pin::Pin; use crate::function::SyncOwnerId; use crate::key::DatabaseKeyIndex; @@ -134,9 +135,6 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { - tracing::debug!( - "Unblocking runtimes blocked on {database_key:?} with wait result {wait_result:?}" - ); let dependents = self .query_dependents .remove(&database_key) @@ -151,7 +149,6 @@ impl DependencyGraph { /// This will cause it resume execution (though it will have to grab /// the lock on this data structure first, to recover the wait result). 
fn unblock_runtime(&mut self, id: ThreadId, wait_result: WaitResult) { - tracing::debug!("Unblocking runtime {id:?} with wait result {wait_result:?}"); let edge = self.edges.remove(&id).expect("not blocked"); self.wait_results.insert(id, wait_result); @@ -389,7 +386,7 @@ impl DependencyGraph { } fn update_transferred_edges(&mut self, query: DatabaseKeyIndex, new_owner_thread: ThreadId) { - tracing::trace!("Resuming transitive dependents of query {query:?}"); + tracing::trace!("update_transferred_edges({query:?}"); let mut queue: SmallVec<[_; 4]> = smallvec![query]; @@ -410,7 +407,7 @@ impl DependencyGraph { for dependent in dependents.iter_mut() { let edge = self.edges.get_mut(dependent).unwrap(); - tracing::info!( + tracing::trace!( "Rewrite edge from {:?} to {new_owner_thread:?}", edge.blocked_on_id ); diff --git a/src/tracing.rs b/src/tracing.rs index e3e475987..d8b13e471 100644 --- a/src/tracing.rs +++ b/src/tracing.rs @@ -25,6 +25,7 @@ macro_rules! debug_span { }; } +#[expect(unused_macros)] macro_rules! info_span { ($($x:tt)*) => { crate::tracing::span!(INFO, $($x)*) @@ -57,4 +58,5 @@ macro_rules! 
span { }}; } +#[expect(unused_imports)] pub(crate) use {debug, debug_span, event, info, info_span, span, trace}; diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index c191c1dd2..1c11ea3fc 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -494,7 +494,6 @@ impl QueryRevisionsExtra { mut tracked_struct_ids: ThinVec<(Identity, Id)>, cycle_heads: CycleHeads, iteration: IterationCount, - converged: bool, ) -> Self { #[cfg(feature = "accumulator")] let acc = accumulated.is_empty(); @@ -515,7 +514,7 @@ impl QueryRevisionsExtra { cycle_heads, tracked_struct_ids, iteration: iteration.into(), - cycle_converged: converged, + cycle_converged: false, })) }; @@ -598,7 +597,10 @@ const _: [(); std::mem::size_of::()] = [(); std::mem::size_of::<[usize; if cfg!(feature = "accumulator") { 7 } else { 3 }]>()]; impl QueryRevisions { - pub(crate) fn fixpoint_initial(query: DatabaseKeyIndex) -> Self { + pub(crate) fn fixpoint_initial( + query: DatabaseKeyIndex, + iteration_count: IterationCount, + ) -> Self { Self { changed_at: Revision::start(), durability: Durability::MAX, @@ -610,9 +612,8 @@ impl QueryRevisions { #[cfg(feature = "accumulator")] AccumulatedMap::default(), ThinVec::default(), - CycleHeads::initial(query), - IterationCount::initial(), - false, + CycleHeads::initial(query, iteration_count), + iteration_count, ), } } @@ -655,7 +656,6 @@ impl QueryRevisions { ThinVec::default(), cycle_heads, IterationCount::default(), - false, ); } }; @@ -705,8 +705,7 @@ impl QueryRevisions { ) { if let Some(extra) = &mut self.extra.0 { extra.iteration.store_mut(iteration_count); - // I think updating is required for `validate_same_iteration` to work because - // unless we can skip self? 
+ extra .cycle_heads .update_iteration_count_mut(cycle_head_index, iteration_count); diff --git a/tests/backtrace.rs b/tests/backtrace.rs index 8aab2c058..b611cac86 100644 --- a/tests/backtrace.rs +++ b/tests/backtrace.rs @@ -108,7 +108,7 @@ fn backtrace_works() { at tests/backtrace.rs:32 1: query_cycle(Id(2)) at tests/backtrace.rs:45 - cycle heads: query_cycle(Id(2)) -> iteration=0 + cycle heads: query_cycle(Id(2)) -> iteration = 0 2: query_f(Id(2)) at tests/backtrace.rs:40 "#]] @@ -119,9 +119,9 @@ fn backtrace_works() { query stacktrace: 0: query_e(Id(3)) -> (R1, Durability::LOW) at tests/backtrace.rs:32 - 1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = iteration=0) + 1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = 0) at tests/backtrace.rs:45 - cycle heads: query_cycle(Id(3)) -> iteration=0 + cycle heads: query_cycle(Id(3)) -> iteration = 0 2: query_f(Id(3)) -> (R1, Durability::HIGH) at tests/backtrace.rs:40 "#]] diff --git a/tests/cycle.rs b/tests/cycle.rs index 5a6a25565..c1cd097e4 100644 --- a/tests/cycle.rs +++ b/tests/cycle.rs @@ -230,7 +230,6 @@ fn value(num: u8) -> Input { #[test] #[should_panic(expected = "dependency graph cycle")] fn self_panic() { - // TODO: This test takes very long to run? let mut db = DbImpl::new(); let a_in = Inputs::new(&db, vec![]); let a = Input::MinPanic(a_in); @@ -921,7 +920,7 @@ fn cycle_unchanged_nested() { a.assert_value(&db, 59); b.assert_value(&db, 60); - db.assert_logs_len(13); + db.assert_logs_len(15); // next revision, we change only A, which is not part of the cycle and the cycle does not // depend on. 
From 1dc2bea87ca7e5ea604474608e2ac4f104340077 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 14:30:05 +0200 Subject: [PATCH 22/45] Only collect cycle heads one level deep --- Cargo.toml | 2 +- src/cycle.rs | 44 ++++++++--- src/function/execute.rs | 159 +++++++++++++++++++--------------------- src/zalsa_local.rs | 2 + tests/cycle.rs | 2 +- 5 files changed, 114 insertions(+), 95 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cc1cd0347..9c419e339 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ intrusive-collections = "0.9.7" parking_lot = "0.12" portable-atomic = "1" rustc-hash = "2" -smallvec = "1" +smallvec = { version = "1", features = ["const_new"] } thin-vec = { version = "0.2.14" } tracing = { version = "0.1", default-features = false, features = ["std"] } diff --git a/src/cycle.rs b/src/cycle.rs index 42e131e3f..3d224c213 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -114,6 +114,19 @@ pub struct CycleHead { removed: AtomicBool, } +impl CycleHead { + pub const fn new( + database_key_index: DatabaseKeyIndex, + iteration_count: IterationCount, + ) -> Self { + Self { + database_key_index, + iteration_count: AtomicIterationCount(AtomicU8::new(iteration_count.0)), + removed: AtomicBool::new(false), + } + } +} + impl Clone for CycleHead { fn clone(&self) -> Self { Self { @@ -147,6 +160,10 @@ impl IterationCount { Self(u8::MAX) } + pub(crate) const fn is_panicked(self) -> bool { + self.0 == u8::MAX + } + pub(crate) const fn increment(self) -> Option { let next = Self(self.0 + 1); if next.0 <= MAX_ITERATIONS.0 { @@ -248,6 +265,12 @@ impl CycleHeads { } } + /// Iterates over all cycle heads that aren't equal to `own`. 
+ pub(crate) fn iter_not_eq(&self, own: DatabaseKeyIndex) -> impl Iterator { + self.iter() + .filter(move |head| head.database_key_index != own) + } + pub(crate) fn contains(&self, value: &DatabaseKeyIndex) -> bool { self.into_iter() .any(|head| head.database_key_index == *value && !head.removed.load(Ordering::Relaxed)) @@ -307,17 +330,20 @@ impl CycleHeads { self.0.reserve(other.0.len()); for head in other { - self.insert(head); + debug_assert!(!head.removed.load(Ordering::Relaxed)); + self.insert(head.database_key_index, head.iteration_count.load()); } } - pub(crate) fn insert(&mut self, head: &CycleHead) -> bool { - debug_assert!(!head.removed.load(Ordering::Relaxed)); - + pub(crate) fn insert( + &mut self, + database_key_index: DatabaseKeyIndex, + iteration_count: IterationCount, + ) -> bool { if let Some(existing) = self .0 .iter_mut() - .find(|candidate| candidate.database_key_index == head.database_key_index) + .find(|candidate| candidate.database_key_index == database_key_index) { let removed = existing.removed.get_mut(); @@ -327,18 +353,18 @@ impl CycleHeads { true } else { let existing_count = existing.iteration_count.load_mut(); - let head_count = head.iteration_count.load(); assert_eq!( - existing_count, head_count, - "Can't merge cycle heads {:?} with different iteration counts ({existing_count:?}, {head_count:?})", + existing_count, iteration_count, + "Can't merge cycle heads {:?} with different iteration counts ({existing_count:?}, {iteration_count:?})", existing.database_key_index ); false } } else { - self.0.push(head.clone()); + self.0 + .push(CycleHead::new(database_key_index, iteration_count)); true } } diff --git a/src/function/execute.rs b/src/function/execute.rs index 44a475d9b..d391a86c8 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -160,11 +160,14 @@ where let mut iteration_count = IterationCount::initial(); if let Some(old_memo) = opt_old_memo { + let memo_iteration_count = old_memo.revisions.iteration(); + if 
old_memo.verified_at.load() == zalsa.current_revision() && old_memo.cycle_heads().contains(&database_key_index) + && !memo_iteration_count.is_panicked() { previous_memo = Some(old_memo); - iteration_count = old_memo.revisions.iteration(); + iteration_count = memo_iteration_count; } } @@ -185,15 +188,19 @@ where // If there are no cycle heads, break out of the loop (`cycle_heads_mut` returns `None` if the cycle head list is empty) let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() else { + claim_guard.set_release_mode(ReleaseMode::SelfOnly); break (new_value, completed_query); }; - // TODO: Remove "removed" cycle heads" let mut cycle_heads = std::mem::take(cycle_heads); - - // Recursively resolve all cycle heads that this head depends on. - // This isn't required in a single-threaded execution but it's not guaranteed that all nested cycles are listed - // in cycle heads in a multi-threaded execution: + let mut missing_heads: SmallVec<[(DatabaseKeyIndex, IterationCount); 1]> = + SmallVec::new_const(); + let mut max_iteration_count = iteration_count; + let mut depends_on_self = false; + + // Ensure that we resolve the latest cycle heads from any provisional value this query depended on during execution. + // This isn't required in a single-threaded execution, but it's not guaranteed that `cycle_heads` contains all cycles + // in a multi-threaded execution: // // t1: a -> b // t2: c -> b (blocks on t1) @@ -201,32 +208,44 @@ where // t1: a -> b (completes b, b has c(0) in its cycle heads, releases `b`, which resumes `t2`, and `retry_provisional` blocks on `c` (t2)) // t2: c -> a (cycle, returns fixpoint initial for a with a(0) in heads) // t2: completes c, `provisional_retry` blocks on `a` (t2) - // t1: a (complets `b` with `c` in heads) + // t1: a (completes `b` with `c` in heads) // // Note how `a` only depends on `c` but not `a`. This is because `a` only saw the initial value of `c` and wasn't updated when `c` completed. 
// That's why we need to resolve the cycle heads recursively so that `cycle_heads` contains all cycle heads at the moment this query completed. - let mut queue: SmallVec<[DatabaseKeyIndex; 4]> = cycle_heads - .iter() - .map(|head| head.database_key_index) - .filter(|head| *head != database_key_index) - .collect(); - - // TODO: Can we also resolve whether the cycles have converged here? - while let Some(head) = queue.pop() { - let ingredient = zalsa.lookup_ingredient(head.ingredient_index()); - let nested_heads = ingredient.cycle_heads(zalsa, head.key_index()); - - for head in nested_heads { - if cycle_heads.insert(head) && !queue.contains(&head.database_key_index) { - queue.push(head.database_key_index); + for head in &cycle_heads { + max_iteration_count = max_iteration_count.max(head.iteration_count.load()); + depends_on_self |= head.database_key_index == database_key_index; + + let ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + + for nested_head in + ingredient.cycle_heads(zalsa, head.database_key_index.key_index()) + { + let nested_as_tuple = ( + nested_head.database_key_index, + nested_head.iteration_count.load(), + ); + + if !cycle_heads.contains(&nested_head.database_key_index) + && !missing_heads.contains(&nested_as_tuple) + { + missing_heads.push(nested_as_tuple); } } } + for (head_key, iteration_count) in missing_heads { + max_iteration_count = max_iteration_count.max(iteration_count); + depends_on_self |= head_key == database_key_index; + + cycle_heads.insert(head_key, iteration_count); + } + let outer_cycle = outer_cycle(zalsa, zalsa_local, &cycle_heads, database_key_index); // Did the new result we got depend on our own provisional value, in a cycle? 
- if !cycle_heads.contains(&database_key_index) { + if !depends_on_self { if let Some(outer) = outer_cycle { claim_guard.set_release_mode(ReleaseMode::TransferTo(outer)); } else { @@ -260,30 +279,19 @@ where let last_provisional_value = last_provisional_value.expect( "`fetch_cold_cycle` should have inserted a provisional memo with Cycle::initial", ); - crate::tracing::debug!( + tracing::debug!( "{database_key_index:?}: execute: \ I am a cycle head, comparing last provisional value with new value" ); - // determine if it is a nested query. - // This is a nested query if it depends on any other cycle head than itself - // where claiming it results in a cycle. In that case, both queries form a single connected component - // that we can iterate together rather than having separate nested fixpoint iterations. - let this_converged = C::values_equal(&new_value, last_provisional_value); iteration_count = if outer_cycle.is_some() { iteration_count } else { - cycle_heads - .iter() - .map(|head| head.iteration_count.load()) - .max() - .unwrap_or(iteration_count) + max_iteration_count }; - // If the new result is equal to the last provisional result, the cycle has - // converged and we are done. 
if !this_converged { // We are in a cycle that hasn't converged; ask the user's // cycle-recovery function what to do: @@ -295,7 +303,7 @@ where ) { crate::CycleRecoveryAction::Iterate => {} crate::CycleRecoveryAction::Fallback(fallback_value) => { - crate::tracing::debug!( + tracing::debug!( "{database_key_index:?}: execute: user cycle_fn says to fall back" ); new_value = fallback_value; @@ -303,52 +311,45 @@ where } } - completed_query - .revisions - .set_cycle_converged(this_converged); - if let Some(outer_cycle) = outer_cycle { tracing::debug!( "Detected nested cycle {database_key_index:?}, iterate it as part of the outer cycle {outer_cycle:?}" ); completed_query.revisions.set_cycle_heads(cycle_heads); + // Store whether this cycle has converged, so that the outer cycle can check it. + completed_query + .revisions + .set_cycle_converged(this_converged); claim_guard.set_release_mode(ReleaseMode::TransferTo(outer_cycle)); break (new_value, completed_query); } - // Verify that all cycles have converged, including all inner cycles. + // Verify that this cycle and all inner cycles have converged. 
let converged = this_converged - && cycle_heads - .iter() - .filter(|head| head.database_key_index != database_key_index) - .all(|head| { - let ingredient = - zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); + && cycle_heads.iter_not_eq(database_key_index).all(|head| { + let ingredient = + zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - let converged = - ingredient.cycle_converged(zalsa, head.database_key_index.key_index()); + let converged = + ingredient.cycle_converged(zalsa, head.database_key_index.key_index()); - if !converged { - tracing::debug!("inner cycle {database_key_index:?} has not converged"); - } + if !converged { + tracing::debug!("inner cycle {database_key_index:?} has not converged"); + } - converged - }); + converged + }); if converged { - crate::tracing::debug!( - "{database_key_index:?}: execute: fixpoint iteration has a final value after {iteration_count:?} iterations" - ); + tracing::debug!( + "{database_key_index:?}: execute: fixpoint iteration has a final value after {iteration_count:?} iterations" + ); // Set the nested cycles as verified. This is necessary because // `validate_provisional` doesn't follow cycle heads recursively (and the inner memos now depend on all cycle heads). - for head in cycle_heads { - if head.database_key_index == database_key_index { - continue; - } - + for head in cycle_heads.iter_not_eq(database_key_index) { let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); ingredient.finalize_cycle_head(zalsa, head.database_key_index.key_index()); @@ -359,10 +360,7 @@ where break (new_value, completed_query); } - completed_query.revisions.set_cycle_heads(cycle_heads); - - // `iteration_count` can't overflow as we check it against `MAX_ITERATIONS` - // which is less than `u32::MAX`. + // The fixpoint iteration hasn't converged. Iterate again... 
iteration_count = iteration_count.increment().unwrap_or_else(|| { ::tracing::warn!("{database_key_index:?}: execute: too many cycle iterations"); panic!("{database_key_index:?}: execute: too many cycle iterations") @@ -375,28 +373,15 @@ where }) }); - crate::tracing::info!( + tracing::info!( "{database_key_index:?}: execute: iterate again ({iteration_count:?})...", ); - completed_query - .revisions - .update_iteration_count_mut(database_key_index, iteration_count); - - for head in completed_query.revisions.cycle_heads() { - if head.database_key_index == database_key_index { - continue; - } - + // Update the iteration count of nested cycles + for head in cycle_heads.iter_not_eq(database_key_index) { let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); - // let iteration_count = if was_initial && !head.iteration_count.load().is_initial() { - // IterationCount::first_after_restart() - // } else { - // iteration_count - // }; - ingredient.set_cycle_iteration_count( zalsa, head.database_key_index.key_index(), @@ -404,6 +389,13 @@ where ); } + // Update the iteration count of this cycle head, but only after restoring + // the cycle heads array. + completed_query.revisions.set_cycle_heads(cycle_heads); + completed_query + .revisions + .update_iteration_count_mut(database_key_index, iteration_count); + let new_memo = self.insert_memo( zalsa, id, @@ -527,8 +519,7 @@ fn outer_cycle( current_key: DatabaseKeyIndex, ) -> Option { cycle_heads - .iter() - .filter(|head| head.database_key_index != current_key) + .iter_not_eq(current_key) .find(|head| { // SAFETY: We don't call into with_query_stack recursively let is_on_stack = unsafe { diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 1c11ea3fc..5ec0fbba7 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -564,6 +564,8 @@ struct QueryRevisionsExtraInner { iteration: AtomicIterationCount, + /// Stores for nested cycle heads whether they've converged in the last iteration. 
+ /// This value is always `false` for other queries. cycle_converged: bool, } diff --git a/tests/cycle.rs b/tests/cycle.rs index c1cd097e4..5e46cc0be 100644 --- a/tests/cycle.rs +++ b/tests/cycle.rs @@ -920,7 +920,7 @@ fn cycle_unchanged_nested() { a.assert_value(&db, 59); b.assert_value(&db, 60); - db.assert_logs_len(15); + db.assert_logs_len(13); // next revision, we change only A, which is not part of the cycle and the cycle does not // depend on. From 6c5495af8dc8ac45086d796eec44dd3b70454290 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 17:12:41 +0200 Subject: [PATCH 23/45] More cleanups --- src/function.rs | 9 +++++++++ src/function/execute.rs | 33 ++++++++++++++++++++++++--------- src/ingredient.rs | 6 ++++++ src/zalsa_local.rs | 1 + 4 files changed, 40 insertions(+), 9 deletions(-) diff --git a/src/function.rs b/src/function.rs index 9e5e3551f..b69be9121 100644 --- a/src/function.rs +++ b/src/function.rs @@ -94,6 +94,15 @@ pub trait Configuration: Any { /// Decide whether to iterate a cycle again or fallback. `value` is the provisional return /// value from the latest iteration of this cycle. `count` is the number of cycle iterations /// we've already completed. + /// + /// Note: There is no guarantee that `count` always starts at 0. It's possible that + /// the function is called with a non-zero value even if it is the first time around for + /// this specific query if the query has become the outermost cycle of a larger cycle. + /// In this case, Salsa uses the `count` value of the already iterating cycle as the start. + /// + /// It's also not guaranteed that `count` values are contiguous. The function might not be called + /// if this query converged in this specific iteration OR if the query only participates conditionally + /// in the cycle (e.g. every other iteration). 
fn recover_from_cycle<'db>( db: &'db Self::DbView, value: &Self::Output<'db>, diff --git a/src/function/execute.rs b/src/function/execute.rs index d391a86c8..b3d26988a 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -192,6 +192,7 @@ where break (new_value, completed_query); }; + // Take the cycle heads to not-fight-rust's-borrow-checker. let mut cycle_heads = std::mem::take(cycle_heads); let mut missing_heads: SmallVec<[(DatabaseKeyIndex, IterationCount); 1]> = SmallVec::new_const(); @@ -245,6 +246,7 @@ where let outer_cycle = outer_cycle(zalsa, zalsa_local, &cycle_heads, database_key_index); // Did the new result we got depend on our own provisional value, in a cycle? + // If not, return because this query is not a cycle head. if !depends_on_self { if let Some(outer) = outer_cycle { claim_guard.set_release_mode(ReleaseMode::TransferTo(outer)); @@ -256,6 +258,8 @@ where break (new_value, completed_query); } + // Get the last provisional value for this query so that we can compare it with the new value + // to test if the cycle converged. let last_provisional_value = if let Some(last_provisional) = previous_memo { // We have a last provisional value from our previous time around the loop. last_provisional.value.as_ref() @@ -281,15 +285,23 @@ where ); tracing::debug!( "{database_key_index:?}: execute: \ - I am a cycle head, comparing last provisional value with new value" + I am a cycle head, comparing last provisional value with new value" ); let this_converged = C::values_equal(&new_value, last_provisional_value); - iteration_count = if outer_cycle.is_some() { - iteration_count - } else { + // If this is the outermost cycle, use the maximum iteration count of all cycles. + // This is important for when later iterations introduce new cycle heads (that then + // become the outermost cycle). 
We want to ensure that the iteration count keeps increasing + // for all queries or they won't be re-executed because `validate_same_iteration` would + // pass when we go from 1 -> 0 and then increment by 1 to 1). + iteration_count = if outer_cycle.is_none() { max_iteration_count + } else { + // Otherwise keep the iteration count because outer cycles + // already have a cycle head with this exact iteration count (and we don't allow + // heads from different iterations). + iteration_count }; if !this_converged { @@ -321,12 +333,15 @@ where completed_query .revisions .set_cycle_converged(this_converged); + + // Transfer ownership of this query to the outer cycle, so that it can claim it + // and other threads don't compete for the same lock. claim_guard.set_release_mode(ReleaseMode::TransferTo(outer_cycle)); break (new_value, completed_query); } - // Verify that this cycle and all inner cycles have converged. + // If this is the outermost cycle, test if all inner cycles have converged as well. let converged = this_converged && cycle_heads.iter_not_eq(database_key_index).all(|head| { let ingredient = @@ -348,7 +363,7 @@ where ); // Set the nested cycles as verified. This is necessary because - // `validate_provisional` doesn't follow cycle heads recursively (and the inner memos now depend on all cycle heads). + // `validate_provisional` doesn't follow cycle heads recursively (and the memos now depend on all cycle heads). for head in cycle_heads.iter_not_eq(database_key_index) { let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); @@ -377,7 +392,7 @@ where "{database_key_index:?}: execute: iterate again ({iteration_count:?})...", ); - // Update the iteration count of nested cycles + // Update the iteration count of nested cycles. 
for head in cycle_heads.iter_not_eq(database_key_index) { let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); @@ -390,7 +405,7 @@ where } // Update the iteration count of this cycle head, but only after restoring - // the cycle heads array. + // the cycle heads array (or this becomes a no-op). completed_query.revisions.set_cycle_heads(cycle_heads); completed_query .revisions @@ -415,7 +430,7 @@ where continue; }; - crate::tracing::debug!( + tracing::debug!( "{database_key_index:?}: execute_maybe_iterate: result.revisions = {revisions:#?}", revisions = &completed_query.revisions ); diff --git a/src/ingredient.rs b/src/ingredient.rs index f58933371..e21e3690c 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -162,10 +162,16 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { } // Function ingredient methods + /// Tests if the (nested) cycle head `_input` has converged in the most recent iteration. + /// + /// Returns `false` if the Memo doesn't exist or if called on a non-cycle head. fn cycle_converged(&self, _zalsa: &Zalsa, _input: Id) -> bool { unreachable!("cycle_converged should only be called on cycle heads and only functions can be cycle heads"); } + /// Updates the iteration count for the (nested) cycle head `_input` to `iteration_count`. + /// + /// This is a no-op if the memo doesn't exist or if called on a Memo without cycle heads. 
fn set_cycle_iteration_count( &self, _zalsa: &Zalsa, diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 5ec0fbba7..13701c85b 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -691,6 +691,7 @@ impl QueryRevisions { let Some(extra) = &self.extra.0 else { return; }; + debug_assert!(extra.iteration.load() <= iteration_count); extra.iteration.store(iteration_count); From 2071c7f0b43312edc66b10fb346dac4015701f39 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 17:59:40 +0200 Subject: [PATCH 24/45] More docs --- src/function/execute.rs | 14 +++++---- src/function/sync.rs | 69 +++++++++++++++++++++++++++++------------ 2 files changed, 58 insertions(+), 25 deletions(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index b3d26988a..2e0a91671 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -248,11 +248,13 @@ where // Did the new result we got depend on our own provisional value, in a cycle? // If not, return because this query is not a cycle head. if !depends_on_self { - if let Some(outer) = outer_cycle { - claim_guard.set_release_mode(ReleaseMode::TransferTo(outer)); - } else { - claim_guard.set_release_mode(ReleaseMode::SelfOnly); - } + // For as long as this query participates in any cycle, don't release its lock, instead + // transfer it to the outermost cycle head (if any). This prevents any other thread + // from claiming this query (all cycle heads are potential entry points to the same cycle), + // which would result in them competing for the same locks (we want the locks to converge to a single cycle head). 
+ claim_guard.set_release_mode(ReleaseMode::TransferTo( + outer_cycle.expect("query to of an outer cycle."), + )); completed_query.revisions.set_cycle_heads(cycle_heads); break (new_value, completed_query); @@ -324,7 +326,7 @@ where } if let Some(outer_cycle) = outer_cycle { - tracing::debug!( + tracing::info!( "Detected nested cycle {database_key_index:?}, iterate it as part of the outer cycle {outer_cycle:?}" ); diff --git a/src/function/sync.rs b/src/function/sync.rs index b1df19d91..83c519c90 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -35,8 +35,18 @@ pub(crate) struct SyncState { /// waiting for this query to complete. anyone_waiting: bool, + /// Whether any other query has transferred its lock ownership to this query. + /// This is only an optimization so that the expensive unblocking of transferred queries + /// can be skipped if `false`. This field might be `true` in cases where queries *were* transferred + /// to this query, but have since then been transferred to another query (in a later iteration). is_transfer_target: bool, - claimed_twice: bool, + + /// Whether this query has been claimed by the query that currently owns it. + /// + /// If `a` has been transferred to `b` and the stack for t1 is `b -> a`, then `a` can be claimed + /// and `claimed_transferred` is set to `true`. However, t2 won't be able to claim `a` because + /// it doesn't own `b`. + claimed_transferred: bool, } impl SyncTable { @@ -47,8 +57,12 @@ impl SyncTable { } } + /// Claims the given key index, or blocks if it is running on another thread. + /// + /// `REENTRANT` controls whether claiming a query whose ownership has been transferred to another query + /// should result in a cycle (`false`) or can be claimed (`true`), if not already done so. 
#[inline] - pub(crate) fn try_claim<'me, const REENTRANT: bool>( + pub(crate) fn try_claim<'me, const TRANSFERRED: bool>( &'me self, zalsa: &'me Zalsa, key_index: Id, @@ -59,7 +73,8 @@ impl SyncTable { let id = match occupied_entry.get().id { SyncOwnerId::Thread(id) => id, SyncOwnerId::Transferred => { - return match self.try_claim_transferred::(zalsa, occupied_entry) + return match self + .try_claim_transferred::(zalsa, occupied_entry) { Ok(claimed) => claimed, Err(other_thread) => match other_thread.block(write) { @@ -96,7 +111,7 @@ impl SyncTable { id: SyncOwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, - claimed_twice: false, + claimed_transferred: false, }); ClaimResult::Claimed(ClaimGuard { key_index, @@ -128,7 +143,9 @@ impl SyncTable { } ClaimTransferredResult::Reentrant => { let SyncState { - id, claimed_twice, .. + id, + claimed_transferred: claimed_twice, + .. } = entry.into_mut(); debug_assert!(!*claimed_twice); @@ -150,7 +167,7 @@ impl SyncTable { id: SyncOwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, - claimed_twice: false, + claimed_transferred: false, }); Ok(ClaimResult::Claimed(ClaimGuard { key_index, @@ -175,10 +192,18 @@ impl SyncTable { #[derive(Copy, Clone, Debug)] pub(crate) enum SyncOwnerId { - /// Entry is owned by this thread + /// Query is owned by this thread Thread(thread::ThreadId), - /// Entry has been transferred and is owned by another thread. - /// The id is known by the `DependencyGraph`. + + /// The query's lock ownership has been transferred to another query. + /// E.g. if `a` transfers its ownership to `b`, then only the thread in the critical path + /// to complete b` can claim `a` (in most instances, only the thread owning `b` can claim `a`). + /// + /// The thread owning `a` is stored in the `DependencyGraph`. + /// + /// A query can be marked as `Transferred` even if it has since then been released by the owning query. 
+ /// In that case, the query is effectively unclaimed and the `Transferred` state is stale. The reason + /// for this is that it avoids the need for locking each sync table when releasing the transferred queries. Transferred, } @@ -220,7 +245,7 @@ impl<'me> ClaimGuard<'me> { let SyncState { anyone_waiting, is_transfer_target, - claimed_twice, + claimed_transferred: claimed_twice, .. } = state; @@ -250,8 +275,8 @@ impl<'me> ClaimGuard<'me> { panic!("key claimed twice?"); }; - if state.get().claimed_twice { - state.get_mut().claimed_twice = false; + if state.get().claimed_transferred { + state.get_mut().claimed_transferred = false; state.get_mut().id = SyncOwnerId::Transferred; } else { self.release(WaitResult::Completed, state.remove()); @@ -261,6 +286,7 @@ impl<'me> ClaimGuard<'me> { #[cold] #[inline(never)] pub(crate) fn transfer(&self, new_owner: DatabaseKeyIndex) { + tracing::info!("transfer"); let self_key = self.database_key_index(); let owner_ingredient = self.zalsa.lookup_ingredient(new_owner.ingredient_index()); @@ -283,7 +309,7 @@ impl<'me> ClaimGuard<'me> { let SyncState { anyone_waiting, id, - claimed_twice, + claimed_transferred: claimed_twice, .. } = syncs.get_mut(&self.key_index).expect("key claimed twice?"); @@ -326,23 +352,28 @@ impl std::fmt::Debug for SyncTable { } } +/// Controls how the lock is released when the `ClaimGuard` is dropped. #[derive(Copy, Clone, Debug, Default)] pub(crate) enum ReleaseMode { /// The default release mode. /// - /// Releases the lock of the current query for claims that are not transferred. Queries who's ownership - /// were transferred to this query will be transitively unlocked. - /// - /// If this lock is owned by another query (because it was transferred), then releasing is a no-op. + /// Releases the query for which this claim guard holds the lock and any queries that have + /// transferred ownership to this query. #[default] Default, + /// Only releases the lock for this query. 
Any query that has transferred ownership to this query + /// will remain locked. + /// + /// If this thread panics, the query will be released as normal (default mode). SelfOnly, /// Transfers the ownership of the lock to the specified query. /// - /// All waiting queries will be awakened so that they can retry and block on the new owner thread. - /// The new owner thread (or any thread it blocks on) will be able to acquire the lock (reentrant). + /// The query will remain locked except the query that's currently blocking this query from completing + /// (to avoid deadlocks). + /// + /// If this thread panics, the query will be released as normal (default mode). TransferTo(DatabaseKeyIndex), } From c72efefc3f8c7dad8eb5ab5ff28d9c41bcdc4ff8 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 9 Oct 2025 21:53:04 +0200 Subject: [PATCH 25/45] More comments --- src/function.rs | 5 +- src/function/sync.rs | 60 ++++++++++------- src/runtime.rs | 111 ++++++++++++++++---------------- src/runtime/dependency_graph.rs | 50 ++++++-------- 4 files changed, 114 insertions(+), 112 deletions(-) diff --git a/src/function.rs b/src/function.rs index b69be9121..36aacceed 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, SyncGuard, SyncOwnerId, SyncTable}; +pub(crate) use sync::{ClaimGuard, ClaimResult, SyncGuard, SyncOwnerId, SyncState, SyncTable}; use std::any::Any; use std::fmt; @@ -13,7 +13,6 @@ use crate::cycle::{ }; use crate::database::RawDatabase; use crate::function::delete::DeletedEntries; -use crate::function::sync::ClaimResult; use crate::hash::{FxHashSet, FxIndexSet}; use crate::ingredient::{Ingredient, WaitForResult}; use crate::key::DatabaseKeyIndex; @@ -419,7 +418,7 @@ where fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { match self.sync_table.try_claim::(zalsa, key_index) { ClaimResult::Running(blocked_on) => 
WaitForResult::Running(blocked_on), - ClaimResult::Cycle { inner } => WaitForResult::Cycle { inner }, + ClaimResult::Cycle { inner: inner } => WaitForResult::Cycle { inner }, ClaimResult::Claimed(_) => WaitForResult::Available, } } diff --git a/src/function/sync.rs b/src/function/sync.rs index 83c519c90..da6cdc5ab 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -2,7 +2,9 @@ use rustc_hash::FxHashMap; use std::collections::hash_map::OccupiedEntry; use crate::key::DatabaseKeyIndex; -use crate::runtime::{BlockResult, ClaimTransferredResult, OtherThread, Running, WaitResult}; +use crate::runtime::{ + BlockOnTransferredOwner, BlockResult, BlockTransferredResult, Running, WaitResult, +}; use crate::sync::thread::{self}; use crate::sync::Mutex; use crate::tracing; @@ -22,7 +24,12 @@ pub(crate) enum ClaimResult<'a> { /// Can't claim the query because it is running on an other thread. Running(Running<'a>), /// Claiming the query results in a cycle. - Cycle { inner: bool }, + Cycle { + /// `true` if this is a cycle with an inner query. For example, if `a` transferred its ownership to + /// `b`. If the thread claiming `b` tries to claim `a`, then this results in a cycle unless + /// `REENTRANT` is `true` (in which case it can be claimed). + inner: bool, + }, /// Successfully claimed the query. Claimed(ClaimGuard<'a>), } @@ -59,10 +66,11 @@ impl SyncTable { /// Claims the given key index, or blocks if it is running on another thread. /// - /// `REENTRANT` controls whether claiming a query whose ownership has been transferred to another query - /// should result in a cycle (`false`) or can be claimed (`true`), if not already done so. + /// `REENTRANT` controls whether a query that transferred its ownership to another query for which + /// this thread currently holds the lock for can be claimed. For example, if `a` transferred its ownership + /// to `b`, and this thread holds the lock for `b`, then this thread can also claim `a` but only if `REENTRANT` is `true`. 
#[inline] - pub(crate) fn try_claim<'me, const TRANSFERRED: bool>( + pub(crate) fn try_claim<'me, const REENTRANT: bool>( &'me self, zalsa: &'me Zalsa, key_index: Id, @@ -73,8 +81,7 @@ impl SyncTable { let id = match occupied_entry.get().id { SyncOwnerId::Thread(id) => id, SyncOwnerId::Transferred => { - return match self - .try_claim_transferred::(zalsa, occupied_entry) + return match self.try_claim_transferred::(zalsa, occupied_entry) { Ok(claimed) => claimed, Err(other_thread) => match other_thread.block(write) { @@ -129,28 +136,25 @@ impl SyncTable { &'me self, zalsa: &'me Zalsa, mut entry: OccupiedEntry, - ) -> Result, OtherThread<'me>> { + ) -> Result, Box>> { let key_index = *entry.key(); let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); + let thread_id = thread::current().id(); match zalsa .runtime() - .claim_transferred::(database_key_index) + .block_transferred(database_key_index, thread_id) { - ClaimTransferredResult::ClaimedBy(other_thread) => { - entry.get_mut().anyone_waiting = true; - Err(other_thread) - } - ClaimTransferredResult::Reentrant => { + BlockTransferredResult::ImTheOwner if REENTRANT => { let SyncState { id, - claimed_transferred: claimed_twice, + claimed_transferred, .. 
} = entry.into_mut(); - debug_assert!(!*claimed_twice); + debug_assert!(!*claimed_transferred); - *id = SyncOwnerId::Thread(thread::current().id()); - *claimed_twice = true; + *id = SyncOwnerId::Thread(thread_id); + *claimed_transferred = true; Ok(ClaimResult::Claimed(ClaimGuard { key_index, @@ -159,12 +163,14 @@ impl SyncTable { mode: ReleaseMode::SelfOnly, })) } - ClaimTransferredResult::Cycle { inner: nested } => { - Ok(ClaimResult::Cycle { inner: nested }) + BlockTransferredResult::ImTheOwner => Ok(ClaimResult::Cycle { inner: true }), + BlockTransferredResult::OwnedBy(other_thread) => { + entry.get_mut().anyone_waiting = true; + Err(other_thread) } - ClaimTransferredResult::Released => { + BlockTransferredResult::Released => { entry.insert(SyncState { - id: SyncOwnerId::Thread(thread::current().id()), + id: SyncOwnerId::Thread(thread_id), anyone_waiting: false, is_transfer_target: false, claimed_transferred: false, @@ -179,7 +185,13 @@ impl SyncTable { } } - fn make_transfer_target(&self, key_index: Id) -> Option { + /// Makes `key_index` an owner of a transferred query. + /// + /// Returns the `SyncOwnerId` of the thread that currently owns this query. + /// + /// Note: The result of this method will immediately become stale unless the thread owning `key_index` + /// is currently blocked on this thread (claiming `key_index` from this thread results in a cycle). + fn make_owner_of(&self, key_index: Id) -> Option { let mut syncs = self.syncs.lock(); syncs.get_mut(&key_index).map(|state| { state.anyone_waiting = true; @@ -294,7 +306,7 @@ impl<'me> ClaimGuard<'me> { // Get the owning thread of `new_owner`. 
let owner_sync_table = owner_ingredient.sync_table(); let owner_thread_id = owner_sync_table - .make_transfer_target(new_owner.key_index()) + .make_owner_of(new_owner.key_index()) .expect("new owner to be a locked query"); tracing::debug!( diff --git a/src/runtime.rs b/src/runtime.rs index 6b6b6c9f7..28883dfb6 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,13 +1,13 @@ -use self::dependency_graph::DependencyGraph; +use self::dependency_graph::{CanClaimTransferred, DependencyGraph}; use crate::durability::Durability; -use crate::function::{SyncGuard, SyncOwnerId}; +use crate::function::{SyncGuard, SyncOwnerId, SyncState}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::thread::{self, ThreadId}; use crate::sync::Mutex; use crate::table::Table; use crate::zalsa::Zalsa; -use crate::{Cancelled, Event, EventKind, Revision}; +use crate::{Cancelled, Event, EventKind, Id, Revision}; mod dependency_graph; @@ -58,42 +58,43 @@ pub(crate) enum BlockResult<'me> { Cycle, } -pub(crate) enum ClaimTransferredResult<'me> { - /// The transferred query has been successfully claimed. - Reentrant, +pub(crate) enum BlockTransferredResult<'me> { + /// The current thread is the owner of the transferred query + /// and it can claim it if it wants to. + ImTheOwner, - /// The query is running on another thread. - ClaimedBy(OtherThread<'me>), - - /// Blocking resulted in a cycle. - /// - /// The lock is hold by the current thread or there's another thread that is waiting on the current thread, - /// and blocking this thread on the other thread would result in a deadlock/cycle. - Cycle { inner: bool }, + /// The query is owned/running on another thread. + OwnedBy(Box>), - /// Query is no longer a transferred query. + /// The query has transferred its ownership to another query previously but that query has + /// since then completed and released the lock. 
Released, } -pub(super) struct OtherThread<'me> { +pub(super) struct BlockOnTransferredOwner<'me> { dg: crate::sync::MutexGuard<'me, DependencyGraph>, + /// The query that we're trying to claim. database_key: DatabaseKeyIndex, + /// The thread that currently owns the lock for the transferred query. other_id: ThreadId, + /// The current thread that is trying to claim the transferred query. + thread_id: ThreadId, } -impl<'me> OtherThread<'me> { +impl<'me> BlockOnTransferredOwner<'me> { + /// Block on the other thread to complete the computation. pub(super) fn block(self, query_mutex_guard: SyncGuard<'me>) -> BlockResult<'me> { - let thread_id = thread::current().id(); // Cycle in the same thread. - if thread_id == self.other_id { + if self.thread_id == self.other_id { return BlockResult::Cycle; } - if self.dg.depends_on(self.other_id, thread_id) { + if self.dg.depends_on(self.other_id, self.thread_id) { crate::tracing::debug!( "block_on: cycle detected for {:?} in thread {thread_id:?} on {:?}", self.database_key, - self.other_id + self.other_id, + thread_id = self.thread_id ); return BlockResult::Cycle; } @@ -103,7 +104,7 @@ impl<'me> OtherThread<'me> { query_mutex_guard, database_key: self.database_key, other_id: self.other_id, - thread_id, + thread_id: self.thread_id, }))) } } @@ -299,6 +300,38 @@ impl Runtime { }))) } + /// Tries to claim ownership of a transferred query where `thread_id` is the current thread and `query` + /// is the query (that had its ownership transferred) to claim. + /// + /// For this operation to be reasonable, the caller must ensure that the lock on `query` is not released + /// before this operation completes. 
+ pub(super) fn block_transferred( + &self, + query: DatabaseKeyIndex, + current_id: ThreadId, + ) -> BlockTransferredResult<'_> { + let mut dg = self.dependency_graph.lock(); + + let owner_thread = dg.resolved_transferred_id(query, None); + + let Some(owner_thread_id) = owner_thread else { + // The query transferred its ownership but the owner has since then released the lock. + return BlockTransferredResult::Released; + }; + + if owner_thread_id == current_id || dg.depends_on(owner_thread_id, current_id) { + BlockTransferredResult::ImTheOwner + } else { + // Lock is owned by another thread, wait for it to be released. + BlockTransferredResult::OwnedBy(Box::new(BlockOnTransferredOwner { + dg, + database_key: query, + other_id: owner_thread_id, + thread_id: current_id, + })) + } + } + /// Invoked when this runtime completed computing `database_key` with /// the given result `wait_result` (`wait_result` should be `None` if /// computing `database_key` panicked and could not complete). @@ -325,40 +358,6 @@ impl Runtime { .unblock_transferred_queries(database_key, wait_result); } - pub(super) fn claim_transferred( - &self, - query: DatabaseKeyIndex, - ) -> ClaimTransferredResult<'_> { - let mut dg = self.dependency_graph.lock(); - let thread_id = thread::current().id(); - - match dg.block_on_transferred(query, thread_id) { - Ok(_) => { - if !REENTRANT { - tracing::debug!("Claiming {query:?} results in a cycle because re-entrant lock is not allowed"); - ClaimTransferredResult::Cycle { inner: true } - } else { - tracing::debug!("Reentrant lock {query:?}"); - - ClaimTransferredResult::Reentrant - } - } - // Lock is owned by another thread, wait for it to be released. - Err(Some(thread_id)) => { - tracing::debug!( - "Waiting for transfered lock {query:?} to be released by thread {thread_id:?}" - ); - ClaimTransferredResult::ClaimedBy(OtherThread { - dg, - database_key: query, - other_id: thread_id, - }) - } - // Lock was transferred but is no more. Replace the entry. 
- Err(None) => ClaimTransferredResult::Released, - } - } - #[cold] pub(super) fn remove_transferred(&self, query: DatabaseKeyIndex) { self.dependency_graph.lock().remove_transferred(query); diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 35cc2c650..8be482c11 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -190,28 +190,6 @@ impl DependencyGraph { unblock_recursive(self, database_key, wait_result); } - /// Returns `Ok(thread_id)` if `database_key_index` is a query who's lock ownership has been transferred to `thread_id` (potentially over multiple steps) - /// and the lock was claimed. Returns `Err(Some(thread_id))` if the lock was not claimed. - /// - /// Returns `Err(None)` if `database_key_index` hasn't been transferred or its owning lock has since then been removed. - pub(super) fn block_on_transferred( - &mut self, - database_key_index: DatabaseKeyIndex, - current_id: ThreadId, - ) -> Result> { - let owner_thread = self.resolved_transferred_id(database_key_index, None); - - let Some((thread_id, owner_key)) = owner_thread else { - return Err(None); - }; - - if thread_id == current_id || self.depends_on(thread_id, current_id) { - Ok(owner_key) - } else { - Err(Some(thread_id)) - } - } - pub(super) fn remove_transferred(&mut self, database_key: DatabaseKeyIndex) { if let Some((_, owner)) = self.transferred.remove(&database_key) { let dependents = self.transferred_dependents.get_mut(&owner).unwrap(); @@ -224,7 +202,7 @@ impl DependencyGraph { &self, database_key: DatabaseKeyIndex, ignore: Option, - ) -> Option<(ThreadId, DatabaseKeyIndex)> { + ) -> Option { let &(mut resolved_thread, owner) = self.transferred.get(&database_key)?; let mut current_owner = owner; @@ -237,7 +215,7 @@ impl DependencyGraph { current_owner = next_key; } - Some((resolved_thread, owner)) + Some(resolved_thread) } pub(super) fn transfer_lock( @@ -249,13 +227,15 @@ impl DependencyGraph { ) { let new_owner_thread = match 
new_owner_thread { SyncOwnerId::Thread(thread) => thread, - SyncOwnerId::Transferred => { - self.resolved_transferred_id(new_owner, Some(query)) - .unwrap() - .0 - } + SyncOwnerId::Transferred => self + .resolved_transferred_id(new_owner, Some(query)) + .unwrap(), }; + debug_assert!( + new_owner_thread == current_thread || self.depends_on(new_owner_thread, current_thread) + ); + let mut thread_changed = current_thread != new_owner_thread; match self.transferred.entry(query) { @@ -422,6 +402,18 @@ impl DependencyGraph { } } +#[derive(Debug)] +pub(super) enum CanClaimTransferred { + /// Transferred can be claimed because the current thread is the owner or depends on the owner. + ImTheOwner, + + /// Transferred can't be claimed because it's owned by another thread. + OwnedBy(ThreadId), + + /// The query was transferred earlier but it has since then been released by the owning query. + Released, +} + mod edge { use crate::sync::thread::ThreadId; use crate::sync::{Condvar, MutexGuard}; From 0a2d865b3d5afe7e2ce73c1a76ce3f0c02593d1a Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 10:30:08 +0200 Subject: [PATCH 26/45] More documentation, cleanups --- src/function.rs | 8 +- src/function/sync.rs | 119 +++++++-------- src/ingredient.rs | 9 +- src/runtime.rs | 50 ++++--- src/runtime/dependency_graph.rs | 246 +++++++++++++++++++++----------- 5 files changed, 270 insertions(+), 162 deletions(-) diff --git a/src/function.rs b/src/function.rs index 36aacceed..e99cc29db 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, ClaimResult, SyncGuard, SyncOwnerId, SyncState, SyncTable}; +pub(crate) use sync::{ClaimGuard, ClaimResult, SyncGuard, SyncOwnerId, SyncTable}; use std::any::Any; use std::fmt; @@ -398,8 +398,8 @@ where memo.revisions.cycle_converged() } - fn sync_table(&self) -> &SyncTable { - &self.sync_table + fn 
mark_as_transfer_target(&self, key_index: Id) -> Option { + self.sync_table.mark_as_transfer_target(key_index) } fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads { @@ -418,7 +418,7 @@ where fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { match self.sync_table.try_claim::(zalsa, key_index) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), - ClaimResult::Cycle { inner: inner } => WaitForResult::Cycle { inner }, + ClaimResult::Cycle { inner } => WaitForResult::Cycle { inner }, ClaimResult::Claimed(_) => WaitForResult::Available, } } diff --git a/src/function/sync.rs b/src/function/sync.rs index da6cdc5ab..841c5e89f 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -53,7 +53,7 @@ pub(crate) struct SyncState { /// If `a` has been transferred to `b` and the stack for t1 is `b -> a`, then `a` can be claimed /// and `claimed_transferred` is set to `true`. However, t2 won't be able to claim `a` because /// it doesn't own `b`. - claimed_transferred: bool, + claimed_twice: bool, } impl SyncTable { @@ -118,7 +118,7 @@ impl SyncTable { id: SyncOwnerId::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, - claimed_transferred: false, + claimed_twice: false, }); ClaimResult::Claimed(ClaimGuard { key_index, @@ -147,14 +147,12 @@ impl SyncTable { { BlockTransferredResult::ImTheOwner if REENTRANT => { let SyncState { - id, - claimed_transferred, - .. + id, claimed_twice, .. 
} = entry.into_mut(); - debug_assert!(!*claimed_transferred); + debug_assert!(!*claimed_twice); *id = SyncOwnerId::Thread(thread_id); - *claimed_transferred = true; + *claimed_twice = true; Ok(ClaimResult::Claimed(ClaimGuard { key_index, @@ -173,7 +171,7 @@ impl SyncTable { id: SyncOwnerId::Thread(thread_id), anyone_waiting: false, is_transfer_target: false, - claimed_transferred: false, + claimed_twice: false, }); Ok(ClaimResult::Claimed(ClaimGuard { key_index, @@ -185,13 +183,13 @@ impl SyncTable { } } - /// Makes `key_index` an owner of a transferred query. + /// Marks `key_index` as a transfer target. /// /// Returns the `SyncOwnerId` of the thread that currently owns this query. /// /// Note: The result of this method will immediately become stale unless the thread owning `key_index` /// is currently blocked on this thread (claiming `key_index` from this thread results in a cycle). - fn make_owner_of(&self, key_index: Id) -> Option { + pub(super) fn mark_as_transfer_target(&self, key_index: Id) -> Option { let mut syncs = self.syncs.lock(); syncs.get_mut(&key_index).map(|state| { state.anyone_waiting = true; @@ -203,7 +201,7 @@ impl SyncTable { } #[derive(Copy, Clone, Debug)] -pub(crate) enum SyncOwnerId { +pub enum SyncOwnerId { /// Query is owned by this thread Thread(thread::ThreadId), @@ -242,22 +240,20 @@ impl<'me> ClaimGuard<'me> { self.mode = mode; } - #[inline(always)] - fn release_default(&self, wait_result: WaitResult) { + #[cold] + fn release_panicking(&self) { let mut syncs = self.sync_table.syncs.lock(); let state = syncs.remove(&self.key_index).expect("key claimed twice?"); - self.release(wait_result, state); + self.release(state, WaitResult::Panicked); } #[inline(always)] - fn release(&self, wait_result: WaitResult, state: SyncState) { - let database_key_index = self.database_key_index(); - + fn release(&self, state: SyncState, wait_result: WaitResult) { let SyncState { anyone_waiting, is_transfer_target, - claimed_transferred: claimed_twice, + 
claimed_twice, .. } = state; @@ -266,13 +262,14 @@ impl<'me> ClaimGuard<'me> { } let runtime = self.zalsa.runtime(); + let database_key_index = self.database_key_index(); if claimed_twice { - runtime.remove_transferred(database_key_index); + runtime.undo_transfer_lock(database_key_index); } if is_transfer_target { - runtime.unblock_transferred_queries(database_key_index, wait_result); + runtime.unblock_transferred_queries_owned_by(database_key_index, wait_result); } runtime.unblock_queries_blocked_on(database_key_index, wait_result); @@ -284,69 +281,77 @@ impl<'me> ClaimGuard<'me> { let mut syncs = self.sync_table.syncs.lock(); let std::collections::hash_map::Entry::Occupied(mut state) = syncs.entry(self.key_index) else { - panic!("key claimed twice?"); + panic!("key should only be claimed/released once"); }; - if state.get().claimed_transferred { - state.get_mut().claimed_transferred = false; + if state.get().claimed_twice { + state.get_mut().claimed_twice = false; state.get_mut().id = SyncOwnerId::Transferred; } else { - self.release(WaitResult::Completed, state.remove()); + self.release(state.remove(), WaitResult::Completed); } } #[cold] #[inline(never)] pub(crate) fn transfer(&self, new_owner: DatabaseKeyIndex) { - tracing::info!("transfer"); - let self_key = self.database_key_index(); - let owner_ingredient = self.zalsa.lookup_ingredient(new_owner.ingredient_index()); // Get the owning thread of `new_owner`. - let owner_sync_table = owner_ingredient.sync_table(); - let owner_thread_id = owner_sync_table - .make_owner_of(new_owner.key_index()) - .expect("new owner to be a locked query"); - - tracing::debug!( - "Transferring ownership of {self_key:?} to {new_owner:?} ({owner_thread_id:?})" - ); + // The thread id is guaranteed to not be stale because `new_owner` must be blocked on `self_key` + // or `transfer_lock` will panic (at least in debug builds). 
+ let Some(new_owner_thread_id) = + owner_ingredient.mark_as_transfer_target(new_owner.key_index()) + else { + self.release( + self.sync_table + .syncs + .lock() + .remove(&self.key_index) + .expect("key should only be claimed/released once"), + WaitResult::Panicked, + ); + + panic!("new owner to be a locked query") + }; let mut syncs = self.sync_table.syncs.lock(); - let runtime = self.zalsa.runtime(); - runtime.transfer_lock(self_key, thread::current().id(), new_owner, owner_thread_id); + let self_key = self.database_key_index(); + tracing::debug!( + "Transferring lock ownership of {self_key:?} to {new_owner:?} ({new_owner_thread_id:?})" + ); let SyncState { - anyone_waiting, - id, - claimed_transferred: claimed_twice, - .. - } = syncs.get_mut(&self.key_index).expect("key claimed twice?"); + id, claimed_twice, .. + } = syncs + .get_mut(&self.key_index) + .expect("key should only be claimed/released once"); + + self.zalsa + .runtime() + .transfer_lock(self_key, new_owner, new_owner_thread_id); *id = SyncOwnerId::Transferred; *claimed_twice = false; - *anyone_waiting = false; } } impl Drop for ClaimGuard<'_> { - #[inline] fn drop(&mut self) { - let wait_result = if thread::panicking() { - WaitResult::Panicked - } else { - WaitResult::Completed - }; + if thread::panicking() { + self.release_panicking(); + return; + } match self.mode { ReleaseMode::Default => { - self.release_default(wait_result); - } - _ if matches!(wait_result, WaitResult::Panicked) => { - tracing::debug!("Releasing `ClaimGuard` after panic"); - self.release_default(wait_result); + let mut syncs = self.sync_table.syncs.lock(); + let state = syncs + .remove(&self.key_index) + .expect("key should only be claimed/released once"); + + self.release(state, WaitResult::Completed); } ReleaseMode::SelfOnly => { self.release_self(); @@ -382,8 +387,10 @@ pub(crate) enum ReleaseMode { /// Transfers the ownership of the lock to the specified query. 
/// - /// The query will remain locked except the query that's currently blocking this query from completing - /// (to avoid deadlocks). + /// The query will remain locked and only the thread owning the transfer target will be resumed. + /// + /// The transfer target must be a query that's blocked on this query to guarantee that the transfer target doesn't complete + /// before the transfer is finished (which would leave this query locked forever). /// /// If this thread panics, the query will be released as normal (default mode). TransferTo(DatabaseKeyIndex), diff --git a/src/ingredient.rs b/src/ingredient.rs index e21e3690c..7999b08fb 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -99,8 +99,13 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { ); } - fn sync_table(&self) -> &crate::function::SyncTable { - unreachable!("owning_thread should only be called on functions"); + /// Invoked when a query transfers its lock-ownership to `_key_index`. Returns the thread + /// owning the lock for `_key_index` or `None` if `_key_index` is not claimed. + /// + /// Note: The returned `SyncOwnerId` may be outdated as soon as this function returns **unless** + /// it's guaranteed that `_key_index` is blocked on the current thread. + fn mark_as_transfer_target(&self, _key_index: Id) -> Option { + unreachable!("mark_as_transfer_target should only be called on functions"); } /// Invoked when the value `output_key` should be marked as valid in the current revision. 
diff --git a/src/runtime.rs b/src/runtime.rs index 28883dfb6..e1f4aadf2 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,13 +1,13 @@ -use self::dependency_graph::{CanClaimTransferred, DependencyGraph}; +use self::dependency_graph::DependencyGraph; use crate::durability::Durability; -use crate::function::{SyncGuard, SyncOwnerId, SyncState}; +use crate::function::{SyncGuard, SyncOwnerId}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::thread::{self, ThreadId}; use crate::sync::Mutex; use crate::table::Table; use crate::zalsa::Zalsa; -use crate::{Cancelled, Event, EventKind, Id, Revision}; +use crate::{Cancelled, Event, EventKind, Revision}; mod dependency_graph; @@ -303,16 +303,16 @@ impl Runtime { /// Tries to claim ownership of a transferred query where `thread_id` is the current thread and `query` /// is the query (that had its ownership transferred) to claim. /// - /// For this operation to be reasonable, the caller must ensure that the lock on `query` is not released + /// For this operation to be reasonable, the caller must ensure that the sync table lock on `query` is not released /// before this operation completes. pub(super) fn block_transferred( &self, query: DatabaseKeyIndex, current_id: ThreadId, ) -> BlockTransferredResult<'_> { - let mut dg = self.dependency_graph.lock(); + let dg = self.dependency_graph.lock(); - let owner_thread = dg.resolved_transferred_id(query, None); + let owner_thread = dg.thread_id_of_transferred_query(query, None); let Some(owner_thread_id) = owner_thread else { // The query transferred its ownership but the owner has since then released the lock. @@ -333,8 +333,7 @@ impl Runtime { } /// Invoked when this runtime completed computing `database_key` with - /// the given result `wait_result` (`wait_result` should be `None` if - /// computing `database_key` panicked and could not complete). + /// the given result `wait_result`. 
/// This function unblocks any dependent queries and allows them /// to continue executing. pub(crate) fn unblock_queries_blocked_on( @@ -347,34 +346,49 @@ impl Runtime { .unblock_runtimes_blocked_on(database_key, wait_result); } + /// Unblocks all transferred queries that are owned by `database_key` recursively. + /// + /// Invoked when a query completes that has been marked as transfer target (it has + /// queries that transferred their lock ownership to it) with the given `wait_result`. + /// + /// This function unblocks any dependent queries and allows them to continue executing. The + /// query `database_key` is not unblocked by this function. #[cold] - pub(crate) fn unblock_transferred_queries( + pub(crate) fn unblock_transferred_queries_owned_by( &self, database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { self.dependency_graph .lock() - .unblock_transferred_queries(database_key, wait_result); + .unblock_runtimes_blocked_on_transferred_queries_owned_by(database_key, wait_result); } + /// Removes the ownership transfer of `query`'s lock if it exists. + /// + /// If `query` has transferred its lock ownership to another query, this function will remove that transfer, + /// so that `query` now owns its lock again. #[cold] - pub(super) fn remove_transferred(&self, query: DatabaseKeyIndex) { - self.dependency_graph.lock().remove_transferred(query); + pub(super) fn undo_transfer_lock(&self, query: DatabaseKeyIndex) { + self.dependency_graph.lock().undo_transfer_lock(query); } + /// Transfers ownership of the lock for `query` to `new_owner_key`. + /// + /// For this operation to be reasonable, the caller must ensure that the sync table lock on `query` is not released + /// and that `new_owner_key` is currently blocked on `query`. Otherwise, `new_owner_key` might + /// complete before the lock is transferred, leaving `query` locked forever. 
pub(super) fn transfer_lock( &self, query: DatabaseKeyIndex, - current_thread: ThreadId, - new_owner: DatabaseKeyIndex, - new_owner_thread: SyncOwnerId, + new_owner_key: DatabaseKeyIndex, + new_owner_id: SyncOwnerId, ) { self.dependency_graph.lock().transfer_lock( query, - current_thread, - new_owner, - new_owner_thread, + thread::current().id(), + new_owner_key, + new_owner_id, ); } diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 8be482c11..7e1dad1f5 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -17,7 +17,7 @@ pub(super) struct DependencyGraph { /// `K` is blocked on some query executing in the runtime `V`. /// This encodes a graph that must be acyclic (or else deadlock /// will result). - edges: FxHashMap, + edges: Edges, /// Encodes the `ThreadId` that are blocked waiting for the result /// of a given query. @@ -28,13 +28,15 @@ pub(super) struct DependencyGraph { /// come here to fetch their results. wait_results: FxHashMap, - /// A `K -> Q` pair indicates that `K`'s lock is now owned by - /// `Q` (The thread id of `Q` and its database key) + /// A `K -> Q` pair indicates that the query `K`'s lock is now owned by the query + /// `Q`. It's important that `transferred` always forms a tree (must be acyclic), + /// or else deadlock will result. transferred: FxHashMap, - /// A `K -> Qs` pair indicates that `K`'s lock is now owned by - /// `Qs` (The thread id of `Qs` and their database keys) - transferred_dependents: FxHashMap>, + /// A `K -> [Q]` pair indicates that the query `K` owns the locks of + /// `Q`. This is the reverse mapping of `transferred` to allow efficient unlocking + /// of all dependent queries when `K` completes. + transferred_dependents: FxHashMap>, } impl DependencyGraph { @@ -42,23 +44,7 @@ impl DependencyGraph { /// /// (i.e., there is a path from `from_id` to `to_id` in the graph.) 
pub(super) fn depends_on(&self, from_id: ThreadId, to_id: ThreadId) -> bool { - Self::depends_on_impl(&self.edges, from_id, to_id) - } - - fn depends_on_impl( - edges: &FxHashMap, - from_id: ThreadId, - to_id: ThreadId, - ) -> bool { - let mut p = from_id; - while let Some(q) = edges.get(&p).map(|edge| edge.blocked_on_id) { - if q == to_id { - return true; - } - - p = q; - } - p == to_id + self.edges.depends_on(from_id, to_id) } /// Modifies the graph so that `from_id` is blocked @@ -157,7 +143,9 @@ impl DependencyGraph { edge.notify(); } - pub(super) fn unblock_transferred_queries( + /// Invoked when the query `database_key` completes and it owns the locks of other queries + /// (the queries transferred their locks to `database_key`). + pub(super) fn unblock_runtimes_blocked_on_transferred_queries_owned_by( &mut self, database_key: DatabaseKeyIndex, wait_result: WaitResult, @@ -168,37 +156,44 @@ impl DependencyGraph { wait_result: WaitResult, ) { me.transferred.remove(&query); - if let Some(transitive) = me.transferred_dependents.remove(&query) { - for query in transitive { - me.unblock_runtimes_blocked_on(query, wait_result); - unblock_recursive(me, query, wait_result); - } + + for query in me.transferred_dependents.remove(&query).unwrap_or_default() { + me.unblock_runtimes_blocked_on(query, wait_result); + unblock_recursive(me, query, wait_result); } } // If `database_key` is `c` and it has been transferred to `b` earlier, remove its entry. 
- tracing::trace!("unblock_transferred_queries({database_key:?}"); + tracing::trace!( + "unblock_runtimes_blocked_on_transferred_queries_owned_by({database_key:?}" + ); + if let Some((_, owner)) = self.transferred.remove(&database_key) { - let owner_dependents = self.transferred_dependents.get_mut(&owner).unwrap(); - let index = owner_dependents - .iter() - .position(|&x| x == database_key) - .unwrap(); - owner_dependents.swap_remove(index); + // If this query previously transferred its lock ownership to another query, remove + // it from that queries dependents as it is now completing. + self.transferred_dependents + .get_mut(&owner) + .unwrap() + .remove(&database_key); } unblock_recursive(self, database_key, wait_result); } - pub(super) fn remove_transferred(&mut self, database_key: DatabaseKeyIndex) { + pub(super) fn undo_transfer_lock(&mut self, database_key: DatabaseKeyIndex) { if let Some((_, owner)) = self.transferred.remove(&database_key) { - let dependents = self.transferred_dependents.get_mut(&owner).unwrap(); - let index = dependents.iter().position(|h| *h == database_key).unwrap(); - dependents.swap_remove(index); + self.transferred_dependents + .get_mut(&owner) + .unwrap() + .remove(&database_key); } } - pub(super) fn resolved_transferred_id( + /// Recursively resolves the thread id that currently owns the lock for `database_key`. + /// + /// Returns `None` if `database_key` hasn't (or has since then been released) transferred its lock + /// and the thread id must be looked up in the `SyncTable` instead. + pub(super) fn thread_id_of_transferred_query( &self, database_key: DatabaseKeyIndex, ignore: Option, @@ -218,50 +213,51 @@ impl DependencyGraph { Some(resolved_thread) } + /// Modifies the graph so that the lock on `query` (currently owned by `current_thread`) is + /// transferred to `new_owner` (which is owned by `new_owner_id`). 
pub(super) fn transfer_lock( &mut self, query: DatabaseKeyIndex, current_thread: ThreadId, new_owner: DatabaseKeyIndex, - new_owner_thread: SyncOwnerId, + new_owner_id: SyncOwnerId, ) { - let new_owner_thread = match new_owner_thread { + let new_owner_thread = match new_owner_id { SyncOwnerId::Thread(thread) => thread, - SyncOwnerId::Transferred => self - .resolved_transferred_id(new_owner, Some(query)) - .unwrap(), + SyncOwnerId::Transferred => { + // Skip over `query` to skip over any existing mapping from `new_owner` to `query` that may + // exist from previous transfers. + self.thread_id_of_transferred_query(new_owner, Some(query)) + .expect("new owner should be blocked on `query`") + } }; debug_assert!( - new_owner_thread == current_thread || self.depends_on(new_owner_thread, current_thread) + new_owner_thread == current_thread || self.depends_on(new_owner_thread, current_thread), + "new owner {new_owner:?} ({new_owner_thread:?}) must be blocked on {query:?} ({current_thread:?})" ); - let mut thread_changed = current_thread != new_owner_thread; - - match self.transferred.entry(query) { + let thread_changed = match self.transferred.entry(query) { std::collections::hash_map::Entry::Vacant(entry) => { // Transfer `c -> b` and there's no existing entry for `c`. entry.insert((new_owner_thread, new_owner)); + current_thread != new_owner_thread } std::collections::hash_map::Entry::Occupied(mut entry) => { - // If we transfer to the same owner as before, return immediately. + // If we transfer to the same owner as before, return immediately as this is a no-op. if entry.get() == &(new_owner_thread, new_owner) { return; } // `Transfer `c -> b` after a previous `c -> d` mapping. // Update the owner and remove the query from the old owner's dependents. - let (old_owner_thread, old_owner) = *entry.get(); - - // We simply assume here that the thread has changed because we'd have to walk the entire - // transferred chaine of `old_owner` to know if the thread has changed. 
This won't safe us much - // compared to just updating all dependent threads. - thread_changed = true; + let &(old_owner_thread, old_owner) = entry.get(); // For the example below, remove `d` from `b`'s dependents.` - let old_dependents = self.transferred_dependents.get_mut(&old_owner).unwrap(); - let index = old_dependents.iter().position(|key| *key == query).unwrap(); - old_dependents.swap_remove(index); + self.transferred_dependents + .get_mut(&old_owner) + .unwrap() + .remove(&query); entry.insert((new_owner_thread, new_owner)); @@ -290,13 +286,10 @@ impl DependencyGraph { ); // Remove `a` from the dependents of `d` and remove the mapping from `a -> d`. - let query_dependents = self.transferred_dependents.get_mut(&query).unwrap(); - let index = query_dependents - .iter() - .copied() - .position(|key| key == source) - .unwrap(); - query_dependents.swap_remove(index); + self.transferred_dependents + .get_mut(&query) + .unwrap() + .remove(&source); // if the old mapping was `c -> d` and we now insert `d -> c`, remove `d -> c` if old_owner == new_owner { @@ -305,10 +298,10 @@ impl DependencyGraph { // otherwise (when `d` pointed to some other query, e.g. `b` in the example), // add an edge from `a` to `b` entry.insert((old_owner_thread, old_owner)); - - let old_owner_dependents = - self.transferred_dependents.get_mut(&old_owner).unwrap(); - old_owner_dependents.push(source); + self.transferred_dependents + .get_mut(&old_owner) + .unwrap() + .push(source); } break; @@ -316,12 +309,16 @@ impl DependencyGraph { last_segment = self.transferred.entry(next_target); } + + // We simply assume here that the thread has changed because we'd have to walk the entire + // transferred chaine of `old_owner` to know if the thread has changed. This won't save us much + // compared to just updating all dependent threads. + true } }; // Register `c` as a dependent of `b`. 
let all_dependents = self.transferred_dependents.entry(new_owner).or_default(); - debug_assert!(!all_dependents.contains(&query)); debug_assert!(!all_dependents.contains(&new_owner)); all_dependents.push(query); @@ -340,8 +337,7 @@ impl DependencyGraph { while let Some(current) = queue.pop() { if let Some(dependents) = self.query_dependents.get_mut(¤t) { for (i, id) in dependents.iter().enumerate() { - if *id == new_owner_id || Self::depends_on_impl(&self.edges, new_owner_id, *id) - { + if *id == new_owner_id || self.edges.depends_on(new_owner_id, *id) { let thread_id = dependents.swap_remove(i); if dependents.is_empty() { self.query_dependents.remove(¤t); @@ -393,7 +389,7 @@ impl DependencyGraph { ); edge.blocked_on_id = new_owner_thread; debug_assert!( - !DependencyGraph::depends_on_impl(&self.edges, new_owner_thread, *dependent), + !&self.edges.depends_on(new_owner_thread, *dependent), "Circular reference between blocked edges: {:#?}", self.edges ); @@ -402,16 +398,102 @@ impl DependencyGraph { } } +#[derive(Debug, Default)] +struct Edges(FxHashMap); + +impl Edges { + fn depends_on(&self, from_id: ThreadId, to_id: ThreadId) -> bool { + let mut p = from_id; + while let Some(q) = self.0.get(&p).map(|edge| edge.blocked_on_id) { + if q == to_id { + return true; + } + + p = q; + } + p == to_id + } + + fn get_mut(&mut self, id: &ThreadId) -> Option<&mut edge::Edge> { + self.0.get_mut(id) + } + + fn contains_key(&self, id: &ThreadId) -> bool { + self.0.contains_key(id) + } + + fn insert(&mut self, id: ThreadId, edge: edge::Edge) { + self.0.insert(id, edge); + } + + fn remove(&mut self, id: &ThreadId) -> Option { + self.0.remove(id) + } +} + #[derive(Debug)] -pub(super) enum CanClaimTransferred { - /// Transferred can be claimed because the current thread is the owner or depends on the owner. 
- ImTheOwner, +struct SmallSet(SmallVec<[T; N]>); + +impl SmallSet +where + T: PartialEq, +{ + const fn new() -> Self { + Self(SmallVec::new_const()) + } + + fn push(&mut self, value: T) { + debug_assert!(!self.0.contains(&value)); + + self.0.push(value); + } + + fn contains(&self, value: &T) -> bool { + self.0.contains(value) + } + + fn remove(&mut self, value: &T) -> bool { + if let Some(index) = self.0.iter().position(|x| x == value) { + self.0.swap_remove(index); + true + } else { + false + } + } + + fn iter(&self) -> std::slice::Iter<'_, T> { + self.0.iter() + } +} - /// Transferred can't be claimed because it's owned by another thread. - OwnedBy(ThreadId), +impl IntoIterator for SmallSet { + type Item = T; + type IntoIter = smallvec::IntoIter<[T; N]>; - /// The query was transferred earlier but it has since then been released by the owning query. - Released, + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, const N: usize> IntoIterator for &'a SmallSet +where + T: PartialEq, +{ + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl Default for SmallSet +where + T: PartialEq, +{ + fn default() -> Self { + Self::new() + } } mod edge { From 92a9ab64d86ac3e03ef786c2bd29f4b5362726d5 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 11:10:29 +0200 Subject: [PATCH 27/45] More documentation, cleanups --- src/cycle.rs | 10 ++- src/runtime/dependency_graph.rs | 136 +++++++++++++++++++------------- 2 files changed, 87 insertions(+), 59 deletions(-) diff --git a/src/cycle.rs b/src/cycle.rs index 3d224c213..baeb60a5d 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -44,10 +44,12 @@ //! result in a stable, converged cycle. If it does not (that is, if the result of another //! iteration of the cycle is not the same as the fallback value), we'll panic. //! -//! 
In nested cycle cases, the inner cycle head will iterate until its own cycle is resolved, but -//! the "final" value it then returns will still be provisional on the outer cycle head. The outer -//! cycle head may then iterate, which may result in a new set of iterations on the inner cycle, -//! for each iteration of the outer cycle. +//! In nested cycle cases, the inner cycles are iterated as part of the outer cycle iteration. This helps +//! to significantly reduce the number of iterations needed to reach a fixpoint. For nested cycles, +//! the inner cycles head will transfer their lock ownership to the outer cycle. This ensures +//! that, over time, the outer cycle will hold all necessary locks to complete the fixpoint iteration. +//! Without this, different threads would compete for the locks of inner cycle heads, leading to potential +//! hangs (but not deadlocks). use thin_vec::{thin_vec, ThinVec}; diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 7e1dad1f5..366a98f53 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -1,7 +1,7 @@ use std::pin::Pin; use rustc_hash::FxHashMap; -use smallvec::{smallvec, SmallVec}; +use smallvec::SmallVec; use crate::function::SyncOwnerId; use crate::key::DatabaseKeyIndex; @@ -11,6 +11,9 @@ use crate::sync::thread::ThreadId; use crate::sync::MutexGuard; use crate::tracing; +type QueryDependents = FxHashMap>; +type TransferredDependents = FxHashMap>; + #[derive(Debug, Default)] pub(super) struct DependencyGraph { /// A `(K -> V)` pair in this map indicates that the runtime @@ -21,7 +24,7 @@ pub(super) struct DependencyGraph { /// Encodes the `ThreadId` that are blocked waiting for the result /// of a given query. - query_dependents: FxHashMap>, + query_dependents: QueryDependents, /// When a key K completes which had dependent queries Qs blocked on it, /// it stores its `WaitResult` here. 
As they wake up, each query Q in Qs will @@ -36,7 +39,7 @@ pub(super) struct DependencyGraph { /// A `K -> [Q]` pair indicates that the query `K` owns the locks of /// `Q`. This is the reverse mapping of `transferred` to allow efficient unlocking /// of all dependent queries when `K` completes. - transferred_dependents: FxHashMap>, + transferred_dependents: TransferredDependents, } impl DependencyGraph { @@ -332,69 +335,92 @@ impl DependencyGraph { /// Finds the one query in the dependents of the `source_query` (the one that is transferred to a new owner) /// on which the `new_owner_id` thread blocks on and unblocks it, to ensure progress. fn unblock_transfer_target(&mut self, source_query: DatabaseKeyIndex, new_owner_id: ThreadId) { - let mut queue: SmallVec<[_; 4]> = smallvec![source_query]; - - while let Some(current) = queue.pop() { - if let Some(dependents) = self.query_dependents.get_mut(¤t) { - for (i, id) in dependents.iter().enumerate() { - if *id == new_owner_id || self.edges.depends_on(new_owner_id, *id) { - let thread_id = dependents.swap_remove(i); - if dependents.is_empty() { - self.query_dependents.remove(¤t); - } - - self.unblock_runtime(thread_id, WaitResult::Completed); - - return; + /// Finds the thread that's currently blocking the `new_owner_id` thread. + /// + /// Returns `Some` if there's such a thread where the first element is the query + /// that the thread is blocked on (key into `query_dependents`) and the second element + /// is the index in the list of blocked threads (index into the `query_dependents` value) for that query. 
+ fn find_blocked_thread( + me: &DependencyGraph, + query: DatabaseKeyIndex, + new_owner_id: ThreadId, + ) -> Option<(DatabaseKeyIndex, usize)> { + if let Some(blocked_threads) = me.query_dependents.get(&query) { + for (i, id) in blocked_threads.iter().copied().enumerate() { + if id == new_owner_id || me.edges.depends_on(new_owner_id, id) { + return Some((query, i)); } } - }; + } - queue.extend( - self.transferred_dependents - .get(¤t) - .iter() - .copied() - .flatten() - .copied(), - ); + me.transferred_dependents + .get(&query) + .iter() + .copied() + .flatten() + .find_map(|dependent| find_blocked_thread(me, *dependent, new_owner_id)) } - } - fn update_transferred_edges(&mut self, query: DatabaseKeyIndex, new_owner_thread: ThreadId) { - tracing::trace!("update_transferred_edges({query:?}"); + if let Some((query, query_dependents_index)) = + find_blocked_thread(self, source_query, new_owner_id) + { + let blocked_threads = self.query_dependents.get_mut(&query).unwrap(); - let mut queue: SmallVec<[_; 4]> = smallvec![query]; + let thread_id = blocked_threads.swap_remove(query_dependents_index); + if blocked_threads.is_empty() { + self.query_dependents.remove(&query); + } - while let Some(query) = queue.pop() { - queue.extend( - self.transferred_dependents - .get(&query) - .iter() - .copied() - .flatten() - .copied(), - ); - - let Some(dependents) = self.query_dependents.get_mut(&query) else { - continue; + self.unblock_runtime(thread_id, WaitResult::Completed); + } + } + + fn update_transferred_edges(&mut self, query: DatabaseKeyIndex, new_owner_thread: ThreadId) { + fn update_transferred_edges( + edges: &mut Edges, + query_dependents: &QueryDependents, + transferred_dependents: &TransferredDependents, + query: DatabaseKeyIndex, + new_owner_thread: ThreadId, + ) { + tracing::trace!("update_transferred_edges({query:?}"); + if let Some(dependents) = query_dependents.get(&query) { + for dependent in dependents.iter() { + let edge = edges.get_mut(dependent).unwrap(); + 
+ tracing::trace!( + "Rewrite edge from {:?} to {new_owner_thread:?}", + edge.blocked_on_id + ); + edge.blocked_on_id = new_owner_thread; + debug_assert!( + !edges.depends_on(new_owner_thread, *dependent), + "Circular reference between blocked edges: {:#?}", + edges + ); + } }; - for dependent in dependents.iter_mut() { - let edge = self.edges.get_mut(dependent).unwrap(); - - tracing::trace!( - "Rewrite edge from {:?} to {new_owner_thread:?}", - edge.blocked_on_id - ); - edge.blocked_on_id = new_owner_thread; - debug_assert!( - !&self.edges.depends_on(new_owner_thread, *dependent), - "Circular reference between blocked edges: {:#?}", - self.edges - ); + if let Some(dependents) = transferred_dependents.get(&query) { + for dependent in dependents { + update_transferred_edges( + edges, + query_dependents, + transferred_dependents, + *dependent, + new_owner_thread, + ) + } } } + + update_transferred_edges( + &mut self.edges, + &self.query_dependents, + &self.transferred_dependents, + query, + new_owner_thread, + ) } } From 868fb50fa4c09d33fcfca7570adeaff31b17e3a8 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 11:16:40 +0200 Subject: [PATCH 28/45] Remove inline attribute --- src/function/execute.rs | 3 +++ src/function/sync.rs | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index 2e0a91671..20bc2495a 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -529,6 +529,9 @@ impl Drop for ClearCycleHeadIfPanicking<'_, C> { } } +/// Returns the key of any potential outer cycle head or `None` if there is no outer cycle. +/// +/// That is, any query that's currently blocked on the result computed by this query (claiming it results in a cycle). 
fn outer_cycle( zalsa: &Zalsa, zalsa_local: &ZalsaLocal, diff --git a/src/function/sync.rs b/src/function/sync.rs index 841c5e89f..26f3759c8 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -69,7 +69,6 @@ impl SyncTable { /// `REENTRANT` controls whether a query that transferred its ownership to another query for which /// this thread currently holds the lock for can be claimed. For example, if `a` transferred its ownership /// to `b`, and this thread holds the lock for `b`, then this thread can also claim `a` but only if `REENTRANT` is `true`. - #[inline] pub(crate) fn try_claim<'me, const REENTRANT: bool>( &'me self, zalsa: &'me Zalsa, From b34ea293371fda9ae0065ee5ae545df88e652919 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 13:07:01 +0200 Subject: [PATCH 29/45] Fix failing tracked structs test --- src/function/execute.rs | 20 ++++++++++++-------- src/function/maybe_changed_after.rs | 5 ++--- tests/cycle_tracked.rs | 2 +- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index 20bc2495a..7e56aceff 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -150,9 +150,8 @@ where let id = database_key_index.key_index(); // Our provisional value from the previous iteration, when doing fixpoint iteration. - // Initially it's set to None, because the initial provisional value is created lazily, - // only when a cycle is actually encountered. - let mut previous_memo: Option<&Memo<'db, C>> = None; + // This is different from `opt_old_memo` which might be from a different revision. + let mut last_provisional_memo: Option<&Memo<'db, C>> = None; // TODO: Can we seed those somehow? 
let mut last_stale_tracked_ids: Vec<(Identity, Id)> = Vec::new(); @@ -166,7 +165,8 @@ where && old_memo.cycle_heads().contains(&database_key_index) && !memo_iteration_count.is_panicked() { - previous_memo = Some(old_memo); + // BUG, we need to pass in previous memo even for the first iteration. + last_provisional_memo = Some(old_memo); iteration_count = memo_iteration_count; } } @@ -183,8 +183,12 @@ where // if they aren't recreated when reaching the final iteration. active_query.seed_tracked_struct_ids(&last_stale_tracked_ids); - let (mut new_value, mut completed_query) = - Self::execute_query(db, zalsa, active_query, previous_memo); + let (mut new_value, mut completed_query) = Self::execute_query( + db, + zalsa, + active_query, + last_provisional_memo.or(opt_old_memo), + ); // If there are no cycle heads, break out of the loop (`cycle_heads_mut` returns `None` if the cycle head list is empty) let Some(cycle_heads) = completed_query.revisions.cycle_heads_mut() else { @@ -262,7 +266,7 @@ where // Get the last provisional value for this query so that we can compare it with the new value // to test if the cycle converged. - let last_provisional_value = if let Some(last_provisional) = previous_memo { + let last_provisional_value = if let Some(last_provisional) = last_provisional_memo { // We have a last provisional value from our previous time around the loop. 
last_provisional.value.as_ref() } else { @@ -424,7 +428,7 @@ where memo_ingredient_index, ); - previous_memo = Some(new_memo); + last_provisional_memo = Some(new_memo); last_stale_tracked_ids = completed_query.stale_tracked_structs; active_query = zalsa_local.push_query(database_key_index, iteration_count); diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index 5795178d4..acd63d358 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -321,12 +321,11 @@ where } let last_changed = zalsa.last_changed_revision(memo.revisions.durability); - crate::tracing::debug!( - "{database_key_index:?}: check_durability(memo = {memo:#?}, last_changed={:?} <= verified_at={:?}) = {:?}", + crate::tracing::trace!( + "{database_key_index:?}: check_durability({database_key_index:#?}, last_changed={:?} <= verified_at={:?}) = {:?}", last_changed, verified_at, last_changed <= verified_at, - memo = memo.tracing_debug() ); if last_changed <= verified_at { // No input of the suitable durability has changed since last verified. diff --git a/tests/cycle_tracked.rs b/tests/cycle_tracked.rs index 154ba3370..2e0c2cfd0 100644 --- a/tests/cycle_tracked.rs +++ b/tests/cycle_tracked.rs @@ -269,7 +269,7 @@ fn cycle_recover_with_structs<'db>( CycleRecoveryAction::Iterate } -#[test] +#[test_log::test] fn test_cycle_with_fixpoint_structs() { let mut db = EventLoggerDatabase::default(); From 8d786aee0a92c6adfdf69a26c091766873655108 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 13:42:30 +0200 Subject: [PATCH 30/45] Fix panic --- src/function/execute.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index 7e56aceff..e361eb1d0 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -256,9 +256,11 @@ where // transfer it to the outermost cycle head (if any). 
This prevents any other thread // from claiming this query (all cycle heads are potential entry points to the same cycle), // which would result in them competing for the same locks (we want the locks to converge to a single cycle head). - claim_guard.set_release_mode(ReleaseMode::TransferTo( - outer_cycle.expect("query to of an outer cycle."), - )); + if let Some(outer_cycle) = outer_cycle { + claim_guard.set_release_mode(ReleaseMode::TransferTo(outer_cycle)); + } else { + claim_guard.set_release_mode(ReleaseMode::SelfOnly); + } completed_query.revisions.set_cycle_heads(cycle_heads); break (new_value, completed_query); From 5813326f80bf4160d6839e75aa410431878c8c2c Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 14:03:04 +0200 Subject: [PATCH 31/45] Fix persistence test --- src/function/execute.rs | 1 - src/zalsa_local.rs | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index e361eb1d0..748326bb1 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -165,7 +165,6 @@ where && old_memo.cycle_heads().contains(&database_key_index) && !memo_iteration_count.is_panicked() { - // BUG, we need to pass in previous memo even for the first iteration. last_provisional_memo = Some(old_memo); iteration_count = memo_iteration_count; } diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 13701c85b..b4a92c399 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -566,6 +566,7 @@ struct QueryRevisionsExtraInner { /// Stores for nested cycle heads whether they've converged in the last iteration. /// This value is always `false` for other queries. 
+ #[cfg_attr(feature = "persistence", serde(skip))] cycle_converged: bool, } From 81137dde6532b441084561baa56e61e8278cfd12 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 14:32:29 +0200 Subject: [PATCH 32/45] Add test for panic in nested cycle --- src/function/sync.rs | 4 + tests/parallel/cycle_nested_deep_panic.rs | 104 ++++++++++++++++++++++ tests/parallel/main.rs | 1 + 3 files changed, 109 insertions(+) create mode 100644 tests/parallel/cycle_nested_deep_panic.rs diff --git a/src/function/sync.rs b/src/function/sync.rs index 26f3759c8..6302186ed 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -243,6 +243,10 @@ impl<'me> ClaimGuard<'me> { fn release_panicking(&self) { let mut syncs = self.sync_table.syncs.lock(); let state = syncs.remove(&self.key_index).expect("key claimed twice?"); + tracing::debug!( + "Release claim on {:?} due to panic", + self.database_key_index() + ); self.release(state, WaitResult::Panicked); } diff --git a/tests/parallel/cycle_nested_deep_panic.rs b/tests/parallel/cycle_nested_deep_panic.rs new file mode 100644 index 000000000..8455cc194 --- /dev/null +++ b/tests/parallel/cycle_nested_deep_panic.rs @@ -0,0 +1,104 @@ +// Shuttle doesn't like panics inside of its runtime. +#![cfg(not(feature = "shuttle"))] + +//! Tests that salsa doesn't get stuck after a panic in a nested cycle function. 
+ +use crate::sync::thread; +use crate::{Knobs, KnobsDatabase}; +use std::panic::catch_unwind; + +use salsa::CycleRecoveryAction; + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] +struct CycleValue(u32); + +const MIN: CycleValue = CycleValue(0); +const MAX: CycleValue = CycleValue(3); + +#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] +fn query_a(db: &dyn KnobsDatabase) -> CycleValue { + db.signal(1); + query_b(db) +} + +#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] +fn query_b(db: &dyn KnobsDatabase) -> CycleValue { + let c_value = query_c(db); + CycleValue(c_value.0 + 1).min(MAX) +} + +#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] +fn query_c(db: &dyn KnobsDatabase) -> CycleValue { + let d_value = query_d(db); + + if d_value > CycleValue(0) { + let _e_value = query_e(db); + let _b = query_b(db); + db.wait_for(2); + db.signal(3); + panic!("Dragons are real"); + } else { + let a_value = query_a(db); + CycleValue(d_value.0.max(a_value.0)) + } +} + +#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] +fn query_d(db: &dyn KnobsDatabase) -> CycleValue { + query_c(db) +} + +#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] +fn query_e(db: &dyn KnobsDatabase) -> CycleValue { + query_c(db) +} + +fn cycle_fn( + _db: &dyn KnobsDatabase, + _value: &CycleValue, + _count: u32, +) -> CycleRecoveryAction { + CycleRecoveryAction::Iterate +} + +fn initial(_db: &dyn KnobsDatabase) -> CycleValue { + MIN +} + +#[test_log::test] +fn the_test() { + tracing::debug!("Starting new run"); + let db_t1 = Knobs::default(); + let db_t2 = db_t1.clone(); + let db_t3 = db_t1.clone(); + let db_t4 = db_t1.clone(); + + let t1 = thread::spawn(move || { + let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); + + let result = query_a(&db_t1); + result + }); + let t2 = thread::spawn(move || { + let _span = tracing::debug_span!("t4", thread_id = 
?thread::current().id()).entered(); + db_t4.wait_for(1); + db_t4.signal(2); + query_b(&db_t4) + }); + let t3 = thread::spawn(move || { + let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); + db_t2.wait_for(1); + query_d(&db_t2) + }); + + let r_t1 = t1.join(); + let r_t2 = t2.join(); + let r_t3 = t3.join(); + + assert!(r_t1.is_err()); + assert!(r_t2.is_err()); + assert!(r_t3.is_err()); + + // Pulling the cycle again at a later point should still result in a panic. + assert!(catch_unwind(|| query_d(&db_t3)).is_err()); +} diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs index 6c450faa1..d2c780787 100644 --- a/tests/parallel/main.rs +++ b/tests/parallel/main.rs @@ -9,6 +9,7 @@ mod cycle_ab_peeping_c; mod cycle_nested_deep; mod cycle_nested_deep_conditional; mod cycle_nested_deep_conditional_changed; +mod cycle_nested_deep_panic; mod cycle_nested_three_threads; mod cycle_nested_three_threads_changed; mod cycle_panic; From 282382aea1da4039f1db19dbf72a558365f1bcf1 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 14:56:19 +0200 Subject: [PATCH 33/45] Allow cycle initial values same-stack --- src/function/maybe_changed_after.rs | 30 ++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index acd63d358..c8db219db 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -372,15 +372,6 @@ where return true; } - // Always return `false` if this is a cycle initial memo (or the last provisional memo in an iteration) - // as this value has obviously not finished computing yet. 
- if cycle_heads - .iter() - .all(|head| head.database_key_index == database_key_index) - { - return false; - } - crate::tracing::trace!( "{database_key_index:?}: validate_may_be_provisional(memo = {memo:#?})", memo = memo.tracing_debug() @@ -475,11 +466,11 @@ where &self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal, - database_key_index: DatabaseKeyIndex, + memo_database_key_index: DatabaseKeyIndex, memo_verified_at: Revision, cycle_heads: &CycleHeads, ) -> bool { - crate::tracing::trace!("validate_same_iteration({database_key_index:?})",); + crate::tracing::trace!("validate_same_iteration({memo_database_key_index:?})",); // This is an optimization to avoid unnecessary re-execution within the same revision. // Don't apply it when verifying memos from past revisions. We want them to re-execute @@ -488,6 +479,23 @@ where return false; } + // Always return `false` for cycle initial values "unless" they are running in the same thread. + if cycle_heads + .iter() + .all(|head| head.database_key_index == memo_database_key_index) + { + let on_stack = unsafe { + zalsa_local.with_query_stack_unchecked(|stack| { + stack + .iter() + .rev() + .any(|query| query.database_key_index == memo_database_key_index) + }) + }; + + return on_stack; + } + let cycle_heads_iter = TryClaimCycleHeadsIter::new(zalsa, zalsa_local, cycle_heads); for cycle_head in cycle_heads_iter { From d6f0f20a05af11569bc71c055d3e902d37ead5dc Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 15:03:56 +0200 Subject: [PATCH 34/45] Try inlining fetch --- src/function/fetch.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 43a5352f1..ff106d290 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -13,6 +13,7 @@ impl IngredientImpl where C: Configuration, { + #[inline] pub fn fetch<'db>( &'db self, db: &'db C::DbView, From e532693bd959a75dfbf782fbbde952ca300a2ac4 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 15:36:52 
+0200 Subject: [PATCH 35/45] Remove some inline attributes --- src/function/fetch.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/function/fetch.rs b/src/function/fetch.rs index ff106d290..9ec829e48 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -96,7 +96,6 @@ where } } - #[inline(never)] fn fetch_cold_with_retry<'db>( &'db self, zalsa: &'db Zalsa, From 14c64ee81e29139cd9c8c862cdfdb8e6411d5d90 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 15:38:30 +0200 Subject: [PATCH 36/45] Add safety comment --- src/function/maybe_changed_after.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index c8db219db..ed212fe62 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -484,6 +484,7 @@ where .iter() .all(|head| head.database_key_index == memo_database_key_index) { + // SAFETY: We do not access the query stack reentrantly. let on_stack = unsafe { zalsa_local.with_query_stack_unchecked(|stack| { stack From bb97d96f3bebcff42fca1b460157e9f8ca719f49 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Fri, 10 Oct 2025 15:46:22 +0200 Subject: [PATCH 37/45] Clippy --- tests/parallel/cycle_nested_deep_panic.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/parallel/cycle_nested_deep_panic.rs b/tests/parallel/cycle_nested_deep_panic.rs index 8455cc194..0ec79f5e7 100644 --- a/tests/parallel/cycle_nested_deep_panic.rs +++ b/tests/parallel/cycle_nested_deep_panic.rs @@ -76,8 +76,7 @@ fn the_test() { let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); - let result = query_a(&db_t1); - result + query_a(&db_t1) }); let t2 = thread::spawn(move || { let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); From 7bb93a9b3977e304e5e0c5a627088a450f45f5cd Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 11 
Oct 2025 09:31:39 +0200 Subject: [PATCH 38/45] Panic if `provisional_retry` runs too many times --- src/function/fetch.rs | 13 +++- src/function/memo.rs | 10 +++ src/function/sync.rs | 2 +- tests/parallel/cycle_nested_deep_panic.rs | 79 ++++++++++++++++------- 4 files changed, 77 insertions(+), 27 deletions(-) diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 9ec829e48..7c0f3ba17 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -58,11 +58,19 @@ where id: Id, ) -> &'db Memo<'db, C> { let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); + let mut retry_count = 0; loop { if let Some(memo) = self .fetch_hot(zalsa, id, memo_ingredient_index) .or_else(|| { - self.fetch_cold_with_retry(zalsa, zalsa_local, db, id, memo_ingredient_index) + self.fetch_cold_with_retry( + zalsa, + zalsa_local, + db, + id, + memo_ingredient_index, + &mut retry_count, + ) }) { return memo; @@ -103,6 +111,7 @@ where db: &'db C::DbView, id: Id, memo_ingredient_index: MemoIngredientIndex, + retry_count: &mut u32, ) -> Option<&'db Memo<'db, C>> { let memo = self.fetch_cold(zalsa, zalsa_local, db, id, memo_ingredient_index)?; @@ -114,7 +123,7 @@ where // That is only correct for fixpoint cycles, though: `FallbackImmediate` cycles // never have provisional entries. 
if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate - || !memo.provisional_retry(zalsa, zalsa_local, self.database_key_index(id)) + || !memo.provisional_retry(zalsa, zalsa_local, self.database_key_index(id), retry_count) { Some(memo) } else { diff --git a/src/function/memo.rs b/src/function/memo.rs index fe91f6ba7..302ca73c3 100644 --- a/src/function/memo.rs +++ b/src/function/memo.rs @@ -144,6 +144,7 @@ impl<'db, C: Configuration> Memo<'db, C> { zalsa: &Zalsa, zalsa_local: &ZalsaLocal, database_key_index: DatabaseKeyIndex, + retry_count: &mut u32, ) -> bool { if self.block_on_heads(zalsa, zalsa_local) { // If we get here, we are a provisional value of @@ -151,6 +152,15 @@ impl<'db, C: Configuration> Memo<'db, C> { // returned to caller to allow fixpoint iteration to proceed. false } else { + assert!( + *retry_count <= 20000, + "Provisional memo retry limit exceeded for {database_key_index:?}; \ + this usually indicates a bug in salsa's cycle caching/locking. \ + (retried {retry_count} times)", + ); + + *retry_count += 1; + // all our cycle heads are complete; re-fetch // and we should get a non-provisional memo. crate::tracing::debug!( diff --git a/src/function/sync.rs b/src/function/sync.rs index 6302186ed..3ac8b1c86 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -219,7 +219,7 @@ pub enum SyncOwnerId { /// Marks an active 'claim' in the synchronization map. The claim is /// released when this value is dropped. 
#[must_use] -pub struct ClaimGuard<'me> { +pub(crate) struct ClaimGuard<'me> { key_index: Id, zalsa: &'me Zalsa, sync_table: &'me SyncTable, diff --git a/tests/parallel/cycle_nested_deep_panic.rs b/tests/parallel/cycle_nested_deep_panic.rs index 0ec79f5e7..42e9bcdfd 100644 --- a/tests/parallel/cycle_nested_deep_panic.rs +++ b/tests/parallel/cycle_nested_deep_panic.rs @@ -5,6 +5,7 @@ use crate::sync::thread; use crate::{Knobs, KnobsDatabase}; +use std::fmt; use std::panic::catch_unwind; use salsa::CycleRecoveryAction; @@ -17,7 +18,6 @@ const MAX: CycleValue = CycleValue(3); #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { - db.signal(1); query_b(db) } @@ -27,16 +27,14 @@ fn query_b(db: &dyn KnobsDatabase) -> CycleValue { CycleValue(c_value.0 + 1).min(MAX) } -#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] +#[salsa::tracked] fn query_c(db: &dyn KnobsDatabase) -> CycleValue { let d_value = query_d(db); if d_value > CycleValue(0) { - let _e_value = query_e(db); - let _b = query_b(db); - db.wait_for(2); - db.signal(3); - panic!("Dragons are real"); + let e_value = query_e(db); + let b_value = query_b(db); + CycleValue(d_value.0.max(e_value.0).max(b_value.0)) } else { let a_value = query_a(db); CycleValue(d_value.0.max(a_value.0)) @@ -45,7 +43,7 @@ fn query_c(db: &dyn KnobsDatabase) -> CycleValue { #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_d(db: &dyn KnobsDatabase) -> CycleValue { - query_c(db) + query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] @@ -75,29 +73,62 @@ fn the_test() { let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); - - query_a(&db_t1) + catch_unwind(|| { + db_t1.wait_for(1); + query_a(&db_t1) + }) }); let t2 = thread::spawn(move || { - let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); - db_t4.wait_for(1); - db_t4.signal(2); - 
query_b(&db_t4) + let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); + catch_unwind(|| { + db_t2.wait_for(1); + + query_b(&db_t2) + }) }); let t3 = thread::spawn(move || { - let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); - db_t2.wait_for(1); - query_d(&db_t2) + let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered(); + catch_unwind(|| { + db_t3.signal(2); + query_d(&db_t3) + }) }); - let r_t1 = t1.join(); - let r_t2 = t2.join(); - let r_t3 = t3.join(); + let r_t1 = t1.join().unwrap(); + let r_t2 = t2.join().unwrap(); + let r_t3 = t3.join().unwrap(); - assert!(r_t1.is_err()); - assert!(r_t2.is_err()); - assert!(r_t3.is_err()); + assert_is_set_cycle_error(r_t1); + assert_is_set_cycle_error(r_t2); + assert_is_set_cycle_error(r_t3); // Pulling the cycle again at a later point should still result in a panic. - assert!(catch_unwind(|| query_d(&db_t3)).is_err()); + assert_is_set_cycle_error(catch_unwind(|| query_d(&db_t4))); +} + +#[track_caller] +fn assert_is_set_cycle_error(result: Result>) +where + T: fmt::Debug, +{ + let err = result.expect_err("expected an error"); + + if let Some(message) = err.downcast_ref::<&str>() { + assert!( + message.contains("set cycle_fn/cycle_initial to fixpoint iterate"), + "Expected error message to contain 'set cycle_fn/cycle_initial to fixpoint iterate', but got: {}", + message + ); + } else if let Some(message) = err.downcast_ref::() { + assert!( + message.contains("set cycle_fn/cycle_initial to fixpoint iterate"), + "Expected error message to contain 'set cycle_fn/cycle_initial to fixpoint iterate', but got: {}", + message + ); + } else if let Some(_) = err.downcast_ref::() { + // This is okay, because Salsa throws a Cancelled::PropagatedPanic when a panic occurs in a query + // that it blocks on. 
+ } else { + std::panic::resume_unwind(err); + } } From c5cc3ddd8a13da4432bb51f061ee689069645875 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 11 Oct 2025 16:51:14 +0200 Subject: [PATCH 39/45] Better handling of panics in cycles --- src/cycle.rs | 1 + src/function/execute.rs | 33 ++++++++++++++++------- tests/parallel/cycle_nested_deep_panic.rs | 12 ++++++--- 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/src/cycle.rs b/src/cycle.rs index baeb60a5d..4a7c6ea47 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -342,6 +342,7 @@ impl CycleHeads { database_key_index: DatabaseKeyIndex, iteration_count: IterationCount, ) -> bool { + assert!(!iteration_count.is_panicked()); if let Some(existing) = self .0 .iter_mut() diff --git a/src/function/execute.rs b/src/function/execute.rs index 748326bb1..e1cf6a4b5 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -8,10 +8,11 @@ use crate::function::{ClaimGuard, Configuration, IngredientImpl}; use crate::ingredient::WaitForResult; use crate::plumbing::ZalsaLocal; use crate::sync::atomic::{AtomicBool, Ordering}; -use crate::tracing; +use crate::sync::thread; use crate::tracked_struct::Identity; use crate::zalsa::{MemoIngredientIndex, Zalsa}; use crate::zalsa_local::{ActiveQueryGuard, QueryRevisions}; +use crate::{tracing, Cancelled}; use crate::{DatabaseKeyIndex, Event, EventKind, Id}; impl IngredientImpl @@ -144,6 +145,8 @@ where zalsa_local: &'db ZalsaLocal, memo_ingredient_index: MemoIngredientIndex, ) -> (C::Output<'db>, CompletedQuery) { + claim_guard.set_release_mode(ReleaseMode::Default); + let database_key_index = claim_guard.database_key_index(); let zalsa = claim_guard.zalsa(); @@ -155,23 +158,33 @@ where // TODO: Can we seed those somehow? 
let mut last_stale_tracked_ids: Vec<(Identity, Id)> = Vec::new(); - let _guard = ClearCycleHeadIfPanicking::new(self, zalsa, id, memo_ingredient_index); let mut iteration_count = IterationCount::initial(); if let Some(old_memo) = opt_old_memo { - let memo_iteration_count = old_memo.revisions.iteration(); - if old_memo.verified_at.load() == zalsa.current_revision() && old_memo.cycle_heads().contains(&database_key_index) - && !memo_iteration_count.is_panicked() { + let memo_iteration_count = old_memo.revisions.iteration(); + + // The `DependencyGraph` locking propagates panics when another thread is blocked on a panicking query. + // However, the locking doesn't handle the case where a thread fetches the result of a panicking cycle head query **after** all locks were released. + // That's what we do here. We could consider re-executing the entire cycle but: + // a) It's tricky to ensure that all queries participating in the cycle will re-execute + // (we can't rely on `iteration_count` being updated for nested cycles because the nested cycles may have completed successfully). + // b) It's guaranteed that this query will panic again anyway. + // That's why we simply propagate the panic here. It simplifies our lives and it also avoids duplicate panic messages. + if memo_iteration_count.is_panicked() { + ::tracing::warn!("Propagating panic for cycle head that panicked in an earlier execution in that revision"); + Cancelled::PropagatedPanic.throw(); + } last_provisional_memo = Some(old_memo); iteration_count = memo_iteration_count; } } + let _poison_guard = + PoisonProvisionalIfPanicking::new(self, zalsa, id, memo_ingredient_index); let mut active_query = zalsa_local.push_query(database_key_index, iteration_count); - claim_guard.set_release_mode(ReleaseMode::Default); let (new_value, completed_query) = loop { // Tracked struct ids that existed in the previous revision @@ -496,14 +509,14 @@ where /// a new fix point initial value if that happens. 
/// /// We could insert a fixpoint initial value here, but it seems unnecessary. -struct ClearCycleHeadIfPanicking<'a, C: Configuration> { +struct PoisonProvisionalIfPanicking<'a, C: Configuration> { ingredient: &'a IngredientImpl, zalsa: &'a Zalsa, id: Id, memo_ingredient_index: MemoIngredientIndex, } -impl<'a, C: Configuration> ClearCycleHeadIfPanicking<'a, C> { +impl<'a, C: Configuration> PoisonProvisionalIfPanicking<'a, C> { fn new( ingredient: &'a IngredientImpl, zalsa: &'a Zalsa, @@ -519,9 +532,9 @@ impl<'a, C: Configuration> ClearCycleHeadIfPanicking<'a, C> { } } -impl Drop for ClearCycleHeadIfPanicking<'_, C> { +impl Drop for PoisonProvisionalIfPanicking<'_, C> { fn drop(&mut self) { - if std::thread::panicking() { + if thread::panicking() { let revisions = QueryRevisions::fixpoint_initial( self.ingredient.database_key_index(self.id), IterationCount::panicked(), diff --git a/tests/parallel/cycle_nested_deep_panic.rs b/tests/parallel/cycle_nested_deep_panic.rs index 42e9bcdfd..98512fbd2 100644 --- a/tests/parallel/cycle_nested_deep_panic.rs +++ b/tests/parallel/cycle_nested_deep_panic.rs @@ -63,8 +63,7 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } -#[test_log::test] -fn the_test() { +fn run() { tracing::debug!("Starting new run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); @@ -106,6 +105,13 @@ fn the_test() { assert_is_set_cycle_error(catch_unwind(|| query_d(&db_t4))); } +#[test_log::test] +fn the_test() { + for _ in 0..200 { + run() + } +} + #[track_caller] fn assert_is_set_cycle_error(result: Result>) where @@ -125,7 +131,7 @@ where "Expected error message to contain 'set cycle_fn/cycle_initial to fixpoint iterate', but got: {}", message ); - } else if let Some(_) = err.downcast_ref::() { + } else if err.downcast_ref::().is_some() { // This is okay, because Salsa throws a Cancelled::PropagatedPanic when a panic occurs in a query // that it blocks on. 
} else { From 7b39c2423a2491a9eb5b2858342c1dbb7463c7e5 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 11 Oct 2025 17:09:10 +0200 Subject: [PATCH 40/45] Don't use const-generic for `REENTRANT` --- src/function.rs | 4 ++-- src/function/fetch.rs | 4 ++-- src/function/maybe_changed_after.rs | 4 ++-- src/function/sync.rs | 27 ++++++++++++++++++++++----- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/src/function.rs b/src/function.rs index e99cc29db..3bc329808 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, ClaimResult, SyncGuard, SyncOwnerId, SyncTable}; +pub(crate) use sync::{ClaimGuard, ClaimResult, Reentrant, SyncGuard, SyncOwnerId, SyncTable}; use std::any::Any; use std::fmt; @@ -416,7 +416,7 @@ where /// * [`WaitResult::Cycle`] Claiming the `key_index` results in a cycle because it's on the current's thread query stack or /// running on another thread that is blocked on this thread. 
fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { - match self.sync_table.try_claim::(zalsa, key_index) { + match self.sync_table.try_claim(zalsa, key_index, Reentrant::Deny) { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), ClaimResult::Cycle { inner } => WaitForResult::Cycle { inner }, ClaimResult::Claimed(_) => WaitForResult::Available, diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 7c0f3ba17..776bedcbe 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -4,7 +4,7 @@ use crate::cycle::{CycleHeads, CycleRecoveryStrategy, IterationCount}; use crate::function::maybe_changed_after::VerifyCycleHeads; use crate::function::memo::Memo; use crate::function::sync::ClaimResult; -use crate::function::{Configuration, IngredientImpl}; +use crate::function::{Configuration, IngredientImpl, Reentrant}; use crate::zalsa::{MemoIngredientIndex, Zalsa}; use crate::zalsa_local::{QueryRevisions, ZalsaLocal}; use crate::{DatabaseKeyIndex, Id}; @@ -141,7 +141,7 @@ where ) -> Option<&'db Memo<'db, C>> { let database_key_index = self.database_key_index(id); // Try to claim this query: if someone else has claimed it already, go back and start again. 
- let claim_guard = match self.sync_table.try_claim::(zalsa, id) { + let claim_guard = match self.sync_table.try_claim(zalsa, id, Reentrant::Allow) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index ed212fe62..bbe62a608 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -5,7 +5,7 @@ use crate::accumulator::accumulated_map::InputAccumulatedValues; use crate::cycle::{CycleHeads, CycleRecoveryStrategy, ProvisionalStatus}; use crate::function::memo::{Memo, TryClaimCycleHeadsIter, TryClaimHeadsResult}; use crate::function::sync::ClaimResult; -use crate::function::{Configuration, IngredientImpl}; +use crate::function::{Configuration, IngredientImpl, Reentrant}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::Ordering; @@ -141,7 +141,7 @@ where ) -> Option { let database_key_index = self.database_key_index(key_index); - let claim_guard = match self.sync_table.try_claim::(zalsa, key_index) { + let claim_guard = match self.sync_table.try_claim(zalsa, key_index, Reentrant::Deny) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); diff --git a/src/function/sync.rs b/src/function/sync.rs index 3ac8b1c86..faa867d43 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -69,10 +69,11 @@ impl SyncTable { /// `REENTRANT` controls whether a query that transferred its ownership to another query for which /// this thread currently holds the lock for can be claimed. For example, if `a` transferred its ownership /// to `b`, and this thread holds the lock for `b`, then this thread can also claim `a` but only if `REENTRANT` is `true`. 
- pub(crate) fn try_claim<'me, const REENTRANT: bool>( + pub(crate) fn try_claim<'me>( &'me self, zalsa: &'me Zalsa, key_index: Id, + reentrant: Reentrant, ) -> ClaimResult<'me> { let mut write = self.syncs.lock(); match write.entry(key_index) { @@ -80,8 +81,7 @@ impl SyncTable { let id = match occupied_entry.get().id { SyncOwnerId::Thread(id) => id, SyncOwnerId::Transferred => { - return match self.try_claim_transferred::(zalsa, occupied_entry) - { + return match self.try_claim_transferred(zalsa, occupied_entry, reentrant) { Ok(claimed) => claimed, Err(other_thread) => match other_thread.block(write) { BlockResult::Cycle => ClaimResult::Cycle { inner: false }, @@ -131,10 +131,11 @@ impl SyncTable { #[cold] #[inline(never)] - fn try_claim_transferred<'me, const REENTRANT: bool>( + fn try_claim_transferred<'me>( &'me self, zalsa: &'me Zalsa, mut entry: OccupiedEntry, + reentrant: Reentrant, ) -> Result, Box>> { let key_index = *entry.key(); let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); @@ -144,7 +145,7 @@ impl SyncTable { .runtime() .block_transferred(database_key_index, thread_id) { - BlockTransferredResult::ImTheOwner if REENTRANT => { + BlockTransferredResult::ImTheOwner if reentrant.is_allow() => { let SyncState { id, claimed_twice, .. } = entry.into_mut(); @@ -407,3 +408,19 @@ impl std::fmt::Debug for ClaimGuard<'_> { .finish_non_exhaustive() } } + +#[derive(Copy, Clone, PartialEq, Eq)] +pub(crate) enum Reentrant { + /// Allow `try_claim` to reclaim a query's that transferred its ownership to a query + /// hold by this thread. + Allow, + + /// Only allow claiming queries that haven't been claimed by any thread. 
+ Deny, +} + +impl Reentrant { + const fn is_allow(self) -> bool { + matches!(self, Reentrant::Allow) + } +} From 054c469920cb3a353c6bbf7e887536cd960fa832 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 11 Oct 2025 17:56:50 +0200 Subject: [PATCH 41/45] More nit improvements --- src/cancelled.rs | 1 + src/cycle.rs | 23 +++++++++++++++++++++-- src/function.rs | 18 ++++++++++-------- src/function/execute.rs | 32 ++++++++++++++++---------------- src/function/sync.rs | 14 ++++++++------ src/zalsa_local.rs | 41 ++++++++++++++++++++++++++++++++++++++++- tests/parallel/main.rs | 2 +- 7 files changed, 97 insertions(+), 34 deletions(-) diff --git a/src/cancelled.rs b/src/cancelled.rs index 2f2f315d9..3c31bae5a 100644 --- a/src/cancelled.rs +++ b/src/cancelled.rs @@ -20,6 +20,7 @@ pub enum Cancelled { } impl Cancelled { + #[cold] pub(crate) fn throw(self) -> ! { // We use resume and not panic here to avoid running the panic // hook (that is, to avoid collecting and printing backtrace). diff --git a/src/cycle.rs b/src/cycle.rs index 4a7c6ea47..74995755e 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -51,6 +51,7 @@ //! Without this, different threads would compete for the locks of inner cycle heads, leading to potential //! hangs (but not deadlocks). +use std::iter::FusedIterator; use thin_vec::{thin_vec, ThinVec}; use crate::key::DatabaseKeyIndex; @@ -268,14 +269,17 @@ impl CycleHeads { } /// Iterates over all cycle heads that aren't equal to `own`. 
- pub(crate) fn iter_not_eq(&self, own: DatabaseKeyIndex) -> impl Iterator { + pub(crate) fn iter_not_eq( + &self, + own: DatabaseKeyIndex, + ) -> impl DoubleEndedIterator { self.iter() .filter(move |head| head.database_key_index != own) } pub(crate) fn contains(&self, value: &DatabaseKeyIndex) -> bool { self.into_iter() - .any(|head| head.database_key_index == *value && !head.removed.load(Ordering::Relaxed)) + .any(|head| head.database_key_index == *value) } /// Removes all cycle heads except `except` by marking them as removed. @@ -438,6 +442,21 @@ impl<'a> Iterator for CycleHeadsIterator<'a> { } } +impl FusedIterator for CycleHeadsIterator<'_> {} +impl DoubleEndedIterator for CycleHeadsIterator<'_> { + fn next_back(&mut self) -> Option { + loop { + let next = self.inner.next_back()?; + + if next.removed.load(Ordering::Relaxed) { + continue; + } + + return Some(next); + } + } +} + impl<'a> std::iter::IntoIterator for &'a CycleHeads { type Item = &'a CycleHead; type IntoIter = CycleHeadsIterator<'a>; diff --git a/src/function.rs b/src/function.rs index 3bc329808..7499b575b 100644 --- a/src/function.rs +++ b/src/function.rs @@ -92,16 +92,18 @@ pub trait Configuration: Any { /// Decide whether to iterate a cycle again or fallback. `value` is the provisional return /// value from the latest iteration of this cycle. `count` is the number of cycle iterations - /// we've already completed. + /// completed so far. /// - /// Note: There is no guarantee that `count` always starts at 0. It's possible that - /// the function is called with a non-zero value even if it is the first time around for - /// this specific query if the query has become the outermost cycle of a larger cycle. - /// In this case, Salsa uses the `count` value of the already iterating cycle as the start. + /// # Iteration count semantics /// - /// It's also not guaranteed that `count` values are contiguous. 
The function might not be called
-    /// if this query converged in this specific iteration OR if the query only participates conditionally
-    /// in the cycle (e.g. every other iteration).
+    /// The `count` parameter isn't guaranteed to start from zero or to be contiguous:
+    ///
+    /// * **Initial value**: `count` may be non-zero on the first call for a given query if that
+    ///   query becomes the outermost cycle head after a nested cycle completes a few iterations. In this case,
+    ///   `count` continues from the nested cycle's iteration count rather than resetting to zero.
+    /// * **Non-contiguous values**: This function isn't called if this cycle is part of an outer cycle
+    ///   and the value for this query remains unchanged for one iteration. But the outer cycle might
+    ///   keep iterating because other heads keep changing.
     fn recover_from_cycle<'db>(
         db: &'db Self::DbView,
         value: &Self::Output<'db>,
diff --git a/src/function/execute.rs b/src/function/execute.rs
index e1cf6a4b5..7465e0849 100644
--- a/src/function/execute.rs
+++ b/src/function/execute.rs
@@ -155,9 +155,9 @@ where
         // Our provisional value from the previous iteration, when doing fixpoint iteration.
         // This is different from `opt_old_memo` which might be from a different revision.
         let mut last_provisional_memo: Option<&Memo<'db, C>> = None;
+
         // TODO: Can we seed those somehow? 
let mut last_stale_tracked_ids: Vec<(Identity, Id)> = Vec::new(); - let mut iteration_count = IterationCount::initial(); if let Some(old_memo) = opt_old_memo { @@ -556,23 +556,23 @@ fn outer_cycle( cycle_heads: &CycleHeads, current_key: DatabaseKeyIndex, ) -> Option { + // SAFETY: We don't call into with_query_stack recursively + if let Some(on_stack) = unsafe { + zalsa_local.with_query_stack_unchecked(|stack| { + cycle_heads.iter_not_eq(current_key).rfind(|query| { + stack + .iter() + .rev() + .any(|active_query| active_query.database_key_index == query.database_key_index) + }) + }) + } { + return Some(on_stack.database_key_index); + } + cycle_heads .iter_not_eq(current_key) - .find(|head| { - // SAFETY: We don't call into with_query_stack recursively - let is_on_stack = unsafe { - zalsa_local.with_query_stack_unchecked(|stack| { - stack - .iter() - .rev() - .any(|query| query.database_key_index == head.database_key_index) - }) - }; - - if is_on_stack { - return true; - } - + .rfind(|head| { let ingredient = zalsa.lookup_ingredient(head.database_key_index.ingredient_index()); matches!( diff --git a/src/function/sync.rs b/src/function/sync.rs index faa867d43..09225cde7 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -26,8 +26,8 @@ pub(crate) enum ClaimResult<'a> { /// Claiming the query results in a cycle. Cycle { /// `true` if this is a cycle with an inner query. For example, if `a` transferred its ownership to - /// `b`. If the thread claiming `b` tries to claim `a`, then this results in a cycle unless - /// `REENTRANT` is `true` (in which case it can be claimed). + /// `b`. If the thread claiming `b` tries to claim `a`, then this results in a cycle except when calling + /// [`SyncTable::try_claim`] with [`Reentrant::Allow`]. inner: bool, }, /// Successfully claimed the query. @@ -65,10 +65,6 @@ impl SyncTable { } /// Claims the given key index, or blocks if it is running on another thread. 
- /// - /// `REENTRANT` controls whether a query that transferred its ownership to another query for which - /// this thread currently holds the lock for can be claimed. For example, if `a` transferred its ownership - /// to `b`, and this thread holds the lock for `b`, then this thread can also claim `a` but only if `REENTRANT` is `true`. pub(crate) fn try_claim<'me>( &'me self, zalsa: &'me Zalsa, @@ -241,6 +237,7 @@ impl<'me> ClaimGuard<'me> { } #[cold] + #[inline(never)] fn release_panicking(&self) { let mut syncs = self.sync_table.syncs.lock(); let state = syncs.remove(&self.key_index).expect("key claimed twice?"); @@ -409,6 +406,11 @@ impl std::fmt::Debug for ClaimGuard<'_> { } } +/// Controls whether this thread can claim a query that transferred its ownership to a query +/// this thread currently holds the lock for. +/// +/// For example: if query `a` transferred its ownership to query `b`, and this thread holds +/// the lock for `b`, then this thread can also claim `a` — but only when using [`Self::Allow`]. 
#[derive(Copy, Clone, PartialEq, Eq)] pub(crate) enum Reentrant { /// Allow `try_claim` to reclaim a query's that transferred its ownership to a query diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index b4a92c399..70bf29a07 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -1,4 +1,6 @@ use std::cell::{RefCell, UnsafeCell}; +use std::fmt; +use std::fmt::Formatter; use std::panic::UnwindSafe; use std::ptr::{self, NonNull}; @@ -522,7 +524,6 @@ impl QueryRevisionsExtra { } } -#[derive(Debug)] #[cfg_attr(feature = "persistence", derive(serde::Serialize, serde::Deserialize))] struct QueryRevisionsExtraInner { #[cfg(feature = "accumulator")] @@ -590,6 +591,44 @@ impl QueryRevisionsExtraInner { } } +impl fmt::Debug for QueryRevisionsExtraInner { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + struct FmtTrackedStructIds<'a>(&'a ThinVec<(Identity, Id)>); + + impl fmt::Debug for FmtTrackedStructIds<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut f = f.debug_list(); + + if self.0.len() > 5 { + f.entries(&self.0[..5]); + f.finish_non_exhaustive() + } else { + f.entries(self.0); + f.finish() + } + } + } + + let mut f = f.debug_struct("QueryRevisionsExtraInner"); + + f.field("cycle_heads", &self.cycle_heads) + .field("iteration", &self.iteration) + .field("cycle_converged", &self.cycle_converged); + + #[cfg(feature = "accumulator")] + { + f.field("accumulated", &self.accumulated); + } + + f.field( + "tracked_struct_ids", + &FmtTrackedStructIds(&self.tracked_struct_ids), + ); + + f.finish() + } +} + #[cfg(not(feature = "shuttle"))] #[cfg(target_pointer_width = "64")] const _: [(); std::mem::size_of::()] = [(); std::mem::size_of::<[usize; 4]>()]; diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs index d2c780787..c015f7ec0 100644 --- a/tests/parallel/main.rs +++ b/tests/parallel/main.rs @@ -34,7 +34,7 @@ pub(crate) mod sync { pub use shuttle::thread; pub fn check(f: impl Fn() + Send + Sync + 'static) { - 
shuttle::check_pct(f, 10000, 50); + shuttle::check_pct(f, 1000, 50); } } From f89c430644e15efe0ec74ef6108e045c98d0db80 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sat, 11 Oct 2025 18:00:24 +0200 Subject: [PATCH 42/45] Remove `IterationCount::panicked` --- src/cycle.rs | 14 -------------- src/function/execute.rs | 8 +++----- src/function/fetch.rs | 3 +-- src/zalsa_local.rs | 9 +++------ tests/parallel/main.rs | 2 +- 5 files changed, 8 insertions(+), 28 deletions(-) diff --git a/src/cycle.rs b/src/cycle.rs index 74995755e..c9a9b82c1 100644 --- a/src/cycle.rs +++ b/src/cycle.rs @@ -154,19 +154,6 @@ impl IterationCount { self.0 == 0 } - /// Iteration count reserved for panicked cycles. - /// - /// Using a special iteration count ensures that `validate_same_iteration` and `validate_provisional` - /// return `false` for queries depending on this panicked cycle, because the iteration count is guaranteed - /// to be different (which isn't guaranteed if the panicked memo uses [`Self::initial`]). - pub(crate) const fn panicked() -> Self { - Self(u8::MAX) - } - - pub(crate) const fn is_panicked(self) -> bool { - self.0 == u8::MAX - } - pub(crate) const fn increment(self) -> Option { let next = Self(self.0 + 1); if next.0 <= MAX_ITERATIONS.0 { @@ -346,7 +333,6 @@ impl CycleHeads { database_key_index: DatabaseKeyIndex, iteration_count: IterationCount, ) -> bool { - assert!(!iteration_count.is_panicked()); if let Some(existing) = self .0 .iter_mut() diff --git a/src/function/execute.rs b/src/function/execute.rs index 7465e0849..09606cb36 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -173,7 +173,7 @@ where // (we can't rely on `iteration_count` being updated for nested cycles because the nested cycles may have completed successfully). // b) It's guaranteed that this query will panic again anyway. // That's why we simply propagate the panic here. It simplifies our lives and it also avoids duplicate panic messages. 
- if memo_iteration_count.is_panicked() { + if old_memo.value.is_none() { ::tracing::warn!("Propagating panic for cycle head that panicked in an earlier execution in that revision"); Cancelled::PropagatedPanic.throw(); } @@ -535,10 +535,8 @@ impl<'a, C: Configuration> PoisonProvisionalIfPanicking<'a, C> { impl Drop for PoisonProvisionalIfPanicking<'_, C> { fn drop(&mut self) { if thread::panicking() { - let revisions = QueryRevisions::fixpoint_initial( - self.ingredient.database_key_index(self.id), - IterationCount::panicked(), - ); + let revisions = + QueryRevisions::fixpoint_initial(self.ingredient.database_key_index(self.id)); let memo = Memo::new(None, self.zalsa.current_revision(), revisions); self.ingredient diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 776bedcbe..2285bbd2e 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -273,8 +273,7 @@ where "hit cycle at {database_key_index:#?}, \ inserting and returning fixpoint initial value" ); - let revisions = - QueryRevisions::fixpoint_initial(database_key_index, IterationCount::initial()); + let revisions = QueryRevisions::fixpoint_initial(database_key_index); let initial_value = C::cycle_initial(db, C::id_to_input(zalsa, id)); self.insert_memo( zalsa, diff --git a/src/zalsa_local.rs b/src/zalsa_local.rs index 70bf29a07..39d0c489c 100644 --- a/src/zalsa_local.rs +++ b/src/zalsa_local.rs @@ -639,10 +639,7 @@ const _: [(); std::mem::size_of::()] = [(); std::mem::size_of::<[usize; if cfg!(feature = "accumulator") { 7 } else { 3 }]>()]; impl QueryRevisions { - pub(crate) fn fixpoint_initial( - query: DatabaseKeyIndex, - iteration_count: IterationCount, - ) -> Self { + pub(crate) fn fixpoint_initial(query: DatabaseKeyIndex) -> Self { Self { changed_at: Revision::start(), durability: Durability::MAX, @@ -654,8 +651,8 @@ impl QueryRevisions { #[cfg(feature = "accumulator")] AccumulatedMap::default(), ThinVec::default(), - CycleHeads::initial(query, iteration_count), - iteration_count, + 
CycleHeads::initial(query, IterationCount::initial()), + IterationCount::initial(), ), } } diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs index c015f7ec0..d2c780787 100644 --- a/tests/parallel/main.rs +++ b/tests/parallel/main.rs @@ -34,7 +34,7 @@ pub(crate) mod sync { pub use shuttle::thread; pub fn check(f: impl Fn() + Send + Sync + 'static) { - shuttle::check_pct(f, 1000, 50); + shuttle::check_pct(f, 10000, 50); } } From e2fda865c097754096102d4fbe989e1781744d4a Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Sun, 12 Oct 2025 07:34:11 +0200 Subject: [PATCH 43/45] Prefer outer most cycles in `outer_cycle` --- src/function/execute.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/function/execute.rs b/src/function/execute.rs index 09606cb36..b48b3250d 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -554,20 +554,25 @@ fn outer_cycle( cycle_heads: &CycleHeads, current_key: DatabaseKeyIndex, ) -> Option { + // First, look for the outer most cycle head on the same thread. + // Using the outer most over the inner most should reduce the need + // for transitive transfers. // SAFETY: We don't call into with_query_stack recursively - if let Some(on_stack) = unsafe { + if let Some(same_thread) = unsafe { zalsa_local.with_query_stack_unchecked(|stack| { - cycle_heads.iter_not_eq(current_key).rfind(|query| { - stack - .iter() - .rev() - .any(|active_query| active_query.database_key_index == query.database_key_index) - }) + stack + .iter() + .find(|active_query| { + cycle_heads.contains(&active_query.database_key_index) + && active_query.database_key_index != current_key + }) + .map(|active_query| active_query.database_key_index) }) } { - return Some(on_stack.database_key_index); + return Some(same_thread); } + // Check for any outer cycle head running on a different thread. 
cycle_heads .iter_not_eq(current_key) .rfind(|head| { From 00f5f7ffbde2d5183b1c205f9a28011b7aa538ab Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 16 Oct 2025 10:43:20 +0200 Subject: [PATCH 44/45] Code review feedback --- src/function.rs | 9 ++++--- src/function/execute.rs | 11 +++++---- src/function/fetch.rs | 4 +-- src/function/maybe_changed_after.rs | 7 ++++-- src/function/sync.rs | 38 ++++++++++++++++------------- src/ingredient.rs | 2 +- src/runtime.rs | 4 +-- src/runtime/dependency_graph.rs | 11 +++++---- src/tracing.rs | 8 +++++- tests/parallel/main.rs | 2 +- 10 files changed, 57 insertions(+), 39 deletions(-) diff --git a/src/function.rs b/src/function.rs index 7499b575b..259dff14b 100644 --- a/src/function.rs +++ b/src/function.rs @@ -1,5 +1,5 @@ pub(crate) use maybe_changed_after::{VerifyCycleHeads, VerifyResult}; -pub(crate) use sync::{ClaimGuard, ClaimResult, Reentrant, SyncGuard, SyncOwnerId, SyncTable}; +pub(crate) use sync::{ClaimGuard, ClaimResult, Reentrancy, SyncGuard, SyncOwner, SyncTable}; use std::any::Any; use std::fmt; @@ -400,7 +400,7 @@ where memo.revisions.cycle_converged() } - fn mark_as_transfer_target(&self, key_index: Id) -> Option { + fn mark_as_transfer_target(&self, key_index: Id) -> Option { self.sync_table.mark_as_transfer_target(key_index) } @@ -418,7 +418,10 @@ where /// * [`WaitResult::Cycle`] Claiming the `key_index` results in a cycle because it's on the current's thread query stack or /// running on another thread that is blocked on this thread. 
fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> { - match self.sync_table.try_claim(zalsa, key_index, Reentrant::Deny) { + match self + .sync_table + .try_claim(zalsa, key_index, Reentrancy::Deny) + { ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on), ClaimResult::Cycle { inner } => WaitForResult::Cycle { inner }, ClaimResult::Claimed(_) => WaitForResult::Available, diff --git a/src/function/execute.rs b/src/function/execute.rs index b48b3250d..67f76e145 100644 --- a/src/function/execute.rs +++ b/src/function/execute.rs @@ -167,14 +167,15 @@ where let memo_iteration_count = old_memo.revisions.iteration(); // The `DependencyGraph` locking propagates panics when another thread is blocked on a panicking query. - // However, the locking doesn't handle the case where a thread fetches the result of a panicking cycle head query **after** all locks were released. - // That's what we do here. We could consider re-executing the entire cycle but: + // However, the locking doesn't handle the case where a thread fetches the result of a panicking + // cycle head query **after** all locks were released. That's what we do here. + // We could consider re-executing the entire cycle but: // a) It's tricky to ensure that all queries participating in the cycle will re-execute // (we can't rely on `iteration_count` being updated for nested cycles because the nested cycles may have completed successfully). // b) It's guaranteed that this query will panic again anyway. // That's why we simply propagate the panic here. It simplifies our lives and it also avoids duplicate panic messages. 
if old_memo.value.is_none() { - ::tracing::warn!("Propagating panic for cycle head that panicked in an earlier execution in that revision"); + tracing::warn!("Propagating panic for cycle head that panicked in an earlier execution in that revision"); Cancelled::PropagatedPanic.throw(); } last_provisional_memo = Some(old_memo); @@ -228,7 +229,7 @@ where // t1: a (completes `b` with `c` in heads) // // Note how `a` only depends on `c` but not `a`. This is because `a` only saw the initial value of `c` and wasn't updated when `c` completed. - // That's why we need to resolve the cycle heads recursively to `cycle_heads` contains all cycle heads at the moment this query completed. + // That's why we need to resolve the cycle heads recursively so `cycle_heads` contains all cycle heads at the moment this query completed. for head in &cycle_heads { max_iteration_count = max_iteration_count.max(head.iteration_count.load()); depends_on_self |= head.database_key_index == database_key_index; @@ -397,7 +398,7 @@ where // The fixpoint iteration hasn't converged. Iterate again... 
iteration_count = iteration_count.increment().unwrap_or_else(|| { - ::tracing::warn!("{database_key_index:?}: execute: too many cycle iterations"); + tracing::warn!("{database_key_index:?}: execute: too many cycle iterations"); panic!("{database_key_index:?}: execute: too many cycle iterations") }); diff --git a/src/function/fetch.rs b/src/function/fetch.rs index 2285bbd2e..ef42708a7 100644 --- a/src/function/fetch.rs +++ b/src/function/fetch.rs @@ -4,7 +4,7 @@ use crate::cycle::{CycleHeads, CycleRecoveryStrategy, IterationCount}; use crate::function::maybe_changed_after::VerifyCycleHeads; use crate::function::memo::Memo; use crate::function::sync::ClaimResult; -use crate::function::{Configuration, IngredientImpl, Reentrant}; +use crate::function::{Configuration, IngredientImpl, Reentrancy}; use crate::zalsa::{MemoIngredientIndex, Zalsa}; use crate::zalsa_local::{QueryRevisions, ZalsaLocal}; use crate::{DatabaseKeyIndex, Id}; @@ -141,7 +141,7 @@ where ) -> Option<&'db Memo<'db, C>> { let database_key_index = self.database_key_index(id); // Try to claim this query: if someone else has claimed it already, go back and start again. 
- let claim_guard = match self.sync_table.try_claim(zalsa, id, Reentrant::Allow) { + let claim_guard = match self.sync_table.try_claim(zalsa, id, Reentrancy::Allow) { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); diff --git a/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs index bbe62a608..698285055 100644 --- a/src/function/maybe_changed_after.rs +++ b/src/function/maybe_changed_after.rs @@ -5,7 +5,7 @@ use crate::accumulator::accumulated_map::InputAccumulatedValues; use crate::cycle::{CycleHeads, CycleRecoveryStrategy, ProvisionalStatus}; use crate::function::memo::{Memo, TryClaimCycleHeadsIter, TryClaimHeadsResult}; use crate::function::sync::ClaimResult; -use crate::function::{Configuration, IngredientImpl, Reentrant}; +use crate::function::{Configuration, IngredientImpl, Reentrancy}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::Ordering; @@ -141,7 +141,10 @@ where ) -> Option { let database_key_index = self.database_key_index(key_index); - let claim_guard = match self.sync_table.try_claim(zalsa, key_index, Reentrant::Deny) { + let claim_guard = match self + .sync_table + .try_claim(zalsa, key_index, Reentrancy::Deny) + { ClaimResult::Claimed(guard) => guard, ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); diff --git a/src/function/sync.rs b/src/function/sync.rs index 09225cde7..97a36262c 100644 --- a/src/function/sync.rs +++ b/src/function/sync.rs @@ -35,8 +35,8 @@ pub(crate) enum ClaimResult<'a> { } pub(crate) struct SyncState { - /// The thread id that is owning this query (actively executing it or iterating it as part of a larger cycle). - id: SyncOwnerId, + /// The thread id that currently owns this query (actively executing it or iterating it as part of a larger cycle). + id: SyncOwner, /// Set to true if any other queries are blocked, /// waiting for this query to complete. 
@@ -51,7 +51,7 @@ pub(crate) struct SyncState { /// Whether this query has been claimed by the query that currently owns it. /// /// If `a` has been transferred to `b` and the stack for t1 is `b -> a`, then `a` can be claimed - /// and `claimed_transferred` is set to `true`. However, t2 won't be able to claim `a` because + /// and `claimed_twice` is set to `true`. However, t2 won't be able to claim `a` because /// it doesn't own `b`. claimed_twice: bool, } @@ -69,14 +69,14 @@ impl SyncTable { &'me self, zalsa: &'me Zalsa, key_index: Id, - reentrant: Reentrant, + reentrant: Reentrancy, ) -> ClaimResult<'me> { let mut write = self.syncs.lock(); match write.entry(key_index) { std::collections::hash_map::Entry::Occupied(occupied_entry) => { let id = match occupied_entry.get().id { - SyncOwnerId::Thread(id) => id, - SyncOwnerId::Transferred => { + SyncOwner::Thread(id) => id, + SyncOwner::Transferred => { return match self.try_claim_transferred(zalsa, occupied_entry, reentrant) { Ok(claimed) => claimed, Err(other_thread) => match other_thread.block(write) { @@ -110,7 +110,7 @@ impl SyncTable { } std::collections::hash_map::Entry::Vacant(vacant_entry) => { vacant_entry.insert(SyncState { - id: SyncOwnerId::Thread(thread::current().id()), + id: SyncOwner::Thread(thread::current().id()), anyone_waiting: false, is_transfer_target: false, claimed_twice: false, @@ -131,7 +131,7 @@ impl SyncTable { &'me self, zalsa: &'me Zalsa, mut entry: OccupiedEntry, - reentrant: Reentrant, + reentrant: Reentrancy, ) -> Result, Box>> { let key_index = *entry.key(); let database_key_index = DatabaseKeyIndex::new(self.ingredient, key_index); @@ -147,7 +147,7 @@ impl SyncTable { } = entry.into_mut(); debug_assert!(!*claimed_twice); - *id = SyncOwnerId::Thread(thread_id); + *id = SyncOwner::Thread(thread_id); *claimed_twice = true; Ok(ClaimResult::Claimed(ClaimGuard { @@ -164,7 +164,7 @@ impl SyncTable { } BlockTransferredResult::Released => { entry.insert(SyncState { - id: 
SyncOwnerId::Thread(thread_id), + id: SyncOwner::Thread(thread_id), anyone_waiting: false, is_transfer_target: false, claimed_twice: false, @@ -185,9 +185,13 @@ impl SyncTable { /// /// Note: The result of this method will immediately become stale unless the thread owning `key_index` /// is currently blocked on this thread (claiming `key_index` from this thread results in a cycle). - pub(super) fn mark_as_transfer_target(&self, key_index: Id) -> Option { + pub(super) fn mark_as_transfer_target(&self, key_index: Id) -> Option { let mut syncs = self.syncs.lock(); syncs.get_mut(&key_index).map(|state| { + // We set `anyone_waiting` to true because it is used in `ClaimGuard::release` + // to exit early if the query doesn't need to release any locks. + // However, there are now dependent queries that need to be released, that's why we set `anyone_waiting` to true, + // so that `ClaimGuard::release` no longer exits early. state.anyone_waiting = true; state.is_transfer_target = true; @@ -197,7 +201,7 @@ impl SyncTable { } #[derive(Copy, Clone, Debug)] -pub enum SyncOwnerId { +pub enum SyncOwner { /// Query is owned by this thread Thread(thread::ThreadId), @@ -287,7 +291,7 @@ impl<'me> ClaimGuard<'me> { if state.get().claimed_twice { state.get_mut().claimed_twice = false; - state.get_mut().id = SyncOwnerId::Transferred; + state.get_mut().id = SyncOwner::Transferred; } else { self.release(state.remove(), WaitResult::Completed); } @@ -333,7 +337,7 @@ impl<'me> ClaimGuard<'me> { .runtime() .transfer_lock(self_key, new_owner, new_owner_thread_id); - *id = SyncOwnerId::Transferred; + *id = SyncOwner::Transferred; *claimed_twice = false; } } @@ -412,7 +416,7 @@ impl std::fmt::Debug for ClaimGuard<'_> { /// For example: if query `a` transferred its ownership to query `b`, and this thread holds /// the lock for `b`, then this thread can also claim `a` — but only when using [`Self::Allow`]. 
#[derive(Copy, Clone, PartialEq, Eq)] -pub(crate) enum Reentrant { +pub(crate) enum Reentrancy { /// Allow `try_claim` to reclaim a query's that transferred its ownership to a query /// hold by this thread. Allow, @@ -421,8 +425,8 @@ pub(crate) enum Reentrant { Deny, } -impl Reentrant { +impl Reentrancy { const fn is_allow(self) -> bool { - matches!(self, Reentrant::Allow) + matches!(self, Reentrancy::Allow) } } diff --git a/src/ingredient.rs b/src/ingredient.rs index 7999b08fb..9b377e4d1 100644 --- a/src/ingredient.rs +++ b/src/ingredient.rs @@ -104,7 +104,7 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync { /// /// Note: The returned `SyncOwnerId` may be outdated as soon as this function returns **unless** /// it's guaranteed that `_key_index` is blocked on the current thread. - fn mark_as_transfer_target(&self, _key_index: Id) -> Option { + fn mark_as_transfer_target(&self, _key_index: Id) -> Option { unreachable!("mark_as_transfer_target should only be called on functions"); } diff --git a/src/runtime.rs b/src/runtime.rs index e1f4aadf2..670d6d62f 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,6 +1,6 @@ use self::dependency_graph::DependencyGraph; use crate::durability::Durability; -use crate::function::{SyncGuard, SyncOwnerId}; +use crate::function::{SyncGuard, SyncOwner}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::thread::{self, ThreadId}; @@ -382,7 +382,7 @@ impl Runtime { &self, query: DatabaseKeyIndex, new_owner_key: DatabaseKeyIndex, - new_owner_id: SyncOwnerId, + new_owner_id: SyncOwner, ) { self.dependency_graph.lock().transfer_lock( query, diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs index 366a98f53..403f7c544 100644 --- a/src/runtime/dependency_graph.rs +++ b/src/runtime/dependency_graph.rs @@ -3,7 +3,7 @@ use std::pin::Pin; use rustc_hash::FxHashMap; use smallvec::SmallVec; -use crate::function::SyncOwnerId; +use crate::function::SyncOwner; 
use crate::key::DatabaseKeyIndex; use crate::runtime::dependency_graph::edge::EdgeCondvar; use crate::runtime::WaitResult; @@ -223,11 +223,11 @@ impl DependencyGraph { query: DatabaseKeyIndex, current_thread: ThreadId, new_owner: DatabaseKeyIndex, - new_owner_id: SyncOwnerId, + new_owner_id: SyncOwner, ) { let new_owner_thread = match new_owner_id { - SyncOwnerId::Thread(thread) => thread, - SyncOwnerId::Transferred => { + SyncOwner::Thread(thread) => thread, + SyncOwner::Transferred => { // Skip over `query` to skip over any existing mapping from `new_owner` to `query` that may // exist from previous transfers. self.thread_id_of_transferred_query(new_owner, Some(query)) @@ -267,7 +267,7 @@ impl DependencyGraph { // If we have `c -> a -> d` and we now insert a mapping `d -> c`, rewrite the mapping to // `d -> c -> a` to avoid cycles. // - // Or, starting with `e -> c -> a -> d -> b` insert `d -> c`. We need to respine the tree to + // Or, starting with `e -> c -> a -> d -> b` insert `d -> c`. We need to rewrite the tree to // ``` // e -> c -> a -> b // d / @@ -547,6 +547,7 @@ mod edge { /// Signalled whenever a query with dependents completes. /// Allows those dependents to check if they are ready to unblock. + /// `condvar: unsafe<'stack_frame> Pin<&'stack_frame Condvar>` condvar: Pin<&'static EdgeCondvar>, } diff --git a/src/tracing.rs b/src/tracing.rs index d8b13e471..6d3ae8851 100644 --- a/src/tracing.rs +++ b/src/tracing.rs @@ -7,6 +7,12 @@ macro_rules! trace { }; } +macro_rules! warn_event { + ($($x:tt)*) => { + crate::tracing::event!(WARN, $($x)*) + }; +} + macro_rules! info { ($($x:tt)*) => { crate::tracing::event!(INFO, $($x)*) @@ -59,4 +65,4 @@ macro_rules! 
span { } #[expect(unused_imports)] -pub(crate) use {debug, debug_span, event, info, info_span, span, trace}; +pub(crate) use {debug, debug_span, event, info, info_span, span, trace, warn_event as warn}; diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs index d2c780787..6bc89d2a2 100644 --- a/tests/parallel/main.rs +++ b/tests/parallel/main.rs @@ -34,7 +34,7 @@ pub(crate) mod sync { pub use shuttle::thread; pub fn check(f: impl Fn() + Send + Sync + 'static) { - shuttle::check_pct(f, 10000, 50); + shuttle::check_pct(f, 2500, 50); } } From 9293c3da80afff13fdb755968610040c999a4796 Mon Sep 17 00:00:00 2001 From: Micha Reiser Date: Thu, 16 Oct 2025 11:12:36 +0200 Subject: [PATCH 45/45] Iterate only once in panic test when running with miri --- tests/parallel/cycle_nested_deep_panic.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/parallel/cycle_nested_deep_panic.rs b/tests/parallel/cycle_nested_deep_panic.rs index 98512fbd2..8b89f362a 100644 --- a/tests/parallel/cycle_nested_deep_panic.rs +++ b/tests/parallel/cycle_nested_deep_panic.rs @@ -107,7 +107,9 @@ fn run() { #[test_log::test] fn the_test() { - for _ in 0..200 { + let count = if cfg!(miri) { 1 } else { 200 }; + + for _ in 0..count { run() } }