diff --git a/src/context_deserialize.rs b/src/context_deserialize.rs
index 7642f68..9214a85 100644
--- a/src/context_deserialize.rs
+++ b/src/context_deserialize.rs
@@ -1,4 +1,4 @@
-use crate::{List, Value, Vector};
+use crate::{List, ProgressiveList, Value, Vector};
 use context_deserialize::ContextDeserialize;
 use serde::de::Deserializer;
 use typenum::Unsigned;
@@ -43,3 +43,23 @@ where
         })
     }
 }
+
+impl<'de, C, T> ContextDeserialize<'de, C> for ProgressiveList<T>
+where
+    T: ContextDeserialize<'de, C> + Value,
+    C: Clone,
+{
+    fn context_deserialize<D>(deserializer: D, context: C) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        // First deserialize as a Vec.
+        // This is not the most efficient implementation as it allocates a temporary Vec. In future
+        // we could write a more performant implementation using `ProgressiveList::builder()`.
+        let vec = Vec::<T>::context_deserialize(deserializer, context)?;
+
+        // Then convert to a ProgressiveList.
+        ProgressiveList::try_from(vec)
+            .map_err(|e| serde::de::Error::custom(format!("Failed to create ProgressiveList: {:?}", e)))
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index c09d0fd..2d08b79 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -12,6 +12,8 @@ pub mod level_iter;
 pub mod list;
 pub mod mem;
 pub mod packed_leaf;
+pub mod prog_tree;
+pub mod progressive_list;
 mod repeat;
 pub mod serde;
 mod tests;
@@ -29,6 +31,7 @@ pub use interface::ImmList;
 pub use leaf::Leaf;
 pub use list::List;
 pub use packed_leaf::PackedLeaf;
+pub use progressive_list::ProgressiveList;
 pub use tree::Tree;
 pub use triomphe::Arc;
 pub use update_map::UpdateMap;
diff --git a/src/prog_tree.rs b/src/prog_tree.rs
new file mode 100644
index 0000000..31edd0e
--- /dev/null
+++ b/src/prog_tree.rs
@@ -0,0 +1,285 @@
+use crate::{
+    Arc, Error, Tree, Value,
+    builder::Builder,
+    iter::Iter,
+    utils::{Length, opt_packing_factor},
+};
+use educe::Educe;
+use ethereum_hashing::hash32_concat;
+use parking_lot::RwLock;
+use tree_hash::Hash256;
+
+/// The size of each binary subtree in a progressive tree is `4^prog_depth` at depth `prog_depth`.
+const PROG_TREE_EXPONENT: usize = 4;
+
+/// This scaling factor is used to convert between a 4-based progressive depth and a 2-based
+/// depth for a binary subtree.
+///
+/// It is defined such that the binary subtree at progressive depth `prog_depth` has depth
+/// `PROG_TREE_BINARY_SCALE * prog_depth`. This comes from this equation:
+///
+/// PROG_TREE_EXPONENT^prog_depth = 2^binary_depth
+///
+/// Hence:
+///
+/// binary_depth = log2(PROG_TREE_EXPONENT^prog_depth)
+///
+/// Knowing PROG_TREE_EXPONENT is `2^k` for some `k`, this becomes:
+///
+/// binary_depth = log2(2^(k * prog_depth))
+///              = k * prog_depth
+///
+/// This `k` is the scaling factor, equal to `log2(PROG_TREE_EXPONENT)`.
+const PROG_TREE_BINARY_SCALE: usize = PROG_TREE_EXPONENT.trailing_zeros() as usize;
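+
+// For example, with `PROG_TREE_EXPONENT = 4`, `trailing_zeros` gives `k = 2`, so binary depths
+// are twice the corresponding progressive depths: a subtree of `4^3 = 64` chunks is a binary
+// tree of depth `log2(64) = 6 = 2 * 3`.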
+
+/// Tree type for the implementation of `ProgressiveList`.
+#[derive(Debug, Educe)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
+#[educe(PartialEq(bound(T: Value)), Hash)]
+pub enum ProgTree<T: Value> {
+    ProgZero,
+    ProgNode {
+        #[educe(PartialEq(ignore), Hash(ignore))]
+        #[cfg_attr(feature = "arbitrary", arbitrary(with = crate::utils::arb_rwlock))]
+        hash: RwLock<Hash256>,
+        #[cfg_attr(feature = "arbitrary", arbitrary(with = crate::utils::arb_arc))]
+        left: Arc<Self>,
+        #[cfg_attr(feature = "arbitrary", arbitrary(with = crate::utils::arb_arc))]
+        right: Arc<Tree<T>>,
+    },
+}
+
+impl<T: Value> ProgTree<T> {
+    pub fn empty() -> Self {
+        Self::ProgZero
+    }
+
+    /// The number of values that can be stored in the single subtree at `prog_depth` itself.
+    pub fn capacity_at_depth(prog_depth: u32) -> usize {
+        let capacity_pre_packing = match prog_depth.checked_sub(1) {
+            None => 0,
+            Some(depth_minus_one) => PROG_TREE_EXPONENT.pow(depth_minus_one),
+        };
+        capacity_pre_packing * opt_packing_factor::<T>().unwrap_or(1)
+    }
+
+    /// The number of values that can be stored in the whole progressive tree up to and including
+    /// the layer at `prog_depth`.
+    pub fn total_capacity_at_depth(prog_depth: u32) -> usize {
+        let total_capacity_pre_packing =
+            PROG_TREE_EXPONENT.pow(prog_depth).saturating_sub(1) / (PROG_TREE_EXPONENT - 1);
+        total_capacity_pre_packing * opt_packing_factor::<T>().unwrap_or(1)
+    }
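+
+    // For example, assuming `Hash256` is unpacked (packing factor 1) and `u64` packs 4 values
+    // per leaf: the per-layer capacities for prog_depth = 1, 2, 3, 4 are 1, 4, 16, 64 values of
+    // `Hash256` (4, 16, 64, 256 values of `u64`), and the cumulative totals are 1, 5, 21, 85
+    // (respectively 4, 20, 84, 340).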
+
+    /// Calculate the depth for the binary subtree at `prog_depth`.
+    pub fn prog_depth_to_binary_depth(prog_depth: u32) -> usize {
+        match prog_depth.checked_sub(1) {
+            None => 0,
+            Some(prog_depth_minus_one) => {
+                // FIXME: work out why we don't need to sub the packing depth here, seems weird
+                PROG_TREE_BINARY_SCALE * prog_depth_minus_one as usize
+            }
+        }
+    }
+
+    // TODO: add a bulk builder
+    fn push_recursive(
+        &self,
+        value: T,
+        current_length: usize,
+        prog_depth: u32,
+    ) -> Result<Self, Error> {
+        match self {
+            // Expand this zero into a new right node for our element.
+            Self::ProgZero => {
+                // The `prog_depth` of the new right subtree is `prog_depth + 1`.
+                let subtree_depth = Self::prog_depth_to_binary_depth(prog_depth + 1);
+                let mut tree_builder = Builder::<T>::new(subtree_depth, 0)?;
+                tree_builder.push(value)?;
+                let (new_right, _, _) = tree_builder.finish()?;
+
+                Ok(Self::ProgNode {
+                    hash: RwLock::new(Hash256::ZERO),
+                    left: Arc::new(Self::ProgZero),
+                    right: new_right,
+                })
+            }
+            Self::ProgNode {
+                hash: _,
+                left,
+                right,
+            } => {
+                // Case 1: the new element already fits inside the right subtree at `prog_depth + 1`.
+                let total_capacity_at_depth = Self::total_capacity_at_depth(prog_depth + 1);
+                if current_length < total_capacity_at_depth {
+                    let index =
+                        current_length.saturating_sub(Self::total_capacity_at_depth(prog_depth));
+
+                    // Our right subtree can hold 4^prog_depth entries. We need to work out
+                    // a 2-based depth for this subtree, such that the subtree holds
+                    // 2^subtree_depth entries.
+                    let subtree_depth = Self::prog_depth_to_binary_depth(prog_depth + 1);
+                    let new_right = right.with_updated_leaf(index, value, subtree_depth)?;
+
+                    // FIXME: remove assert
+                    assert!(matches!(**left, Self::ProgZero));
+
+                    Ok(Self::ProgNode {
+                        hash: RwLock::new(Hash256::ZERO),
+                        left: left.clone(),
+                        right: new_right,
+                    })
+                } else {
+                    // Case 2: the new element does not fit inside this right subtree: recurse to
+                    // the next level on the left.
+                    let new_left = left.push_recursive(value, current_length, prog_depth + 1)?;
+
+                    Ok(Self::ProgNode {
+                        hash: RwLock::new(Hash256::ZERO),
+                        left: Arc::new(new_left),
+                        right: right.clone(),
+                    })
+                }
+            }
+        }
+    }
+
+    pub fn push(&self, value: T, current_length: usize) -> Result<Self, Error> {
+        self.push_recursive(value, current_length, 0)
+    }
+
+    /// Create an iterator over all elements in the progressive tree.
+    ///
+    /// The iterator traverses elements in order by visiting each binary subtree
+    /// (right child) at increasing progressive depths:
+    /// 1. All elements in the right child at the root level
+    /// 2. All elements in the right child of the first left node
+    /// 3. All elements in the right child of the second left node
+    ///
+    /// And so on, following the progressive tree structure as defined in EIP-7916.
+    pub fn iter(&self, length: usize) -> ProgTreeIter<'_, T> {
+        ProgTreeIter::new(self, length)
+    }
+}
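+
+// The overall shape produced by repeated `push`, for a list longer than 5 packed chunks
+// (ranges are chunk indices, before packing):
+//
+//   root -+- right: 1-chunk binary subtree        (chunk 0)
+//         +- left -+- right: 4-chunk binary subtree   (chunks 1..5)
+//                  +- left -+- right: 16-chunk binary subtree (chunks 5..21)
+//                           +- left: ProgZero until more capacity is needed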
+
+impl<T: Value> ProgTree<T> {
+    pub fn tree_hash(&self) -> Hash256 {
+        match self {
+            Self::ProgZero => Hash256::ZERO,
+            Self::ProgNode { hash, left, right } => {
+                let read_lock = hash.read();
+                let existing_hash = *read_lock;
+                drop(read_lock);
+
+                if !existing_hash.is_zero() {
+                    existing_hash
+                } else {
+                    // Parallelism goes brrrr.
+                    let (left_hash, right_hash) =
+                        rayon::join(|| left.tree_hash(), || right.tree_hash());
+                    let tree_hash =
+                        Hash256::from(hash32_concat(left_hash.as_slice(), right_hash.as_slice()));
+                    *hash.write() = tree_hash;
+                    tree_hash
+                }
+            }
+        }
+    }
+}
+
+/// Iterator over elements in a progressive tree.
+///
+/// The iterator traverses each binary subtree (right child) in sequence by following
+/// the left spine of the progressive tree structure.
+#[derive(Debug)]
+pub struct ProgTreeIter<'a, T: Value> {
+    /// Current progressive node being traversed.
+    current_prog_node: Option<&'a ProgTree<T>>,
+    /// Current iterator over a binary subtree (Tree).
+    current_iter: Option<Iter<'a, T>>,
+    /// Progressive depth for calculating the next subtree depth.
+    prog_depth: u32,
+    /// Total number of elements to iterate.
+    length: usize,
+    /// Number of elements already yielded.
+    yielded: usize,
+}
+
+impl<'a, T: Value> ProgTreeIter<'a, T> {
+    fn new(root: &'a ProgTree<T>, length: usize) -> Self {
+        let mut iter = Self {
+            current_prog_node: Some(root),
+            current_iter: None,
+            prog_depth: 0,
+            length,
+            yielded: 0,
+        };
+
+        // Initialize by setting up the iterator for the first right child
+        iter.advance_to_next_subtree();
+        iter
+    }
+
+    /// Advance to the next binary subtree by moving to the left child and
+    /// setting up an iterator for its right child.
+    fn advance_to_next_subtree(&mut self) {
+        match self.current_prog_node {
+            None | Some(ProgTree::ProgZero) => {
+                // No more subtrees
+                self.current_iter = None;
+                self.current_prog_node = None;
+            }
+            Some(ProgTree::ProgNode { left, right, .. }) => {
+                self.prog_depth += 1;
+
+                // Calculate the depth and length for this binary subtree
+                let binary_depth = ProgTree::<T>::prog_depth_to_binary_depth(self.prog_depth);
+                let remaining = self.length.saturating_sub(self.yielded);
+                let capacity = ProgTree::<T>::capacity_at_depth(self.prog_depth);
+                let subtree_length = remaining.min(capacity);
+
+                // Create an iterator for the right subtree
+                self.current_iter = Some(Iter::from_index(
+                    0,
+                    right,
+                    binary_depth,
+                    Length(subtree_length),
+                ));
+
+                // Move to the left child for the next iteration
+                self.current_prog_node = Some(left);
+            }
+        }
+    }
+}
+
+impl<'a, T: Value> Iterator for ProgTreeIter<'a, T> {
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            // Try to get the next item from the current binary tree iterator
+            if let Some(iter) = &mut self.current_iter
+                && let Some(value) = iter.next()
+            {
+                self.yielded += 1;
+                return Some(value);
+            }
+
+            // Current subtree exhausted, move to the next one
+            if self.current_prog_node.is_some() {
+                self.advance_to_next_subtree();
+            } else {
+                // No more subtrees to iterate
+                return None;
+            }
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = self.length.saturating_sub(self.yielded);
+        (remaining, Some(remaining))
+    }
+}
+
+impl<T: Value> ExactSizeIterator for ProgTreeIter<'_, T> {}
diff --git a/src/progressive_list.rs b/src/progressive_list.rs
new file mode 100644
index 0000000..2cca5c7
--- /dev/null
+++ b/src/progressive_list.rs
@@ -0,0 +1,180 @@
+use crate::{
+    Arc, Error, Value,
+    prog_tree::{ProgTree, ProgTreeIter},
+    utils::Length,
+};
+use itertools::process_results;
+use serde::{Deserialize, Deserializer, de::Error as _};
+use ssz::{BYTES_PER_LENGTH_OFFSET, Decode, Encode, SszEncoder, TryFromIter};
+use std::convert::TryFrom;
+use tree_hash::{Hash256, PackedEncoding, TreeHash};
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct ProgressiveList<T: Value> {
+    tree: Arc<ProgTree<T>>,
+    length: Length,
+}
+
+impl<T: Value> ProgressiveList<T> {
+    pub fn empty() -> Self {
+        Self {
+            tree: Arc::new(ProgTree::empty()),
+            length: Length(0),
+        }
+    }
+
+    pub fn try_from_iter(iter: impl IntoIterator<Item = T>) -> Result<Self, Error> {
+        let mut list = Self::empty();
+        for value in iter {
+            list.push(value)?;
+        }
+        Ok(list)
+    }
+
+    pub fn push(&mut self, value: T) -> Result<(), Error> {
+        self.tree = self.tree.push(value, self.len())?.into();
+        *self.length.as_mut() += 1;
+        Ok(())
+    }
+
+    pub fn len(&self) -> usize {
+        self.length.as_usize()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    pub fn iter(&self) -> ProgTreeIter<'_, T> {
+        self.tree.iter(self.len())
+    }
+}
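+
+// Example usage of the API above (illustrative sketch, not a doctest):
+//
+//     let mut list = ProgressiveList::<u64>::empty();
+//     list.push(1)?;
+//     list.push(2)?;
+//     assert_eq!(list.len(), 2);
+//     assert_eq!(list.iter().copied().collect::<Vec<_>>(), vec![1, 2]);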
+
+impl<T: Value> TryFrom<Vec<T>> for ProgressiveList<T> {
+    type Error = Error;
+
+    fn try_from(vec: Vec<T>) -> Result<Self, Error> {
+        Self::try_from_iter(vec)
+    }
+}
+
+impl<'a, T: Value> IntoIterator for &'a ProgressiveList<T> {
+    type Item = &'a T;
+    type IntoIter = ProgTreeIter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<T: Value> TreeHash for ProgressiveList<T> {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        tree_hash::TreeHashType::List
+    }
+
+    fn tree_hash_packed_encoding(&self) -> PackedEncoding {
+        unreachable!("ProgressiveList should never be packed.")
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        unreachable!("ProgressiveList should never be packed.")
+    }
+
+    fn tree_hash_root(&self) -> Hash256 {
+        let root = self.tree.tree_hash();
+        tree_hash::mix_in_length(&root, self.len())
+    }
+}
+
+// FIXME: duplicated from `ssz::encode::impl_for_vec`
+impl<T: Value> Encode for ProgressiveList<T> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        if <T as Encode>::is_ssz_fixed_len() {
+            <T as Encode>::ssz_fixed_len() * self.len()
+        } else {
+            let mut len = self.iter().map(|item| item.ssz_bytes_len()).sum();
+            len += BYTES_PER_LENGTH_OFFSET * self.len();
+            len
+        }
+    }
+
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        if <T as Encode>::is_ssz_fixed_len() {
+            buf.reserve(<T as Encode>::ssz_fixed_len() * self.len());
+
+            for item in self {
+                item.ssz_append(buf);
+            }
+        } else {
+            let mut encoder = SszEncoder::container(buf, self.len() * BYTES_PER_LENGTH_OFFSET);
+
+            for item in self {
+                encoder.append(item);
+            }
+
+            encoder.finalize();
+        }
+    }
+}
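+
+// Note: the SSZ wire format produced here is the same as for a fixed-capacity `List`: fixed-size
+// items are concatenated directly, while variable-size items are written after a table of 4-byte
+// offsets. For example, a `ProgressiveList<u64>` holding `[1, 2, 3]` serializes to the three
+// values as little-endian `u64`s (24 bytes), with no length prefix.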
+
+impl<T> TryFromIter<T> for ProgressiveList<T>
+where
+    T: Value,
+{
+    type Error = Error;
+
+    fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error>
+    where
+        I: IntoIterator<Item = T>,
+    {
+        ProgressiveList::try_from_iter(iter)
+    }
+}
+
+impl<T> Decode for ProgressiveList<T>
+where
+    T: Value,
+{
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
+        if bytes.is_empty() {
+            Ok(ProgressiveList::empty())
+        } else if <T as Decode>::is_ssz_fixed_len() {
+            process_results(
+                bytes
+                    .chunks(<T as Decode>::ssz_fixed_len())
+                    .map(T::from_ssz_bytes),
+                |iter| {
+                    ProgressiveList::try_from_iter(iter).map_err(|e| {
+                        ssz::DecodeError::BytesInvalid(format!(
+                            "Error building ssz ProgressiveList: {e:?}"
+                        ))
+                    })
+                },
+            )?
+        } else {
+            ssz::decode_list_of_variable_length_items(bytes, None)
+        }
+    }
+}
+
+impl<'de, T> Deserialize<'de> for ProgressiveList<T>
+where
+    T: Deserialize<'de> + Value,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        // TODO: this implementation is not necessarily the most efficient
+        Self::try_from_iter(Vec::deserialize(deserializer)?)
+            .map_err(|e| D::Error::custom(format!("{e:?}")))
+    }
+}
diff --git a/src/tests/mod.rs b/src/tests/mod.rs
index b6010f3..25923ae 100644
--- a/src/tests/mod.rs
+++ b/src/tests/mod.rs
@@ -5,6 +5,7 @@ mod iterator;
 mod mem;
 mod packed;
 mod pop_front;
+mod prog;
 mod proptest;
 mod repeat;
 mod serde;
diff --git a/src/tests/prog.rs b/src/tests/prog.rs
new file mode 100644
index 0000000..0027c76
--- /dev/null
+++ b/src/tests/prog.rs
@@ -0,0 +1,107 @@
+use crate::prog_tree::ProgTree;
+use tree_hash::Hash256;
+
+#[test]
+fn wow() {
+    let empty = ProgTree::<Hash256>::empty();
+
+    let one = empty.push(Hash256::repeat_byte(0x11), 0).unwrap();
+
+    let two = one.push(Hash256::repeat_byte(0x22), 1).unwrap();
+
+    println!("{two:#?}");
+
+    let three = two.push(Hash256::repeat_byte(0x33), 2).unwrap();
+
+    println!("{three:#?}");
+}
+
+#[test]
+fn wow_u64() {
+    let mut tree = ProgTree::<u64>::empty();
+
+    for i in 1..=65 {
+        tree = tree.push(i, i as usize - 1).unwrap();
+    }
+
+    println!("{tree:#?}");
+}
+
+#[test]
+fn prog_tree_iterator() {
+    let mut tree = ProgTree::<u64>::empty();
+
+    // Build a tree with 65 elements
+    for i in 1..=65 {
+        tree = tree.push(i, i as usize - 1).unwrap();
+    }
+
+    // Iterate and collect all elements
+    let collected: Vec<_> = tree.iter(65).copied().collect();
+
+    // Verify we got all 65 elements in order
+    assert_eq!(collected.len(), 65);
+    for (i, &value) in collected.iter().enumerate() {
+        assert_eq!(
+            value,
+            (i + 1) as u64,
+            "Element at index {} should be {}",
+            i,
+            i + 1
+        );
+    }
+}
+
+#[test]
+fn prog_tree_iterator_empty() {
+    let tree = ProgTree::<u64>::empty();
+    let collected: Vec<_> = tree.iter(0).collect();
+    assert_eq!(collected.len(), 0);
+}
+
+#[test]
+fn prog_tree_iterator_small() {
+    let mut tree = ProgTree::<u64>::empty();
+
+    // Build a small tree with just 4 elements (one packed leaf)
+    for i in 1..=4 {
+        tree = tree.push(i, i as usize - 1).unwrap();
+    }
+
+    let collected: Vec<_> = tree.iter(4).copied().collect();
+    assert_eq!(collected, vec![1, 2, 3, 4]);
+}
+
+#[test]
+fn prog_tree_iterator_exact_size() {
+    let mut tree = ProgTree::<u64>::empty();
+
+    for i in 1..=20 {
+        tree = tree.push(i, i as usize - 1).unwrap();
+    }
+
+    let iter = tree.iter(20);
+    assert_eq!(iter.len(), 20);
+
+    let collected: Vec<_> = iter.copied().collect();
+    assert_eq!(collected.len(), 20);
+}
+
+#[test]
+fn prog_tree_iterator_hash256() {
+    let mut tree = ProgTree::<Hash256>::empty();
+
+    // Build a tree with non-packed values
+    for i in 1..=10 {
+        let hash = Hash256::repeat_byte(i as u8);
+        tree = tree.push(hash, i - 1).unwrap();
+    }
+
+    let collected: Vec<_> = tree.iter(10).collect();
+    assert_eq!(collected.len(), 10);
+
+    // Verify order
+    for (i, hash) in collected.iter().enumerate() {
+        assert_eq!(**hash, Hash256::repeat_byte((i + 1) as u8));
+    }
+}
diff --git a/src/tests/proptest/mod.rs b/src/tests/proptest/mod.rs
index eaa9385..46d1116 100644
--- a/src/tests/proptest/mod.rs
+++ b/src/tests/proptest/mod.rs
@@ -35,6 +35,17 @@ pub fn arb_hash256() -> impl Strategy<Value = Hash256> {
     proptest::array::uniform32(any::<u8>()).prop_map(Hash256::from)
 }
 
+/// Strategy for generating initial values for a progressive list.
+/// Unlike `arb_list`, this has no length limit, but we cap it at a reasonable
+/// size for testing purposes.
+pub fn arb_progressive_list<T, S>(strategy: S, max_len: usize) -> impl Strategy<Value = Vec<T>>
+where
+    S: Strategy<Value = T>,
+    T: std::fmt::Debug,
+{
+    proptest::collection::vec(strategy, 0..=max_len)
+}
+
 /// Struct with multiple fields shared by multiple proptests.
 #[derive(Debug, Clone, PartialEq, Encode, Decode, TreeHash)]
 pub struct Large {
diff --git a/src/tests/proptest/operations.rs b/src/tests/proptest/operations.rs
index bb3aaa6..6286eed 100644
--- a/src/tests/proptest/operations.rs
+++ b/src/tests/proptest/operations.rs
@@ -1,5 +1,5 @@
-use super::{Large, arb_hash256, arb_index, arb_large, arb_list, arb_vect};
-use crate::{Error, List, Value, Vector};
+use super::{Large, arb_hash256, arb_index, arb_large, arb_list, arb_progressive_list, arb_vect};
+use crate::{Error, List, ProgressiveList, Value, Vector};
 use proptest::prelude::*;
 use ssz::{Decode, Encode};
 use std::fmt::Debug;
@@ -91,6 +91,30 @@
     }
 }
 
+/// Simple specification for `ProgressiveList` behaviour without a length limit.
+#[derive(Debug, Clone)]
+pub struct ProgressiveSpec<T> {
+    values: Vec<T>,
+}
+
+impl<T> ProgressiveSpec<T> {
+    pub fn new(values: Vec<T>) -> Self {
+        Self { values }
+    }
+
+    pub fn len(&self) -> usize {
+        self.values.len()
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = &T> {
+        self.values.iter()
+    }
+
+    pub fn push(&mut self, value: T) {
+        self.values.push(value);
+    }
+}
+
 #[derive(Debug, Clone)]
 pub enum Op<T> {
     /// Check that `len` returns the correct length.
@@ -173,6 +197,80 @@ where
     proptest::collection::vec(arb_op(strategy, n), 1..limit)
 }
 
+/// Strategy for generating operations for ProgressiveList.
+/// Only includes operations that are currently implemented for ProgressiveList.
+fn arb_op_progressive<'a, T, S>(strategy: &'a S) -> impl Strategy<Value = Op<T>> + 'a
+where
+    T: Debug + Clone + 'a,
+    S: Strategy<Value = T> + 'a,
+{
+    // Only include operations that are currently implemented for ProgressiveList:
+    // Len, Push, Iter, TreeHash, and Debase (SSZ roundtrip)
+    prop_oneof![
+        Just(Op::Len),
+        strategy.prop_map(Op::Push),
+        Just(Op::Iter),
+        Just(Op::TreeHash),
+        Just(Op::Debase),
+    ]
+}
+
+fn arb_ops_progressive<'a, T, S>(
+    strategy: &'a S,
+    limit: usize,
+) -> impl Strategy<Value = Vec<Op<T>>> + 'a
+where
+    T: Debug + Clone + 'a,
+    S: Strategy<Value = T> + 'a,
+{
+    proptest::collection::vec(arb_op_progressive(strategy), 1..limit)
+}
+
+fn apply_ops_progressive_list<T>(
+    list: &mut ProgressiveList<T>,
+    spec: &mut ProgressiveSpec<T>,
+    ops: Vec<Op<T>>,
+) where
+    T: Value + Debug + Send + Sync,
+{
+    for op in ops {
+        match op {
+            Op::Len => {
+                assert_eq!(list.len(), spec.len());
+            }
+            Op::Push(value) => {
+                list.push(value.clone()).expect("push should succeed");
+                spec.push(value);
+            }
+            Op::Iter => {
+                assert!(list.iter().eq(spec.iter()));
+            }
+            Op::TreeHash => {
+                list.tree_hash_root();
+            }
+            Op::Debase => {
+                let ssz_bytes = list.as_ssz_bytes();
+                let new_list = ProgressiveList::from_ssz_bytes(&ssz_bytes).expect("SSZ decode");
+                assert_eq!(new_list, *list);
+                *list = new_list;
+            }
+            // These operations are not implemented for ProgressiveList yet and
+            // are not generated by arb_op_progressive, but we handle them for completeness.
+            Op::Get(_)
+            | Op::Set(_, _)
+            | Op::SetCowWithIntoMut(_, _)
+            | Op::SetCowWithMakeMut(_, _)
+            | Op::IterFrom(_)
+            | Op::PopFront(_)
+            | Op::ApplyUpdates
+            | Op::Checkpoint
+            | Op::Rebase
+            | Op::FromIntoRoundtrip
+            | Op::IntraRebase => {}
+        }
+    }
+}
+
 fn apply_ops_list<T, N>(list: &mut List<T, N>, spec: &mut Spec<T, N>, ops: Vec<Op<T>>)
 where
     T: Value + Debug + Send + Sync,
@@ -492,3 +590,37 @@
     vect_test!(large_33, Large, U33, arb_large());
     vect_test!(large_1024, Large, U1024, arb_large());
 }
+
+/// Maximum initial length for progressive list tests.
+/// This is used to cap the initial list size for reasonable test execution time.
+/// Compare to list tests which can use up to 1024 elements (U1024).
+const PROGRESSIVE_LIST_MAX_LEN: usize = 128;
+
+macro_rules! progressive_list_test {
+    ($name:ident, $T:ty) => {
+        // Use default strategy (assumes existence of an `Arbitrary` impl).
+        progressive_list_test!($name, $T, any::<$T>());
+    };
+    ($name:ident, $T:ty, $strat:expr) => {
+        proptest! {
+            #[test]
+            fn $name(
+                init in arb_progressive_list::<$T, _>(&$strat, PROGRESSIVE_LIST_MAX_LEN),
+                ops in arb_ops_progressive::<$T, _>(&$strat, OP_LIMIT)
+            ) {
+                let mut list = ProgressiveList::<$T>::try_from_iter(init.clone()).unwrap();
+                let mut spec = ProgressiveSpec::<$T>::new(init);
+                apply_ops_progressive_list(&mut list, &mut spec, ops);
+            }
+        }
+    };
+}
+
+mod progressive_list {
+    use super::*;
+
+    progressive_list_test!(u8, u8);
+    progressive_list_test!(u64, u64);
+    progressive_list_test!(hash256, Hash256, arb_hash256());
+    progressive_list_test!(large, Large, arb_large());
+}