diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..2b3d92a --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +# Build artifacts +target/ + +# Git +.git/ +.gitignore + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# Documentation +*.md +!crates/*/Cargo.toml + +# CI/CD +.github/ + +# Miscellaneous +.DS_Store diff --git a/.github/workflows/sanity.yaml b/.github/workflows/sanity.yaml index cc1950b..4dc6e26 100644 --- a/.github/workflows/sanity.yaml +++ b/.github/workflows/sanity.yaml @@ -19,7 +19,7 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-rust-nightly-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-rust-nightly-fmt-${{ hashFiles('**/Cargo.lock') }} - name: Set up Rust Nightly uses: actions-rs/toolchain@v1 with: @@ -41,7 +41,7 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-rust-stable-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-rust-stable-clippy-all-${{ hashFiles('**/Cargo.lock') }} - name: Set up Rust Stable uses: actions-rs/toolchain@v1 with: @@ -49,7 +49,7 @@ jobs: override: true components: clippy - name: Run Clippy with stable - run: cargo clippy --all-targets --all-features -- -D warnings + run: cargo clippy --workspace --all-targets --all-features -- -D warnings clippy-no-default: name: Clippy (no default features) @@ -63,7 +63,7 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-rust-stable-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-rust-stable-clippy-nodefault-${{ hashFiles('**/Cargo.lock') }} - name: Set up Rust Stable uses: actions-rs/toolchain@v1 with: @@ -71,7 +71,7 @@ jobs: override: true components: clippy - name: Run Clippy with stable - run: cargo clippy --all-targets --no-default-features -- -D warnings + run: cargo clippy --workspace --all-targets --no-default-features -- -D warnings typos: name: Typos @@ -96,14 +96,14 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-rust-stable-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-rust-stable-test-${{ hashFiles('**/Cargo.lock') }} - name: Set up Rust Stable uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - name: Run tests with stable - run: cargo test --all-targets --all-features + run: cargo test --workspace --lib --bins --tests --examples --all-features doc: name: Docs @@ -117,7 +117,7 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-rust-stable-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-rust-stable-doc-${{ hashFiles('**/Cargo.lock') }} - name: Set up Rust Stable uses: actions-rs/toolchain@v1 with: @@ -126,4 +126,4 @@ jobs: - name: Build documentation env: RUSTDOCFLAGS: -D warnings - run: cargo doc --no-deps --document-private-items --all-features --examples \ No newline at end of file + run: cargo doc --workspace --no-deps --document-private-items --all-features --examples \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 0371e4e..72dc772 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -109,6 +109,8 @@ reth-optimism-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3 reth-optimism-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3" } # test-utils +criterion = { version = "0.8", features = ["html_reports"] } +gungraun = "0.17.0" jsonrpsee-core = { version = "0.26.0", features = ["client"] } nanoid = "0.4" alloy-genesis = { version = "1.0", default-features = false } diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 2775b3e..6cdd1b0 
100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -112,4 +112,41 @@ op-alloy-flz = { workspace = true, optional = true } alloy-genesis = { workspace = true, optional = true } [dev-dependencies] -rblib-core = { path = ".", features = ["test-utils"] } \ No newline at end of file +rblib-core = { path = ".", features = ["test-utils"] } +criterion = { workspace = true } +gungraun = { workspace = true } + +[[bench]] +name = "apply_criterion" +path = "benches/revm_regression/apply_criterion.rs" +harness = false + +[[bench]] +name = "apply_valgrind" +path = "benches/revm_regression/apply_valgrind.rs" +harness = false + +[[bench]] +name = "revert_criterion" +path = "benches/revm_regression/revert_criterion.rs" +harness = false + +[[bench]] +name = "revert_valgrind" +path = "benches/revm_regression/revert_valgrind.rs" +harness = false + +[[bench]] +name = "traversal_criterion" +path = "benches/revm_regression/traversal_criterion.rs" +harness = false + +[[bench]] +name = "traversal_valgrind" +path = "benches/revm_regression/traversal_valgrind.rs" +harness = false + +[[bench]] +name = "forking" +path = "benches/revm_regression/forking.rs" +harness = false diff --git a/crates/core/bench/Dockerfile b/crates/core/bench/Dockerfile new file mode 100644 index 0000000..3c24c62 --- /dev/null +++ b/crates/core/bench/Dockerfile @@ -0,0 +1,40 @@ +# Dockerfile for running rblib-core benchmarks + +FROM rust:1.92-bookworm + +RUN apt-get update && apt-get install -y \ + valgrind \ + pkg-config \ + libssl-dev \ + clang \ + libclang-dev \ + && rm -rf /var/lib/apt/lists/* + +RUN cargo install gungraun-runner --version 0.17.0 + +WORKDIR /workspace + +# Copy workspace manifests +COPY Cargo.toml Cargo.lock ./ +COPY crates/rblib/Cargo.toml crates/rblib/ +COPY crates/core/Cargo.toml crates/core/ +COPY crates/pipeline/Cargo.toml crates/pipeline/ +COPY crates/pipeline-macros/Cargo.toml crates/pipeline-macros/ +COPY crates/test-utils-macros/Cargo.toml crates/test-utils-macros/ + +# Minimal stubs for workspace members +RUN mkdir -p crates/rblib/src crates/core/src crates/pipeline/src \ + crates/pipeline-macros/src crates/test-utils-macros/src && \ + touch crates/rblib/src/lib.rs crates/core/src/lib.rs crates/pipeline/src/lib.rs \ + crates/pipeline-macros/src/lib.rs crates/test-utils-macros/src/lib.rs + +# Cache dependencies +RUN cargo fetch + +# Copy rblib-core source +COPY crates/core crates/core + +# Build benchmarks +RUN cargo build --release -p rblib-core --features test-utils --benches + +CMD ["cargo", "bench", "-p", "rblib-core"] diff --git a/crates/core/bench/README.md b/crates/core/bench/README.md new file mode 100644 index 0000000..c533a4d --- /dev/null +++ b/crates/core/bench/README.md @@ -0,0 +1,74 @@ +# Benchmarking rblib-core + +## Overview + +The `revm_regression` benchmarks compare checkpoint operations against equivalent direct REVM/BundleState operations to +quantify abstraction overhead. 
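+
+Both harnesses drive the same two code paths. A condensed sketch of what is
+being compared (adapted from `apply_criterion.rs`/`apply_valgrind.rs`; the
+setup of `block`, `txs`, `evm_config`, and `evm_env` is elided here):
+
+```rust
+// Checkpoint chain: every tx yields a new immutable checkpoint.
+let mut checkpoint = block.start();
+for tx in txs {
+  checkpoint = checkpoint.apply(tx).unwrap();
+}
+
+// Direct REVM baseline: one mutable State accumulating a single BundleState.
+let mut state = State::builder()
+  .with_database(WrapDatabaseRef(block.start()))
+  .with_bundle_update()
+  .build();
+for tx in txs {
+  evm_config
+    .evm_with_env(&mut state, evm_env.clone())
+    .transact_commit(&tx)
+    .expect("tx failed");
+}
+state.merge_transitions(BundleRetention::Reverts);
+let bundle = state.take_bundle();
+```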
+ +### Scenarios + +| Benchmark | What it measures | +|---------------|--------------------------------------------------------------------| +| `apply_*` | Transaction execution: checkpoint chain vs single mutable State | +| `revert_*` | Revert capability: checkpoint fork vs BundleState::revert_latest() | +| `traversal_*` | State lookup cost at varying checkpoint chain depths | +| `forking` | Cost of creating parallel branches from shared checkpoint | + +See [`benches/revm_regression/scenarios.rs`](../benches/revm_regression/scenarios.rs) for detailed +scenario definitions including workloads, approaches compared, and what each benchmark measures. + +### Harnesses + +- **Criterion** (`*_criterion`): Wall-clock timing (cross-platform) +- **Valgrind** (`*_valgrind`): CPU instructions, cache behavior, heap profiling (Linux only) + +## Running Benchmarks + +### macOS (via Docker) + +```bash +# Run all benchmarks in Docker, results copied to target/ +./crates/core/bench/macos.sh +``` + +Docker build: + +```bash +docker build -f crates/core/bench/Dockerfile -t rblib-core-bench . +``` + +### Linux (native) + +```bash +# Criterion only +cargo bench -p rblib-core + +# All including Valgrind (requires valgrind + gungraun-runner) +cargo bench -p rblib-core --bench apply_valgrind --bench revert_valgrind --bench traversal_valgrind +``` + +or a specific benchmark: + +```bash +cargo bench -p rblib-core --bench apply_criterion +``` + +## Results + +### Criterion + +Results in `target/criterion/` with HTML reports. + +### Valgrind (via Gungraun) + +Results in `target/gungraun/`. + +``` +apply_valgrind::tx_execution::single_state tx_10:setup_10() +Instructions: 413062 | 506572 (-18.45%) [-1.23x] + ^ current ^ checkpoint ^ single_state is 18% faster +``` + +- Negative percentage = baseline (revm) uses fewer instructions +- `[-1.23x]` = checkpoint uses 1.23x more instructions + diff --git a/crates/core/bench/macos.sh b/crates/core/bench/macos.sh new file mode 100755 index 0000000..aa36cc3 --- /dev/null +++ b/crates/core/bench/macos.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORKSPACE_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +CORE_DIR="$WORKSPACE_ROOT/crates/core" + +cd "$WORKSPACE_ROOT" + +echo "=== Building Docker image ===" +docker build -f crates/core/bench/Dockerfile -t rblib-core-bench . + +echo "" +echo "=== Running Criterion benchmarks ===" +docker run --rm \ + -v "$CORE_DIR/target/criterion:/workspace/crates/core/target/criterion" \ + rblib-core-bench \ + cargo bench -p rblib-core \ + --bench apply_criterion \ + --bench revert_criterion \ + --bench traversal_criterion \ + --bench forking + +echo "" +echo "=== Running Valgrind benchmarks ===" +docker run --rm \ + -v "$CORE_DIR/target/gungraun:/workspace/target/gungraun" \ + rblib-core-bench \ + cargo bench -p rblib-core \ + --bench apply_valgrind \ + --bench revert_valgrind \ + --bench traversal_valgrind + +echo "" +echo "=== Done ===" +echo "Criterion results: crates/core/target/criterion/" +echo "Valgrind results: crates/core/target/gungraun/" diff --git a/crates/core/benches/revm_regression/apply_criterion.rs b/crates/core/benches/revm_regression/apply_criterion.rs new file mode 100644 index 0000000..c5ee5ed --- /dev/null +++ b/crates/core/benches/revm_regression/apply_criterion.rs @@ -0,0 +1,91 @@ +//! Criterion benchmark: `checkpoint.apply()` vs direct `transact_commit` on +//! single State. 
+ +mod scenarios; + +use { + criterion::{ + BenchmarkId, + Criterion, + Throughput, + criterion_group, + criterion_main, + }, + rblib_core::reth::{ + evm::{ConfigureEvm, Evm}, + revm::{ + State, + db::{WrapDatabaseRef, states::bundle_state::BundleRetention}, + }, + }, + scenarios::{APPLY_SAMPLES, ApplySetup, generate_transactions}, +}; + +fn bench_checkpoint_vs_direct(c: &mut Criterion) { + let mut group = c.benchmark_group("tx_execution"); + + for n in APPLY_SAMPLES { + let txs = generate_transactions(n); + let param = format!("{n}_transfers"); + + group.throughput(Throughput::Elements(n)); + + group.bench_with_input( + BenchmarkId::new("checkpoint_chain", ¶m), + &txs, + |b, txs| { + b.iter_with_setup( + || { + let setup = scenarios::apply_setup(n); + let checkpoint = setup.block.start(); + (setup.block, checkpoint, txs.clone()) + }, + |(_block, mut checkpoint, txs)| { + for tx in txs { + checkpoint = checkpoint.apply(tx).unwrap(); + } + checkpoint + }, + ); + }, + ); + + group.bench_with_input( + BenchmarkId::new("single_state", ¶m), + &txs, + |b, txs| { + b.iter_with_setup( + || { + let ApplySetup { block, .. } = scenarios::apply_setup(n); + (block, txs.clone()) + }, + |(block, txs)| { + let base_state = block.start(); + let mut state = State::builder() + .with_database(WrapDatabaseRef(base_state)) + .with_bundle_update() + .build(); + + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + + for tx in txs { + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&tx) + .expect("tx failed"); + } + + state.merge_transitions(BundleRetention::Reverts); + state.take_bundle() + }, + ); + }, + ); + } + + group.finish(); +} + +criterion_group!(benches, bench_checkpoint_vs_direct); +criterion_main!(benches); diff --git a/crates/core/benches/revm_regression/apply_valgrind.rs b/crates/core/benches/revm_regression/apply_valgrind.rs new file mode 100644 index 0000000..bbb8c06 --- /dev/null +++ b/crates/core/benches/revm_regression/apply_valgrind.rs @@ -0,0 +1,93 @@ +//! Valgrind benchmark: `checkpoint.apply()` vs direct `transact_commit` on +//! single State. 
+ +mod scenarios; + +use { + gungraun::{ + Callgrind, + Dhat, + EventKind, + FlamegraphConfig, + LibraryBenchmarkConfig, + library_benchmark, + library_benchmark_group, + main, + }, + rblib_core::reth::{ + evm::{ConfigureEvm, Evm}, + revm::{ + State, + db::{WrapDatabaseRef, states::bundle_state::BundleRetention}, + }, + }, + scenarios::{ + ApplySetup, + apply_setup_10, + apply_setup_100, + apply_setup_500, + apply_setup_1000, + }, + std::hint::black_box, +}; + +#[library_benchmark] +#[bench::tx_10(apply_setup_10())] +#[bench::tx_100(apply_setup_100())] +#[bench::tx_500(apply_setup_500())] +#[bench::tx_1000(apply_setup_1000())] +fn checkpoint_chain(setup: ApplySetup) { + let ApplySetup { block, txs } = setup; + let mut checkpoint = block.start(); + + for tx in txs { + checkpoint = checkpoint.apply(tx).unwrap(); + } + + black_box(checkpoint); +} + +#[library_benchmark] +#[bench::tx_10(apply_setup_10())] +#[bench::tx_100(apply_setup_100())] +#[bench::tx_500(apply_setup_500())] +#[bench::tx_1000(apply_setup_1000())] +fn single_state(setup: ApplySetup) { + let ApplySetup { block, txs } = setup; + let base_state = block.start(); + + let mut state = State::builder() + .with_database(WrapDatabaseRef(base_state)) + .with_bundle_update() + .build(); + + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + + for tx in txs { + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&tx) + .expect("tx failed"); + } + + state.merge_transitions(BundleRetention::Reverts); + black_box(state.take_bundle()); +} + +library_benchmark_group!( + name = tx_execution; + compare_by_id = true; + benchmarks = checkpoint_chain, single_state +); + +main!( + config = LibraryBenchmarkConfig::default() + .tool( + Callgrind::default() + .soft_limits([(EventKind::Ir, 5.0)]) + .flamegraph(FlamegraphConfig::default()) + ) + .tool(Dhat::default()); + library_benchmark_groups = tx_execution +); diff --git a/crates/core/benches/revm_regression/forking.rs b/crates/core/benches/revm_regression/forking.rs new file mode 100644 index 0000000..4fbe3f1 --- /dev/null +++ b/crates/core/benches/revm_regression/forking.rs @@ -0,0 +1,86 @@ +//! Criterion benchmark: cost of creating and using parallel branches from a +//! shared checkpoint. 
+
+mod scenarios;
+
+use {
+  criterion::{BenchmarkId, Criterion, criterion_group, criterion_main},
+  rblib_core::reth::primitives::Recovered,
+  scenarios::{
+    BRANCH_COUNTS,
+    FORK_DEPTHS,
+    Transaction,
+    build_base_chain,
+    generate_branch_txs,
+  },
+};
+
+fn bench_fork_and_apply(c: &mut Criterion) {
+  let mut group = c.benchmark_group("fork_and_apply");
+
+  for depth in FORK_DEPTHS {
+    for num_branches in BRANCH_COUNTS {
+      let branch_txs: Vec<Vec<Recovered<Transaction>>> = (1..=num_branches)
+        .map(|signer_idx| {
+          generate_branch_txs(u32::try_from(signer_idx).unwrap())
+        })
+        .collect();
+
+      let fork_point = build_base_chain(depth);
+
+      let id = BenchmarkId::new(
+        format!("base_{depth}_txs"),
+        format!("{num_branches}_branches_10tx_each"),
+      );
+
+      group.bench_with_input(
+        id,
+        &(&fork_point, &branch_txs),
+        |b, (fork_point, branch_txs)| {
+          b.iter(|| {
+            let mut branches = Vec::with_capacity(branch_txs.len());
+
+            for txs in *branch_txs {
+              let mut branch = (*fork_point).clone();
+
+              for tx in txs {
+                branch = branch.apply(tx.clone()).unwrap();
+              }
+
+              branches.push(branch);
+            }
+
+            branches
+          });
+        },
+      );
+    }
+  }
+
+  group.finish();
+}
+
+fn bench_fork_only(c: &mut Criterion) {
+  let mut group = c.benchmark_group("clone_10_forks");
+
+  for depth in FORK_DEPTHS {
+    let fork_point = build_base_chain(depth);
+    let param = format!("base_{depth}_txs");
+
+    group.bench_with_input(
+      BenchmarkId::from_parameter(&param),
+      &fork_point,
+      |b, fork_point| {
+        b.iter(|| {
+          let forks: Vec<_> = (0..10).map(|_| fork_point.clone()).collect();
+          forks
+        });
+      },
+    );
+  }
+
+  group.finish();
+}
+
+criterion_group!(benches, bench_fork_and_apply, bench_fork_only);
+criterion_main!(benches);
diff --git a/crates/core/benches/revm_regression/revert_criterion.rs b/crates/core/benches/revm_regression/revert_criterion.rs
new file mode 100644
index 0000000..053c2f7
--- /dev/null
+++ b/crates/core/benches/revm_regression/revert_criterion.rs
@@ -0,0 +1,237 @@
+//! Criterion benchmark: checkpoint revert vs `BundleState::revert_latest()`.
+ +mod scenarios; + +use { + criterion::{ + BenchmarkId, + Criterion, + Throughput, + criterion_group, + criterion_main, + }, + rblib_core::reth::{ + evm::{ConfigureEvm, Evm}, + revm::{ + State, + db::{ + BundleState, + WrapDatabaseRef, + states::bundle_state::BundleRetention, + }, + }, + }, + scenarios::{ + REVERT_PAIR_COUNTS, + generate_transaction_pairs, + generate_transactions, + }, +}; + +fn bench_apply_revert_continue(c: &mut Criterion) { + let mut group = c.benchmark_group("apply_revert_continue"); + + for num_pairs in REVERT_PAIR_COUNTS { + let pairs = generate_transaction_pairs(num_pairs); + let param = format!("{num_pairs}_pairs"); + + group.throughput(Throughput::Elements(num_pairs)); + + group.bench_with_input( + BenchmarkId::new("checkpoint", ¶m), + &pairs, + |b, pairs| { + b.iter_with_setup( + || { + let setup = scenarios::revert_setup(num_pairs); + let checkpoint = setup.block.start(); + (setup.block, checkpoint, pairs.clone()) + }, + |(_block, mut checkpoint, pairs)| { + for (kept_tx, reverted_tx) in pairs { + checkpoint = checkpoint.apply(kept_tx).unwrap(); + let _discarded = checkpoint.apply(reverted_tx).unwrap(); + } + checkpoint + }, + ); + }, + ); + + group.bench_with_input( + BenchmarkId::new("direct_per_tx_merge", ¶m), + &pairs, + |b, pairs| { + b.iter_with_setup( + || { + let setup = scenarios::revert_setup(num_pairs); + (setup.block, pairs.clone()) + }, + |(block, pairs)| { + let base_state = block.start(); + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + let mut bundle = BundleState::default(); + + for (kept_tx, reverted_tx) in pairs { + let mut state = State::builder() + .with_database(WrapDatabaseRef(&base_state)) + .with_bundle_update() + .with_bundle_prestate(bundle) + .build(); + + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&kept_tx) + .expect("tx failed"); + state.merge_transitions(BundleRetention::Reverts); + + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&reverted_tx) + .expect("tx failed"); + state.merge_transitions(BundleRetention::Reverts); + + bundle = state.take_bundle(); + bundle.revert_latest(); + } + + bundle + }, + ); + }, + ); + + group.bench_with_input( + BenchmarkId::new("direct_batch_baseline", ¶m), + &pairs, + |b, pairs| { + b.iter_with_setup( + || { + let setup = scenarios::revert_setup(num_pairs); + let kept_txs: Vec<_> = + pairs.iter().map(|(kept, _)| kept.clone()).collect(); + (setup.block, kept_txs) + }, + |(block, txs)| { + let base_state = block.start(); + let mut state = State::builder() + .with_database(WrapDatabaseRef(base_state)) + .with_bundle_update() + .build(); + + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + + for tx in txs { + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&tx) + .expect("tx failed"); + } + + state.merge_transitions(BundleRetention::Reverts); + state.take_bundle() + }, + ); + }, + ); + } + + group.finish(); +} + +fn bench_apply_then_revert(c: &mut Criterion) { + let mut group = c.benchmark_group("apply_then_revert"); + + let test_cases: [(u64, u64); 4] = [(10, 5), (50, 25), (100, 10), (100, 50)]; + + for (total, to_revert) in test_cases { + let txs = generate_transactions(total); + let param = format!("{total}_apply_{to_revert}_revert"); + let kept = total - to_revert; + + group.throughput(Throughput::Elements(kept)); + + group.bench_with_input( + BenchmarkId::new("checkpoint", ¶m), + &txs, + |b, txs| { + b.iter_with_setup( + || { + let setup = 
scenarios::apply_setup(total); + let checkpoint = setup.block.start(); + ( + setup.block, + checkpoint, + txs.clone(), + usize::try_from(to_revert).unwrap(), + ) + }, + |(_block, mut checkpoint, txs, to_revert)| { + let mut checkpoints = Vec::with_capacity(txs.len()); + + for tx in txs { + checkpoint = checkpoint.apply(tx).unwrap(); + checkpoints.push(checkpoint.clone()); + } + + let revert_to_idx = checkpoints.len() - to_revert - 1; + checkpoints[revert_to_idx].clone() + }, + ); + }, + ); + + group.bench_with_input( + BenchmarkId::new("direct_per_tx_merge", ¶m), + &txs, + |b, txs| { + b.iter_with_setup( + || { + let setup = scenarios::apply_setup(total); + ( + setup.block, + txs.clone(), + usize::try_from(to_revert).unwrap(), + ) + }, + |(block, txs, to_revert)| { + let base_state = block.start(); + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + let mut bundle = BundleState::default(); + + for tx in &txs { + let mut state = State::builder() + .with_database(WrapDatabaseRef(&base_state)) + .with_bundle_update() + .with_bundle_prestate(bundle) + .build(); + + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(tx) + .expect("tx failed"); + + state.merge_transitions(BundleRetention::Reverts); + bundle = state.take_bundle(); + } + + bundle.revert(to_revert); + bundle + }, + ); + }, + ); + } + + group.finish(); +} + +criterion_group!( + benches, + bench_apply_revert_continue, + bench_apply_then_revert +); +criterion_main!(benches); diff --git a/crates/core/benches/revm_regression/revert_valgrind.rs b/crates/core/benches/revm_regression/revert_valgrind.rs new file mode 100644 index 0000000..7e77ec6 --- /dev/null +++ b/crates/core/benches/revm_regression/revert_valgrind.rs @@ -0,0 +1,135 @@ +//! Valgrind benchmark: checkpoint revert vs `BundleState::revert_latest()`. 
+ +mod scenarios; + +use { + gungraun::{ + Callgrind, + Dhat, + EventKind, + FlamegraphConfig, + LibraryBenchmarkConfig, + library_benchmark, + library_benchmark_group, + main, + }, + rblib_core::reth::{ + evm::{ConfigureEvm, Evm}, + revm::{ + State, + db::{ + BundleState, + WrapDatabaseRef, + states::bundle_state::BundleRetention, + }, + }, + }, + scenarios::{ + RevertSetup, + revert_setup_5, + revert_setup_25, + revert_setup_100, + revert_setup_250, + }, + std::hint::black_box, +}; + +#[library_benchmark] +#[bench::pairs_5(revert_setup_5())] +#[bench::pairs_25(revert_setup_25())] +#[bench::pairs_100(revert_setup_100())] +#[bench::pairs_250(revert_setup_250())] +fn checkpoint(setup: RevertSetup) { + let RevertSetup { block, pairs } = setup; + let mut checkpoint = block.start(); + + for (kept_tx, reverted_tx) in pairs { + checkpoint = checkpoint.apply(kept_tx).unwrap(); + let _discarded = checkpoint.apply(reverted_tx).unwrap(); + } + + black_box(checkpoint); +} + +#[library_benchmark] +#[bench::pairs_5(revert_setup_5())] +#[bench::pairs_25(revert_setup_25())] +#[bench::pairs_100(revert_setup_100())] +#[bench::pairs_250(revert_setup_250())] +fn direct_per_tx_merge(setup: RevertSetup) { + let RevertSetup { block, pairs } = setup; + let base_state = block.start(); + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + let mut bundle = BundleState::default(); + + for (kept_tx, reverted_tx) in pairs { + let mut state = State::builder() + .with_database(WrapDatabaseRef(&base_state)) + .with_bundle_update() + .with_bundle_prestate(bundle) + .build(); + + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&kept_tx) + .expect("tx failed"); + state.merge_transitions(BundleRetention::Reverts); + + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&reverted_tx) + .expect("tx failed"); + state.merge_transitions(BundleRetention::Reverts); + + bundle = state.take_bundle(); + bundle.revert_latest(); + } + + black_box(bundle); +} + +#[library_benchmark] +#[bench::pairs_5(revert_setup_5())] +#[bench::pairs_25(revert_setup_25())] +#[bench::pairs_100(revert_setup_100())] +#[bench::pairs_250(revert_setup_250())] +fn direct_batch_baseline(setup: RevertSetup) { + let RevertSetup { block, pairs } = setup; + let base_state = block.start(); + + let mut state = State::builder() + .with_database(WrapDatabaseRef(base_state)) + .with_bundle_update() + .build(); + + let evm_config = block.evm_config(); + let evm_env = block.evm_env(); + + for (kept_tx, _) in pairs { + evm_config + .evm_with_env(&mut state, evm_env.clone()) + .transact_commit(&kept_tx) + .expect("tx failed"); + } + + state.merge_transitions(BundleRetention::Reverts); + black_box(state.take_bundle()); +} + +library_benchmark_group!( + name = apply_revert_continue; + compare_by_id = true; + benchmarks = checkpoint, direct_per_tx_merge, direct_batch_baseline +); + +main!( + config = LibraryBenchmarkConfig::default() + .tool( + Callgrind::default() + .soft_limits([(EventKind::Ir, 5.0)]) + .flamegraph(FlamegraphConfig::default()) + ) + .tool(Dhat::default()); + library_benchmark_groups = apply_revert_continue +); diff --git a/crates/core/benches/revm_regression/scenarios.rs b/crates/core/benches/revm_regression/scenarios.rs new file mode 100644 index 0000000..51c8267 --- /dev/null +++ b/crates/core/benches/revm_regression/scenarios.rs @@ -0,0 +1,241 @@ +//! Shared scenario definitions for `revm_regression` benchmarks. +//! +//! 
This module defines the test scenarios used to measure the cost of +//! abstraction of the Checkpoint system against direct REVM/BundleState usage. +//! +//! # Scenarios +//! +//! ## Apply +//! +//! Measures transaction execution overhead. +//! +//! **Workload**: N simple ETH transfers (21k gas each) from a single funded +//! account to random recipients. +//! +//! **Approaches compared**: +//! - `checkpoint_chain`: Each tx creates a new immutable Checkpoint via +//! `checkpoint.apply(tx)`. State lookups traverse the checkpoint chain. +//! - `single_state`: All txs applied to one mutable `State` via +//! `transact_commit()`. Single `BundleState` accumulates all changes. +//! +//! **What it measures**: Per-transaction overhead of checkpoint creation and +//! the growing cost of state lookups as the chain deepens. +//! +//! ## Revert +//! +//! Measures the cost of maintaining revert capability. +//! +//! **Workload**: Apply-revert-continue pattern. For each pair: apply a "kept" +//! tx, apply a "reverted" tx, then discard the second. +//! +//! **Approaches compared**: +//! - `checkpoint`: Revert by keeping reference to previous checkpoint (O(1)). +//! - `direct_per_tx_merge`: Merge after each tx, use +//! `BundleState::revert_latest()`. +//! - `direct_batch_baseline`: No revert capability, only applies kept txs +//! (baseline). +//! +//! **What it measures**: Cost of revert capability. Checkpoint revert is O(1) +//! but pays traversal cost on subsequent reads. Direct revert is O(accounts) +//! but has O(1) reads. +//! +//! ## Traversal +//! +//! Measures state lookup cost at varying checkpoint chain depths. +//! +//! **Workload**: Build a checkpoint chain of depth N, then perform single +//! account reads. +//! +//! **Read patterns**: +//! - `hot`: Read the signer account (modified in every checkpoint, found at +//! top). +//! - `cold`: Read an untouched account (must traverse entire chain to base +//! state). +//! - `deep`: Read recipient of first tx (found at bottom of chain). +//! +//! **What it measures**: How read latency scales with chain depth. Cold reads +//! show worst-case traversal cost. Hot reads show best-case. +//! +//! ## Forking +//! +//! Measures the cost of creating parallel branches from a shared checkpoint. +//! +//! **Workload**: Build a base chain of depth M, then create N branches from the +//! tip, each applying 10 transactions from a unique signer (no state +//! contention). +//! +//! ```text +//! Base: [0] -> [1] -> ... -> [M] +//! |-> Branch A: 10 txs +//! |-> Branch B: 10 txs +//! |-> Branch C: 10 txs +//! ``` +//! +//! **What it measures**: Cost of checkpoint cloning (Arc efficiency) and +//! whether parallel branches interfere with each other's performance. 
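//!
//! # Example
//!
//! A minimal, illustrative sketch of the checkpoint API these scenarios
//! exercise (not itself a benchmark; it reuses the `test-utils` helpers
//! imported below):
//!
//! ```ignore
//! let block = BlockContext::<Ethereum>::mocked();
//! let signer = FundedAccounts::signer(0);
//! let mut checkpoint = block.start();
//! checkpoint = checkpoint
//!   .apply(transfer_tx::<Ethereum>(&signer, 0, U256::from(1u64)))
//!   .unwrap();
//! // Fork: clone the checkpoint to branch off independently.
//! let branch = checkpoint.clone();
//! ```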
+
+#![allow(dead_code, unreachable_pub)]
+
+use rblib_core::{
+  alloy::primitives::{Address, U256},
+  payload::{BlockContext, Checkpoint},
+  platform::Ethereum,
+  reth::primitives::Recovered,
+  test_utils::{
+    BlockContextMocked,
+    FundedAccounts,
+    transfer_tx,
+    transfer_tx_to,
+  },
+};
+
+pub type Transaction = rblib_core::platform::types::Transaction;
+
+// ============================================================================
+// Apply scenario
+// ============================================================================
+
+pub const APPLY_SAMPLES: [u64; 4] = [10, 100, 500, 1000];
+
+pub struct ApplySetup {
+  pub block: BlockContext<Ethereum>,
+  pub txs: Vec<Recovered<Transaction>>,
+}
+
+pub fn generate_transactions(n: u64) -> Vec<Recovered<Transaction>> {
+  let signer = FundedAccounts::signer(0);
+  (0..n)
+    .map(|nonce| transfer_tx::<Ethereum>(&signer, nonce, U256::from(1u64)))
+    .collect()
+}
+
+pub fn apply_setup(n: u64) -> ApplySetup {
+  ApplySetup {
+    block: BlockContext::<Ethereum>::mocked(),
+    txs: generate_transactions(n),
+  }
+}
+
+pub fn apply_setup_10() -> ApplySetup {
+  apply_setup(10)
+}
+pub fn apply_setup_100() -> ApplySetup {
+  apply_setup(100)
+}
+pub fn apply_setup_500() -> ApplySetup {
+  apply_setup(500)
+}
+pub fn apply_setup_1000() -> ApplySetup {
+  apply_setup(1000)
+}
+
+// ============================================================================
+// Revert scenario
+// ============================================================================
+
+pub const REVERT_PAIR_COUNTS: [u64; 4] = [5, 25, 100, 250];
+
+pub struct RevertSetup {
+  pub block: BlockContext<Ethereum>,
+  pub pairs: Vec<(Recovered<Transaction>, Recovered<Transaction>)>,
+}
+
+pub fn generate_transaction_pairs(
+  num_pairs: u64,
+) -> Vec<(Recovered<Transaction>, Recovered<Transaction>)> {
+  let signer = FundedAccounts::signer(0);
+  (0..num_pairs)
+    .map(|i| {
+      (
+        transfer_tx::<Ethereum>(&signer, i, U256::from(1u64)),
+        transfer_tx::<Ethereum>(&signer, i + 1, U256::from(1u64)),
+      )
+    })
+    .collect()
+}
+
+pub fn revert_setup(num_pairs: u64) -> RevertSetup {
+  RevertSetup {
+    block: BlockContext::<Ethereum>::mocked(),
+    pairs: generate_transaction_pairs(num_pairs),
+  }
+}
+
+pub fn revert_setup_5() -> RevertSetup {
+  revert_setup(5)
+}
+pub fn revert_setup_25() -> RevertSetup {
+  revert_setup(25)
+}
+pub fn revert_setup_100() -> RevertSetup {
+  revert_setup(100)
+}
+pub fn revert_setup_250() -> RevertSetup {
+  revert_setup(250)
+}
+
+// ============================================================================
+// Traversal scenario
+// ============================================================================
+
+pub const TRAVERSAL_DEPTHS: [u64; 5] = [10, 50, 100, 500, 1000];
+
+pub fn build_checkpoint_chain(n: u64) -> (Checkpoint<Ethereum>, Address) {
+  let block = BlockContext::<Ethereum>::mocked();
+  let mut checkpoint = block.start();
+  let signer = FundedAccounts::signer(0);
+  let mut first_recipient = Address::ZERO;
+
+  for nonce in 0..n {
+    let to = Address::random();
+    let tx: Recovered<Transaction> =
+      transfer_tx_to::<Ethereum>(&signer, nonce, U256::from(1u64), to);
+
+    if nonce == 0 {
+      first_recipient = to;
+    }
+    checkpoint = checkpoint.apply(tx).unwrap();
+  }
+
+  (checkpoint, first_recipient)
+}
+
+// ============================================================================
+// Forking scenario
+// ============================================================================
+
+pub const FORK_DEPTHS: [u64; 3] = [10, 100, 500];
+pub const BRANCH_COUNTS: [usize; 3] = [2, 5, 10];
+pub const TXS_PER_BRANCH: u64 = 10;
+
+pub fn build_base_chain(n: u64) -> Checkpoint<Ethereum> {
+  let block = BlockContext::<Ethereum>::mocked();
+  let mut checkpoint = block.start();
+  let signer = FundedAccounts::signer(0);
+
+  for nonce in 0..n {
+    let tx: Recovered<Transaction> = transfer_tx_to::<Ethereum>(
+      &signer,
+      nonce,
+      U256::from(1u64),
+      Address::random(),
+    );
+    checkpoint = checkpoint.apply(tx).unwrap();
+  }
+
+  checkpoint
+}
+
+pub fn generate_branch_txs(signer_index: u32) -> Vec<Recovered<Transaction>> {
+  let signer = FundedAccounts::signer(signer_index);
+  (0..TXS_PER_BRANCH)
+    .map(|nonce| {
+      transfer_tx_to::<Ethereum>(
+        &signer,
+        nonce,
+        U256::from(1u64),
+        Address::random(),
+      )
+    })
+    .collect()
+}
diff --git a/crates/core/benches/revm_regression/traversal_criterion.rs b/crates/core/benches/revm_regression/traversal_criterion.rs
new file mode 100644
index 0000000..f25cf13
--- /dev/null
+++ b/crates/core/benches/revm_regression/traversal_criterion.rs
@@ -0,0 +1,78 @@
+//! Criterion benchmark: state lookup cost at varying checkpoint chain depths.
+//!
+//! Measures how read performance scales with chain depth for hot, cold, and
+//! deep accounts.
+
+mod scenarios;
+
+use {
+  criterion::{BenchmarkId, Criterion, criterion_group, criterion_main},
+  rblib_core::{
+    alloy::primitives::Address,
+    reth::revm::DatabaseRef,
+    test_utils::FundedAccounts,
+  },
+  scenarios::{TRAVERSAL_DEPTHS, build_checkpoint_chain},
+};
+
+fn bench_hot_read(c: &mut Criterion) {
+  let mut group = c.benchmark_group("read_hot_account");
+  let signer_address = FundedAccounts::address(0);
+
+  for depth in TRAVERSAL_DEPTHS {
+    let (checkpoint, _) = build_checkpoint_chain(depth);
+    let param = format!("chain_depth_{depth}");
+
+    group.bench_with_input(
+      BenchmarkId::from_parameter(&param),
+      &checkpoint,
+      |b, checkpoint| {
+        b.iter(|| checkpoint.basic_ref(signer_address));
+      },
+    );
+  }
+
+  group.finish();
+}
+
+fn bench_cold_read(c: &mut Criterion) {
+  let mut group = c.benchmark_group("read_untouched_account");
+  let untouched_address = Address::repeat_byte(0xDE);
+
+  for depth in TRAVERSAL_DEPTHS {
+    let (checkpoint, _) = build_checkpoint_chain(depth);
+    let param = format!("chain_depth_{depth}");
+
+    group.bench_with_input(
+      BenchmarkId::from_parameter(&param),
+      &checkpoint,
+      |b, checkpoint| {
+        b.iter(|| checkpoint.basic_ref(untouched_address));
+      },
+    );
+  }
+
+  group.finish();
+}
+
+fn bench_deep_read(c: &mut Criterion) {
+  let mut group = c.benchmark_group("read_deep_account");
+
+  for depth in TRAVERSAL_DEPTHS {
+    let (checkpoint, first_recipient) = build_checkpoint_chain(depth);
+    let param = format!("chain_depth_{depth}");
+
+    group.bench_with_input(
+      BenchmarkId::from_parameter(&param),
+      &(checkpoint, first_recipient),
+      |b, (checkpoint, addr)| {
+        b.iter(|| checkpoint.basic_ref(*addr));
+      },
+    );
+  }
+
+  group.finish();
+}
+
+criterion_group!(benches, bench_hot_read, bench_cold_read, bench_deep_read);
+criterion_main!(benches);
diff --git a/crates/core/benches/revm_regression/traversal_valgrind.rs b/crates/core/benches/revm_regression/traversal_valgrind.rs
new file mode 100644
index 0000000..5891ff6
--- /dev/null
+++ b/crates/core/benches/revm_regression/traversal_valgrind.rs
@@ -0,0 +1,99 @@
+//! Valgrind benchmark: state lookup cost at varying checkpoint chain depths.
+ +#![allow(clippy::needless_pass_by_value)] + +mod scenarios; + +use { + gungraun::{ + Callgrind, + Dhat, + EventKind, + FlamegraphConfig, + LibraryBenchmarkConfig, + library_benchmark, + library_benchmark_group, + main, + }, + rblib_core::{ + alloy::primitives::Address, + payload::Checkpoint, + platform::Ethereum, + reth::revm::DatabaseRef, + test_utils::FundedAccounts, + }, + scenarios::build_checkpoint_chain, + std::hint::black_box, +}; + +struct TraversalSetup { + checkpoint: Checkpoint, + first_recipient: Address, +} + +fn setup(depth: u64) -> TraversalSetup { + let (checkpoint, first_recipient) = build_checkpoint_chain(depth); + TraversalSetup { + checkpoint, + first_recipient, + } +} + +fn setup_10() -> TraversalSetup { + setup(10) +} +fn setup_100() -> TraversalSetup { + setup(100) +} +fn setup_500() -> TraversalSetup { + setup(500) +} +fn setup_1000() -> TraversalSetup { + setup(1000) +} + +#[library_benchmark] +#[bench::depth_10(setup_10())] +#[bench::depth_100(setup_100())] +#[bench::depth_500(setup_500())] +#[bench::depth_1000(setup_1000())] +fn hot_read(setup: TraversalSetup) { + let signer_address = FundedAccounts::address(0); + black_box(setup.checkpoint.basic_ref(signer_address).unwrap()); +} + +#[library_benchmark] +#[bench::depth_10(setup_10())] +#[bench::depth_100(setup_100())] +#[bench::depth_500(setup_500())] +#[bench::depth_1000(setup_1000())] +fn cold_read(setup: TraversalSetup) { + let untouched_address = Address::repeat_byte(0xDE); + black_box(setup.checkpoint.basic_ref(untouched_address).unwrap()); +} + +#[library_benchmark] +#[bench::depth_10(setup_10())] +#[bench::depth_100(setup_100())] +#[bench::depth_500(setup_500())] +#[bench::depth_1000(setup_1000())] +fn deep_read(setup: TraversalSetup) { + black_box(setup.checkpoint.basic_ref(setup.first_recipient).unwrap()); +} + +library_benchmark_group!( + name = chain_traversal; + compare_by_id = true; + benchmarks = hot_read, cold_read, deep_read +); + +main!( + config = LibraryBenchmarkConfig::default() + .tool( + Callgrind::default() + .soft_limits([(EventKind::Ir, 5.0)]) + .flamegraph(FlamegraphConfig::default()) + ) + .tool(Dhat::default()); + library_benchmark_groups = chain_traversal +); diff --git a/crates/core/src/test_utils/mod.rs b/crates/core/src/test_utils/mod.rs index b84c3ae..fbddbd2 100644 --- a/crates/core/src/test_utils/mod.rs +++ b/crates/core/src/test_utils/mod.rs @@ -32,6 +32,7 @@ pub use { test_tx, test_txs, transfer_tx, + transfer_tx_to, }, }; diff --git a/crates/core/src/test_utils/transactions.rs b/crates/core/src/test_utils/transactions.rs index 28b4c17..949b47e 100644 --- a/crates/core/src/test_utils/transactions.rs +++ b/crates/core/src/test_utils/transactions.rs @@ -49,14 +49,15 @@ pub fn test_bundle>>( } #[allow(clippy::missing_panics_doc)] -pub fn transfer_tx( +pub fn transfer_tx_to( signer: &PrivateKeySigner, nonce: u64, value: U256, + address: Address, ) -> Recovered> { let mut tx = types::TransactionRequest::

::default() .with_nonce(nonce) - .with_to(Address::random()) + .with_to(address) .with_value(value) .with_gas_price(1_000_000_000) .with_gas_limit(21_000) @@ -74,6 +75,15 @@ pub fn transfer_tx( signed_tx.with_signer(signer.address()) } +#[allow(clippy::missing_panics_doc)] +pub fn transfer_tx( + signer: &PrivateKeySigner, + nonce: u64, + value: U256, +) -> Recovered> { + transfer_tx_to::

(signer, nonce, value, Address::random()) +} + /// Create a transaction that will revert when executed #[allow(clippy::missing_panics_doc)] pub fn reverting_tx(