diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 5e334d13c65..e833c620b68 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,45 +1,10 @@
-* @gakonst
-crates/blockchain-tree-api/ @rakita @mattsse @Rjected
-crates/blockchain-tree/ @rakita @mattsse @Rjected
-crates/chain-state/ @fgimenez @mattsse
-crates/chainspec/ @Rjected @joshieDo @mattsse
-crates/cli/ @mattsse
-crates/consensus/ @mattsse @Rjected
-crates/e2e-test-utils/ @mattsse @Rjected @klkvr @fgimenez
-crates/engine/ @mattsse @Rjected @fgimenez @mediocregopher @yongkangc
-crates/era/ @mattsse @RomanHodulak
-crates/errors/ @mattsse
-crates/ethereum-forks/ @mattsse @Rjected
-crates/ethereum/ @mattsse @Rjected
-crates/etl/ @joshieDo @shekhirin
-crates/evm/ @rakita @mattsse @Rjected
-crates/exex/ @shekhirin
-crates/net/ @mattsse @Rjected
-crates/net/downloaders/ @Rjected
-crates/node/ @mattsse @Rjected @klkvr
-crates/optimism/ @mattsse @Rjected @fgimenez
-crates/payload/ @mattsse @Rjected
-crates/primitives-traits/ @Rjected @RomanHodulak @mattsse @klkvr
-crates/primitives/ @Rjected @mattsse @klkvr
-crates/prune/ @shekhirin @joshieDo
-crates/ress @shekhirin @Rjected
-crates/revm/ @mattsse @rakita
-crates/rpc/ @mattsse @Rjected @RomanHodulak
-crates/stages/ @shekhirin @mediocregopher
-crates/static-file/ @joshieDo @shekhirin
-crates/storage/codecs/ @joshieDo
-crates/storage/db-api/ @joshieDo @rakita
-crates/storage/db-common/ @Rjected
-crates/storage/db/ @joshieDo @rakita
-crates/storage/errors/ @rakita
-crates/storage/libmdbx-rs/ @rakita @shekhirin
-crates/storage/nippy-jar/ @joshieDo @shekhirin
-crates/storage/provider/ @rakita @joshieDo @shekhirin
-crates/storage/storage-api/ @joshieDo
-crates/tasks/ @mattsse
-crates/tokio-util/ @fgimenez
-crates/transaction-pool/ @mattsse @yongkangc
-crates/trie/ @Rjected @shekhirin @mediocregopher
-bin/reth-bench-compare/ @mediocregopher @shekhirin @yongkangc
-etc/ @Rjected @shekhirin
-.github/ @gakonst @DaniPopes
+* @emhane @theochap @BioMark3r
+crates/blockchain-tree-api/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+crates/blockchain-tree/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+crates/engine/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+crates/exex/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+crates/node/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+crates/optimism/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+crates/rpc/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
+etc/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane @op-will
+.github/ @dhyaniarun1993 @itschaindev @sadiq1971 @meyer9 @emhane
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index b01d4518f75..c552c41d958 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -1,6 +1,6 @@
 name: Bug Report
 description: Create a bug report
-labels: ["C-bug", "S-needs-triage"]
+labels: ["K-bug", "S-needs-triage"]
 body:
   - type: markdown
     attributes:
diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml
deleted file mode 100644
index 7b484ec96b9..00000000000
--- a/.github/actionlint.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-self-hosted-runner:
-  labels:
-    - depot-ubuntu-latest
-    - depot-ubuntu-latest-2
-    - depot-ubuntu-latest-4
-    - depot-ubuntu-latest-8
-    - depot-ubuntu-latest-16
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 874b7d508c6..5ead9d285eb 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -77,6 +77,8 @@ exclude_crates=(
   reth-trie-parallel # tokio
   reth-trie-sparse-parallel # rayon
   reth-testing-utils
+  reth-optimism-exex # reth-exex and reth-optimism-trie
+  reth-optimism-trie # reth-trie
   reth-optimism-txpool # reth-transaction-pool
   reth-era-downloader # tokio
   reth-era-utils # tokio
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
index 264b1059ab1..2679b7d3f1e 100644
--- a/.github/workflows/bench.yml
+++ b/.github/workflows/bench.yml
@@ -5,7 +5,7 @@ on:
   # TODO: Disabled temporarily for https://github.com/CodSpeedHQ/runner/issues/55
   # merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 env:
   CARGO_TERM_COLOR: always
@@ -16,7 +16,7 @@ name: bench
 
 jobs:
   codspeed:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v6
         with:
@@ -35,6 +35,10 @@
         run: ./.github/scripts/codspeed-build.sh
       - name: Run the benchmarks
        uses: CodSpeedHQ/action@v4
+        env:
+          CODSPEED_TOKEN: ${{ secrets.CODSPEED_TOKEN }}
+        # op-reth does not have a codspeed token
+        if: ${{ env.CODSPEED_TOKEN != '' }}
        with:
          run: cargo codspeed run --workspace
          mode: instrumentation
diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml
index c52a5007adc..333c7577a8a 100644
--- a/.github/workflows/book.yml
+++ b/.github/workflows/book.yml
@@ -15,7 +15,7 @@ env:
 
 jobs:
   build:
-    runs-on: depot-ubuntu-latest-8
+    runs-on: ubuntu-latest
     timeout-minutes: 90
     steps:
       - name: Checkout
diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml
index 9293d52b80c..c8875f586c1 100644
--- a/.github/workflows/compact.yml
+++ b/.github/workflows/compact.yml
@@ -9,7 +9,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 env:
   CARGO_TERM_COLOR: always
@@ -18,7 +18,7 @@ name: compact-codec
 
 jobs:
   compact-codec:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     strategy:
       matrix:
         bin:
diff --git a/.github/workflows/cov-op-historical-proof.yml b/.github/workflows/cov-op-historical-proof.yml
new file mode 100644
index 00000000000..ff987dd1f5d
--- /dev/null
+++ b/.github/workflows/cov-op-historical-proof.yml
@@ -0,0 +1,42 @@
+name: coverage-op-historical-proof
+
+on:
+  merge_group:
+  workflow_dispatch:
+
+jobs:
+  reth-op-historical-proof-unit-tests:
+    name: coverage op-historical-proof unit tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: '1.23'
+
+      - name: Install llvm
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y llvm
+          cargo install cargo-llvm-cov
+
+      - name: Coverage
+        run: |
+          cargo llvm-cov --package reth-optimism-exex --package reth-optimism-trie --lcov --output-path unit.lcov
+
+      - name: Upload coverage to codecov.io
+        uses: codecov/codecov-action@v5
+        with:
+          fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: unit.lcov
+          flags: unit
+          env_vars: OS,RUST
+          name: reth-op-historical-proof-unit-tests
+          verbose: true
+          force: true
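The unit-coverage numbers this workflow uploads can be reproduced locally with the same cargo-llvm-cov invocation — a minimal sketch, assuming cargo-llvm-cov is installed (e.g. via `cargo install cargo-llvm-cov`, as the workflow does):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Same invocation as the "Coverage" step above: instrument only the two
# new crates and emit an lcov report for upload or local inspection.
cargo llvm-cov \
  --package reth-optimism-exex \
  --package reth-optimism-trie \
  --lcov --output-path unit.lcov

# Optional: print a human-readable summary instead of lcov output.
cargo llvm-cov report
```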
diff --git a/.github/workflows/e2e-op-historical-proof.yml b/.github/workflows/e2e-op-historical-proof.yml
new file mode 100644
index 00000000000..f89e1b7872b
--- /dev/null
+++ b/.github/workflows/e2e-op-historical-proof.yml
@@ -0,0 +1,146 @@
+name: e2e-op-historical-proof
+
+on:
+  push:
+    branches: [unstable, main]
+  pull_request:
+  workflow_dispatch:
+
+jobs:
+  op-reth-as-verifier:
+    name: op-reth-as-verifier
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: '1.24.0'
+
+      - name: Cache Go modules
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@v1
+
+      - uses: taiki-e/install-action@cargo-llvm-cov
+
+      - name: Verify tools
+        run: |
+          echo "forge: $(command -v forge || true)"
+          go version
+
+      - name: Build contract artifacts with forge
+        working-directory: crates/optimism/tests/proofs/contracts
+        run: |
+          forge build
+
+      - uses: jdx/mise-action@v3
+
+      - name: Run e2e tests
+        working-directory: crates/optimism/tests
+        run: |
+          source <(cargo llvm-cov show-env --export-prefix)
+          make build
+          make test-e2e-sysgo GO_PKG_NAME=proofs/core OP_DEVSTACK_PROOF_SEQUENCER_EL=op-geth OP_DEVSTACK_PROOF_VALIDATOR_EL=op-reth
+          make test-e2e-sysgo GO_PKG_NAME=proofs/reorg OP_DEVSTACK_PROOF_SEQUENCER_EL=op-geth OP_DEVSTACK_PROOF_VALIDATOR_EL=op-reth
+          make test-e2e-sysgo GO_PKG_NAME=proofs/prune OP_DEVSTACK_PROOF_SEQUENCER_EL=op-geth OP_DEVSTACK_PROOF_VALIDATOR_EL=op-reth
+          cargo llvm-cov report --lcov --output-path verifier_cov.lcov
+
+      - name: Upload coverage to codecov.io
+        uses: codecov/codecov-action@v5
+        with:
+          fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: verifier_cov.lcov
+          flags: e2e
+          env_vars: OS,RUST
+          name: verifier-sysgo-tests
+          verbose: true
+
+  op-reth-as-sequencer:
+    name: op-reth-as-sequencer
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: '1.24.0'
+
+      - name: Cache Go modules
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@v1
+
+      - uses: taiki-e/install-action@cargo-llvm-cov
+
+      - name: Verify tools
+        run: |
+          echo "forge: $(command -v forge || true)"
+          go version
+
+      - name: Build contract artifacts with forge
+        working-directory: crates/optimism/tests/proofs/contracts
+        run: |
+          forge build
+
+      - uses: jdx/mise-action@v3
+
+      - name: Run e2e tests
+        working-directory: crates/optimism/tests
+        run: |
+          source <(cargo llvm-cov show-env --export-prefix)
+          make build
+          make test-e2e-sysgo GO_PKG_NAME=proofs/core OP_DEVSTACK_PROOF_SEQUENCER_EL=op-reth OP_DEVSTACK_PROOF_VALIDATOR_EL=op-geth
+          make test-e2e-sysgo GO_PKG_NAME=proofs/reorg OP_DEVSTACK_PROOF_SEQUENCER_EL=op-reth OP_DEVSTACK_PROOF_VALIDATOR_EL=op-geth
+          make test-e2e-sysgo GO_PKG_NAME=proofs/prune OP_DEVSTACK_PROOF_SEQUENCER_EL=op-reth OP_DEVSTACK_PROOF_VALIDATOR_EL=op-geth
+          cargo llvm-cov report --lcov --output-path sequencer_cov.lcov
+
+      - name: Upload coverage to codecov.io
+        uses: codecov/codecov-action@v5
+        with:
+          fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: sequencer_cov.lcov
+          flags: e2e
+          env_vars: OS,RUST
+          name: sequencer-sysgo-tests
+          verbose: true
+
+  e2e-op-historical-proof-success:
+    name: e2e-op-historical-proof-success
+    runs-on: ubuntu-latest
+    if: always()
+    needs:
+      - op-reth-as-verifier
+      - op-reth-as-sequencer
+    timeout-minutes: 60
+    steps:
+      - name: E2E tests for Proof History ExEx succeeded
+        uses: re-actors/alls-green@release/v1
+        with:
+          jobs: ${{ toJSON(needs) }}
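Running one of these sysgo suites outside CI follows the workflow's run steps one-to-one — a sketch, assuming Go, forge, and cargo-llvm-cov are on PATH; the make targets and environment variables are exactly the ones the workflow invokes, and the submodule step reflects `submodules: true` plus the `.gitmodules` entry added further down in this diff:

```bash
#!/usr/bin/env bash
set -euo pipefail

# The devstack test harness lives in the optimism submodule, so fetch it first.
git submodule update --init --recursive

cd crates/optimism/tests
(cd proofs/contracts && forge build)

# Export the llvm-cov instrumentation env vars so op-reth is built with
# coverage, then run the core suite with op-reth in the validator role.
source <(cargo llvm-cov show-env --export-prefix)
make build
make test-e2e-sysgo GO_PKG_NAME=proofs/core \
  OP_DEVSTACK_PROOF_SEQUENCER_EL=op-geth \
  OP_DEVSTACK_PROOF_VALIDATOR_EL=op-reth
cargo llvm-cov report --lcov --output-path verifier_cov.lcov
```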
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index f31fefed35f..9a20bf64cf1 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -6,7 +6,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 env:
   CARGO_TERM_COLOR: always
@@ -20,7 +20,7 @@ concurrency:
 jobs:
   test:
     name: e2e-testsuite
-    runs-on: depot-ubuntu-latest-4
+    runs-on: ubuntu-latest
     env:
       RUST_BACKTRACE: 1
     timeout-minutes: 90
diff --git a/.github/workflows/grafana.yml b/.github/workflows/grafana.yml
index f34b342401f..12408c26ce1 100644
--- a/.github/workflows/grafana.yml
+++ b/.github/workflows/grafana.yml
@@ -4,7 +4,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 jobs:
   check-dashboard:
diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml
index e94042d6b9b..01de852e8cb 100644
--- a/.github/workflows/hive.yml
+++ b/.github/workflows/hive.yml
@@ -32,10 +32,9 @@ jobs:
       artifact_name: "reth-edge"
 
   prepare-hive:
-    if: github.repository == 'paradigmxyz/reth'
+    if: github.repository == 'op-rs/op-reth'
     timeout-minutes: 45
-    runs-on:
-      group: Reth
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v6
       - name: Checkout hive tests
@@ -191,8 +190,7 @@ jobs:
       - prepare-reth-edge
       - prepare-hive
     name: ${{ matrix.storage }} / ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }}
-    runs-on:
-      group: Reth
+    runs-on: ubuntu-latest
     permissions:
       issues: write
     steps:
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 46f5670c72f..1f39eaefdf3 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -6,7 +6,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
   schedule:
     # Run once a day at 3:00 UTC
     - cron: "0 3 * * *"
@@ -24,7 +24,7 @@ jobs:
   test:
     name: test / ${{ matrix.network }}
     if: github.event_name != 'schedule'
-    runs-on: depot-ubuntu-latest-4
+    runs-on: ubuntu-latest
     env:
       RUST_BACKTRACE: 1
     strategy:
diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml
index a4ad10758c6..009b8f2d45a 100644
--- a/.github/workflows/kurtosis-op.yml
+++ b/.github/workflows/kurtosis-op.yml
@@ -32,7 +32,7 @@ jobs:
     strategy:
       fail-fast: false
     name: run kurtosis
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     needs:
       - prepare-reth
     steps:
diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml
index c846e1b5852..6bd04921ba1 100644
--- a/.github/workflows/kurtosis.yml
+++ b/.github/workflows/kurtosis.yml
@@ -30,7 +30,7 @@ jobs:
     strategy:
       fail-fast: false
     name: run kurtosis
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     needs:
       - prepare-reth
     steps:
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 839e7098372..10e8d9fa17c 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -4,7 +4,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 env:
   CARGO_TERM_COLOR: always
@@ -13,7 +13,7 @@
 jobs:
   clippy-binaries:
     name: clippy binaries / ${{ matrix.type }}
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     strategy:
       matrix:
@@ -42,7 +42,7 @@ jobs:
 
   clippy:
     name: clippy
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
       - uses: actions/checkout@v6
@@ -59,7 +59,7 @@
          RUSTFLAGS: -D warnings
 
   wasm:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
      - uses: actions/checkout@v6
@@ -79,7 +79,7 @@
          .github/assets/check_wasm.sh
 
   riscv:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v6
@@ -98,7 +98,7 @@
 
   crate-checks:
     name: crate-checks (${{ matrix.partition }}/${{ matrix.total_partitions }})
-    runs-on: depot-ubuntu-latest-4
+    runs-on: ubuntu-latest
     strategy:
       matrix:
         partition: [1, 2, 3]
@@ -117,7 +117,7 @@
 
   msrv:
     name: MSRV
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     strategy:
       matrix:
@@ -140,7 +140,7 @@
 
   docs:
     name: docs
-    runs-on: depot-ubuntu-latest-4
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
       - uses: actions/checkout@v6
@@ -158,7 +158,7 @@
 
   fmt:
     name: fmt
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
       - uses: actions/checkout@v6
@@ -172,7 +172,7 @@
 
   udeps:
     name: udeps
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
       - uses: actions/checkout@v6
@@ -187,7 +187,7 @@
 
   book:
     name: book
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
       - uses: actions/checkout@v6
@@ -246,7 +246,11 @@
   # Checks that selected crates can compile with power set of features
   features:
     name: features
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        partition: [1, 2]
+        total_partitions: [2]
     timeout-minutes: 30
     steps:
       - uses: actions/checkout@v6
@@ -270,7 +274,7 @@
 
   # Check crates correctly propagate features
   feature-propagation:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 20
     steps:
       - uses: actions/checkout@v6
diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml
index e738c72303b..1be87cf6eb0 100644
--- a/.github/workflows/prepare-reth.yml
+++ b/.github/workflows/prepare-reth.yml
@@ -29,9 +29,9 @@ on:
 
 jobs:
   prepare-reth:
-    if: github.repository == 'paradigmxyz/reth'
+    if: github.repository == 'op-rs/op-reth'
     timeout-minutes: 45
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v6
       - run: mkdir artifacts
diff --git a/.github/workflows/stage.yml b/.github/workflows/stage.yml
index 342c36a5c5f..4ec6bfffc4e 100644
--- a/.github/workflows/stage.yml
+++ b/.github/workflows/stage.yml
@@ -6,7 +6,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 env:
   CARGO_TERM_COLOR: always
@@ -23,7 +23,7 @@ jobs:
     name: stage-run-test
     # Only run stage commands test in merge groups
     if: github.event_name == 'merge_group'
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     env:
       RUST_LOG: info,sync=error
       RUST_BACKTRACE: 1
diff --git a/.github/workflows/sync-era.yml b/.github/workflows/sync-era.yml
index e6ce12f9626..abb90a00d05 100644
--- a/.github/workflows/sync-era.yml
+++ b/.github/workflows/sync-era.yml
@@ -18,7 +18,7 @@ concurrency:
 jobs:
   sync:
     name: sync (${{ matrix.chain.bin }})
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     env:
       RUST_LOG: info,sync=error
       RUST_BACKTRACE: 1
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
index c1c5794fd07..64c6ab9c10d 100644
--- a/.github/workflows/sync.yml
+++ b/.github/workflows/sync.yml
@@ -18,7 +18,7 @@ concurrency:
 jobs:
   sync:
     name: sync (${{ matrix.chain.bin }})
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     env:
       RUST_LOG: info,sync=error
       RUST_BACKTRACE: 1
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
index 834c4a8ce75..2f88d595726 100644
--- a/.github/workflows/unit.yml
+++ b/.github/workflows/unit.yml
@@ -6,7 +6,7 @@ on:
   pull_request:
   merge_group:
   push:
-    branches: [main]
+    branches: [unstable, main]
 
 env:
   CARGO_TERM_COLOR: always
@@ -20,7 +20,7 @@ concurrency:
 jobs:
   test:
     name: test / ${{ matrix.type }} / ${{ matrix.storage }}
-    runs-on: depot-ubuntu-latest-4
+    runs-on: ubuntu-latest
     env:
       RUST_BACKTRACE: 1
       EDGE_FEATURES: ${{ matrix.storage == 'edge' && 'edge' || '' }}
@@ -59,7 +59,7 @@
 
   state:
     name: Ethereum state tests
-    runs-on: depot-ubuntu-latest-4
+    runs-on: ubuntu-latest
     env:
       RUST_LOG: info,sync=error
       RUST_BACKTRACE: 1
@@ -94,7 +94,7 @@
 
   doc:
     name: doc tests
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     env:
       RUST_BACKTRACE: 1
     timeout-minutes: 30
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 9bcadad6b8f..d5946c088f5 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -14,7 +14,7 @@ env:
 
 jobs:
   check-reth:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 60
 
     steps:
@@ -34,7 +34,7 @@
         run: cargo check --target x86_64-pc-windows-gnu
 
   check-op-reth:
-    runs-on: depot-ubuntu-latest
+    runs-on: ubuntu-latest
     timeout-minutes: 60
 
     steps:
diff --git a/.gitignore b/.gitignore
index cf5014a4810..1cf249c707e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -75,3 +75,7 @@ __pycache__/
 # direnv
 .envrc
 .direnv/
+
+# Optimism test artifacts
+crates/optimism/tests/proofs/contracts/artifacts
+crates/optimism/tests/proofs/contracts/cache
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000000..93f63753598
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "crates/optimism/tests/optimism"]
+	path = crates/optimism/tests/optimism
+	url = https://github.com/ChainSafe/optimism.git
+	branch = jk/op-historical-proofs
diff --git a/Cargo.lock b/Cargo.lock
index 38828faaaf4..475a93fe345 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 
 [[package]]
 name = "alloy-chains"
-version = "0.2.24"
+version = "0.2.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b163ff4acf0eac29af05a911397cc418a76e153467b859398adc26cb9335a611"
+checksum = "dd208e8a87fbc2ca1a3822dd1ea03b0a7a4a841e6fa70db2c236dd30ae2e7018"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -121,9 +121,9 @@
 
 [[package]]
 name = "alloy-consensus"
-version = "1.2.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3dcd2b4e208ce5477de90ccdcbd4bde2c8fb06af49a443974e92bb8f2c5e93f"
+checksum = "41e46a465e50a339a817070ec23f06eb3fc9fbb8af71612868367b875a9d49e3"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -149,9 +149,9 @@
 
 [[package]]
 name = "alloy-consensus-any"
-version = "1.2.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee5655f234985f5ab1e31bef7e02ed11f0a899468cf3300e061e1b96e9e11de0"
+checksum = "07001b1693af794c7526aab400b42e38075f986ef8fef78841e5ebc745473e56"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -164,9 +164,9 @@
 
 [[package]]
 name = "alloy-contract"
-version = "1.2.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f01b6d8e5b4f3222aaf7f18613a7292e2fbc9163fe120649cd1b078ca534349"
+checksum = "3ef1b07c3ff5bf4fab5b8e6c46190cd40b2f2fd2cd72b5b02527a38125d0bff4"
 dependencies = [
  "alloy-consensus",
  "alloy-dyn-abi",
@@ -260,9 +260,9 @@
 
 [[package]]
 name = "alloy-eips"
-version = "1.2.1"
+version = "1.3.0"
 source =
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6847d641141b92a1557094aa6c236cbe49c06fb24144d4a21fe6acb970c15888" +checksum = "707337efeb051ddbaece17a73eaec5150945a5a5541112f4146508248edc2e40" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -308,9 +308,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe3192fca2eb0b0c4b122b3c2d8254496b88a4e810558dddd3ea2f30ad9469df" +checksum = "64ba7afffa225272cf50c62ff04ac574adc7bfa73af2370db556340f26fcff5c" dependencies = [ "alloy-eips", "alloy-primitives", @@ -349,9 +349,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4ab3330e491053e9608b2a315f147357bb8acb9377a988c1203f2e8e2b296c9" +checksum = "48562f9b4c4e1514cab54af16feaffc18194a38216bbd0c23004ec4667ad696b" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -364,9 +364,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e22ff194b1e34b4defd1e257e3fe4dce0eee37451c7757a1510d6b23e7379a" +checksum = "364a5eaa598437d7a57bcbcb4b7fcb0518e192cf809a19b09b2b5cf73b9ba1cd" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -390,9 +390,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a6cbb9f431bdad294eebb5af9b293d6979e633bfe5468d1e87c1421a858265" +checksum = "21af5255bd276e528ee625d97033884916e879a1c6edcd5b70a043bd440c0710" dependencies = [ "alloy-consensus", "alloy-eips", @@ -448,7 +448,7 @@ dependencies = [ "foldhash 0.2.0", "getrandom 0.3.4", "hashbrown 0.16.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "k256", "keccak-asm", @@ -466,9 +466,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5dde1abc3d582e53d139904fcdd8b2103f0bd03e8f2acb4292edbbaeaa7e6e" +checksum = "5cc919fe241f9dd28c4c7f7dcff9e66e550c280bafe3545e1019622e1239db38" dependencies = [ "alloy-chains", "alloy-consensus", @@ -511,9 +511,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbfe0a3c553a027f722185fb574124d205147fffb309cae52d0a2094f076887" +checksum = "23a0778833917a71a9e0065e0409bfc00cddef55ca962b3453472be38ebe7035" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -550,14 +550,14 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "alloy-rpc-client" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a94bdef2710322c6770be08689fee0878c2ad75615b8fc40e05d7f3c9618c0b" +checksum = "2b587e63d8c4af437b0a7830dc12d24cb495e956cc8ecbf93e96d62c9cb55b13" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -581,9 +581,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "811a573c8080e1b492d488e6a240ec5dd7677d7167e91ce9cb4d0ec1fcac8027" 
+checksum = "97b3000edc72a300048cf461df94bfa29fc5d7760ddd88ca7d56ea6fc8b28729" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -594,9 +594,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d2d901eeaf99067f54c97a98c8afddcb9f63e35af1efe0ce8d45d04f9223e50" +checksum = "ebb98103316e6f4a1ebc6e71328c2d18426cdd79fc999c44afd9f0f4e9f5edd6" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -606,9 +606,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "838ca94be532a929f27961851000ec8bbbaeb06e2a2bcca44fac7855a2fe0f6f" +checksum = "f1207e852f30297d6918f91df3e76f758fa7b519ea1e49fbd7d961ce796663f9" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -618,9 +618,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12df0b34551ca2eab8ec83b56cb709ee5da991737282180d354a659b907f00dc" +checksum = "6ebc96cf29095c10a183fb7106a097fe12ca8dd46733895582da255407f54b29" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -629,9 +629,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32598a2443750a2e884c1b48efccaeeaae75e7eb4e0f13df9146b78107b4c301" +checksum = "3cea7c1c22628b13b25d31fd63fa5dfa7fac0b0b78f1c89a5068102b653ff65c" dependencies = [ "alloy-eips", "alloy-primitives", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49a3a168a5bf18f1cf7ed5723a650aebe714edf7665b53dacf5707716733d0" +checksum = "7e1a6b13b6f95b80d3ff770998f81e61811264eb1d18b88dfa11c80180acdc1b" dependencies = [ "alloy-primitives", "derive_more", @@ -661,9 +661,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe16cd1dea6089902ec609e04261a9ae6d11ec66005ba24c1f97f0eefbc0fa9" +checksum = "f35af673cc14e89813ab33671d79b6e73fe38788c5f3a8ec3a75476b58225f53" dependencies = [ "alloy-consensus", "alloy-eips", @@ -682,9 +682,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f9f130511b8632686dfe6f9909b38d7ae4c68de3ce17d28991400646a39b25" +checksum = "9cc3f354a5079480acca0a6533d1d3838177a03ea494ef0ae8d1679efea88274" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -704,9 +704,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdeb95f21ba043cbbbc074f7af9c7bb22e2727de02dc3fe95d5ae963a96767a6" +checksum = "10fbd905c35f780926ff0c4c2a74d3ce7d50576cb0e9997dc783ac99c6fd7afb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -719,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cafe859944638c5d57d1a3a0034cdb5d07c98c37de8adce5508f28834acf958f" +checksum = "6d782d80221dfaa5a2f8a7bf277370bdec10e4e8119f5a60d2e2b1adb2e806ca" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -733,9 +733,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afaa06544e36f223b99b1415a12911230fd527994f020736c3c7950d5080208e" +checksum = "a3076c226bb4365f9c3ac0cd4082ba86208aaa1485cbf664383a90aba7c36b26" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -745,9 +745,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "067b718d2e6ac1bb889341fcc7a250cfa49bcd3ba4f23923f1c1eb1f2b10cb7c" +checksum = "a438ce4cd49ec4bc213868c1fe94f2fe103d4c3f22f6a42073db974f9c0962da" dependencies = [ "alloy-primitives", "arbitrary", @@ -757,9 +757,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acff6b251740ef473932386d3b71657d3825daebf2217fb41a7ef676229225d4" +checksum = "389372d6ae4d62b88c8dca8238e4f7d0a7727b66029eb8a5516a908a03161450" dependencies = [ "alloy-primitives", "async-trait", @@ -772,9 +772,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9129ef31975d987114c27c9930ee817cf3952355834d47f2fdf4596404507e8" +checksum = "69c260e78b9c104c444f8a202f283d5e8c6637e6fa52a83f649ad6aaa0b91fd0" dependencies = [ "alloy-consensus", "alloy-network", @@ -800,7 +800,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -812,11 +812,11 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.12.1", + "indexmap 2.13.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "syn-solidity", "tiny-keccak", ] @@ -833,7 +833,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "syn-solidity", ] @@ -861,9 +861,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec1fb08ee484e615f24867c0b154fff5722bb00176102a16868c6532b7c3623" +checksum = "f01c27edb3c0926919586a231d99e06284f9239da6044b5682033ef781e1cc62" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -884,9 +884,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64b722073c76f2de7e118d546ee1921c50710f97feb32aed50db94cfa5b663e1" +checksum = "2cc57657fd3249fc8324cbbc8edbb7d5114af5fbc7c6c32dff944d6b5922f400" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -899,9 +899,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdedcf401aab4b96d8b5e6638b79d04a6afb96c0bfcb50a2324fbadfe65c47b3" +checksum = "92a5a36d4ca1261a29dd1d791cd89c21b71d7465211910e43b0862d1c067a211" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -919,9 +919,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.2.1" +version = "1.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "942210908f0c56941097f5653a5f334546940e6fd9073495b257e52216469feb" +checksum = "e81effa6a2db6b2152eefb244b4aa6334b1c42819d0eca8d5a91826ec7a9fdba" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -936,9 +936,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b77b56af09ead281337d06b1d036c88e2dc8a2e45da512a532476dbee94912b" +checksum = "428aa0f0e0658ff091f8f667c406e034b431cb10abd39de4f507520968acc499" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -956,14 +956,14 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04950a13cc4209d8e9b78f306e87782466bad8538c94324702d061ff03e211c9" +checksum = "99dac443033e83b14f68fac56e8c27e76421f1253729574197ceccd06598f3ef" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1048,7 +1048,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1190,7 +1190,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1228,7 +1228,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1317,7 +1317,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1434,7 +1434,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1445,7 +1445,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1483,7 +1483,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1625,7 +1625,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1643,7 +1643,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1755,7 +1755,7 @@ dependencies = [ "boa_interner", "boa_macros", "boa_string", - "indexmap 2.12.1", + "indexmap 2.13.0", "num-bigint", "rustc-hash", ] @@ -1787,7 +1787,7 @@ dependencies = [ "futures-lite 2.6.1", "hashbrown 0.16.1", "icu_normalizer", - "indexmap 2.12.1", + "indexmap 2.13.0", "intrusive-collections", "itertools 0.14.0", "num-bigint", @@ -1833,7 +1833,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.16.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "once_cell", "phf", "rustc-hash", @@ -1850,7 +1850,7 @@ dependencies = [ "cow-utils", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure", ] @@ -1906,7 +1906,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1995,7 +1995,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2236,7 
+2236,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2798,7 +2798,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2855,7 +2855,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2870,7 +2870,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2883,7 +2883,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2894,7 +2894,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2905,7 +2905,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2916,7 +2916,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2969,7 +2969,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3028,7 +3028,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3039,7 +3039,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3060,7 +3060,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3070,7 +3070,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3092,7 +3092,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.113", + "syn 2.0.114", "unicode-xid", ] @@ -3224,7 +3224,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3242,6 +3242,12 @@ dependencies = [ "litrs", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "dunce" version = "1.0.5" @@ -3271,7 +3277,7 @@ checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3323,7 +3329,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3431,7 +3437,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3451,7 +3457,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3471,7 +3477,7 @@ checksum = 
"44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3547,7 +3553,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4144,6 +4150,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" + [[package]] name = "fsevent-sys" version = "4.1.0" @@ -4271,7 +4283,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4488,9 +4500,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -4498,7 +4510,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", "slab", "tokio", "tokio-util", @@ -5018,7 +5030,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5059,9 +5071,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "arbitrary", "equivalent", @@ -5137,7 +5149,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5414,7 +5426,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5559,9 +5571,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.179" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a2d376baa530d1238d133232d15e239abad80d05838b4b59354e5268af431f" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libgit2-sys" @@ -5795,7 +5807,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5874,7 +5886,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5884,7 +5896,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3589659543c04c7dc5526ec858591015b87cd8746583b51b48ef4353f99dbcda" dependencies = [ "base64 0.22.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "metrics", "metrics-util", "quanta", @@ -5916,7 +5928,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.16.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "metrics", "ordered-float", "quanta", @@ -6005,6 +6017,32 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "mockall" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "modular-bitfield" version = "0.11.2" @@ -6276,7 +6314,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6290,9 +6328,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" +checksum = "7b5676b5c379cf5b03da1df2b3061c4a4e2aa691086a56ac923e08c143f53f59" dependencies = [ "alloy-rlp", "arbitrary", @@ -6477,16 +6515,27 @@ name = "op-reth" version = "1.9.3" dependencies = [ "clap", + "eyre", + "futures-util", + "humantime", + "reth-chainspec", "reth-cli-util", + "reth-db", + "reth-db-api", + "reth-node-builder", "reth-optimism-chainspec", "reth-optimism-cli", "reth-optimism-consensus", "reth-optimism-evm", + "reth-optimism-exex", "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-optimism-rpc", + "reth-optimism-trie", + "reth-tasks", + "tokio", "tracing", ] @@ -6663,7 +6712,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6778,7 +6827,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6807,7 +6856,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6931,6 +6980,32 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty_assertions" version = "1.4.1" @@ -6948,7 +7023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6999,14 +7074,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] @@ -7094,7 
+7169,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7105,7 +7180,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7138,7 +7213,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7151,7 +7226,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7252,9 +7327,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -7515,7 +7590,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7963,7 +8038,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9687,6 +9762,7 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-node", "reth-optimism-primitives", + "reth-optimism-trie", "reth-primitives-traits", "reth-provider", "reth-prune", @@ -9762,6 +9838,31 @@ dependencies = [ "thiserror 2.0.17", ] +[[package]] +name = "reth-optimism-exex" +version = "1.9.3" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "eyre", + "futures", + "futures-util", + "reth-db", + "reth-execution-types", + "reth-exex", + "reth-node-api", + "reth-node-builder", + "reth-node-types", + "reth-optimism-chainspec", + "reth-optimism-node", + "reth-optimism-trie", + "reth-provider", + "reth-trie", + "tempfile", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-flashblocks" version = "1.9.3" @@ -9945,10 +10046,12 @@ dependencies = [ "alloy-json-rpc", "alloy-op-hardforks", "alloy-primitives", + "alloy-rlp", "alloy-rpc-client", "alloy-rpc-types-debug", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-serde", "alloy-transport", "alloy-transport-http", "async-trait", @@ -9966,6 +10069,7 @@ dependencies = [ "op-alloy-rpc-types-engine", "op-revm", "reqwest", + "reth-basic-payload-builder", "reth-chain-state", "reth-chainspec", "reth-evm", @@ -9978,8 +10082,12 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-optimism-trie", "reth-optimism-txpool", + "reth-payload-util", "reth-primitives-traits", + "reth-provider", + "reth-revm", "reth-rpc", "reth-rpc-api", "reth-rpc-engine-api", @@ -9990,7 +10098,9 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "revm", + "serde", "serde_json", + "strum 0.27.2", "thiserror 2.0.17", "tokio", "tokio-stream", @@ -10010,6 +10120,50 @@ dependencies = [ "reth-storage-api", ] +[[package]] +name = "reth-optimism-trie" +version = "1.9.3" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "auto_impl", + "bincode 1.3.3", + "bytes", + "derive_more", + "eyre", + "metrics", + "mockall", + "reth-chainspec", + "reth-codecs", + "reth-db", + "reth-db-api", + "reth-db-common", + "reth-ethereum-primitives", + "reth-evm", + "reth-evm-ethereum", + "reth-execution-errors", + "reth-metrics", + "reth-node-api", + 
"reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-storage-errors", + "reth-tasks", + "reth-testing-utils", + "reth-trie", + "secp256k1 0.30.0", + "serde", + "serial_test", + "strum 0.27.2", + "tempfile", + "test-case", + "thiserror 2.0.17", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-txpool" version = "1.9.3" @@ -11647,7 +11801,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.113", + "syn 2.0.114", "unicode-ident", ] @@ -11774,9 +11928,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "log", "once_cell", @@ -11886,6 +12040,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.28" @@ -11942,6 +12105,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + [[package]] name = "sec1" version = "0.7.3" @@ -12112,16 +12281,16 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "serde_json" -version = "1.0.148" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "memchr", "serde", @@ -12171,7 +12340,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.13.0", "schemars 0.9.0", "schemars 1.2.0", "serde_core", @@ -12189,7 +12358,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12202,6 +12371,32 @@ dependencies = [ "serde", ] +[[package]] +name = "serial_test" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" +dependencies = [ + "futures-executor", + "futures-util", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "sha1" version = "0.10.6" @@ -12523,7 +12718,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12535,7 +12730,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12557,9 +12752,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.113" +version = "2.0.114" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "678faa00651c9eb72dd2020cbdf275d92eccb2400d568e419efdd64838145cb4" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -12575,7 +12770,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12595,7 +12790,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12664,6 +12859,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "test-case" version = "3.3.1" @@ -12682,7 +12883,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12693,7 +12894,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "test-case-core", ] @@ -12733,7 +12934,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12781,7 +12982,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12792,7 +12993,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12948,7 +13149,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -12963,9 +13164,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -12992,9 +13193,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -13041,7 +13242,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -13055,7 +13256,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -13122,7 +13323,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -13208,7 +13409,7 @@ checksum = 
"7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13363,9 +13564,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.18.3" +version = "0.18.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d722a05fe49b31fef971c4732a7d4aa6a18283d9ba46abddab35f484872947" +checksum = "a4f6fc3baeac5d86ab90c772e9e30620fc653bf1864295029921a15ef478e6a5" dependencies = [ "loom", "once_cell", @@ -13375,9 +13576,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb391ac70462b3097a755618fbf9c8f95ecc1eb379a414f7b46f202ed10db1f" +checksum = "c5f7c95348f20c1c913d72157b3c6dee6ea3e30b3d19502c5a7f6d3f160dacbf" dependencies = [ "cc", "windows-targets 0.52.6", @@ -13405,7 +13606,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13493,9 +13694,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -13568,14 +13769,15 @@ checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -13686,7 +13888,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -13789,7 +13991,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "wasm-bindgen-shared", ] @@ -13997,7 +14199,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14008,7 +14210,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14019,7 +14221,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14030,7 +14232,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14488,28 +14690,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = 
"668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14529,7 +14731,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure", ] @@ -14550,7 +14752,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -14584,14 +14786,14 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "zmij" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee2a72b10d087f75fb2e1c2c7343e308fe6970527c22a41caf8372e165ff5c1" +checksum = "2fc5a66a20078bf1251bde995aa2fdcc4b800c70b5d92dd2c62abc5c60f679f8" [[package]] name = "zstd" diff --git a/Cargo.toml b/Cargo.toml index 0210182cd53..d8e332c7138 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ members = [ "crates/optimism/cli", "crates/optimism/consensus", "crates/optimism/evm/", + "crates/optimism/exex/", "crates/optimism/flashblocks/", "crates/optimism/hardforks/", "crates/optimism/node/", @@ -85,6 +86,7 @@ members = [ "crates/optimism/reth/", "crates/optimism/rpc/", "crates/optimism/storage", + "crates/optimism/trie", "crates/optimism/txpool/", "crates/payload/basic/", "crates/payload/builder/", @@ -419,11 +421,13 @@ reth-op = { path = "crates/optimism/reth", default-features = false } reth-optimism-chainspec = { path = "crates/optimism/chainspec", default-features = false } reth-optimism-cli = { path = "crates/optimism/cli", default-features = false } reth-optimism-consensus = { path = "crates/optimism/consensus", default-features = false } +reth-optimism-exex = { path = "crates/optimism/exex" } reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives", default-features = false } reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-optimism-storage = { path = "crates/optimism/storage" } +reth-optimism-trie = { path = "crates/optimism/trie" } reth-optimism-txpool = { path = "crates/optimism/txpool" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-builder-primitives = { path = "crates/payload/builder-primitives" } @@ -677,6 +681,7 @@ similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.20" test-fuzz = "7" rstest = "0.24.0" +serial_test = "3.2.0" test-case = "3" # ssz encoding @@ -741,6 +746,7 @@ vergen = "9.0.4" visibility = "0.1.1" walkdir = "2.3.3" vergen-git2 = "1.0.5" +mockall = "0.13.1" # networking ipnet = "2.11" diff --git a/DockerfileOp b/DockerfileOp index ba6e6627fda..802800a497d 100644 --- a/DockerfileOp +++ b/DockerfileOp @@ -4,43 +4,77 @@ WORKDIR /app LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth LABEL org.opencontainers.image.licenses="MIT OR 
Apache-2.0" -RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config +RUN apt-get update && apt-get -y upgrade && \ + apt-get install -y libclang-dev pkg-config llvm llvm-dev clang -# Builds a cargo-chef plan +# ============================================================ +# Stage 1: cargo-chef planner +# ============================================================ FROM chef AS planner COPY . . RUN cargo chef prepare --recipe-path recipe.json +# ============================================================ +# Stage 2: cargo-chef builder +# ============================================================ FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json ARG BUILD_PROFILE=maxperf ENV BUILD_PROFILE=$BUILD_PROFILE +ARG FEATURES="" +ENV FEATURES=$FEATURES + ARG RUSTFLAGS="" ENV RUSTFLAGS="$RUSTFLAGS" -ARG FEATURES="" -ENV FEATURES=$FEATURES +ARG CARGO_INCREMENTAL="" +ENV CARGO_INCREMENTAL="$CARGO_INCREMENTAL" -RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json --manifest-path /app/crates/optimism/bin/Cargo.toml +ARG LLVM_PROFILE_FILE="" +ENV LLVM_PROFILE_FILE="$LLVM_PROFILE_FILE" +# Build deps with cargo-chef +RUN cargo chef cook \ + --profile $BUILD_PROFILE \ + --features "$FEATURES" \ + --recipe-path recipe.json \ + --manifest-path /app/crates/optimism/bin/Cargo.toml + +# Build op-reth COPY . . -RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth --manifest-path /app/crates/optimism/bin/Cargo.toml +RUN cargo build \ + --profile $BUILD_PROFILE \ + --features "$FEATURES" \ + --bin op-reth \ + --manifest-path /app/crates/optimism/bin/Cargo.toml +# Copy resulting binary RUN ls -la /app/target/$BUILD_PROFILE/op-reth RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth +# ============================================================ +# Stage 3: runtime container +# ============================================================ FROM ubuntu AS runtime RUN apt-get update && \ - apt-get install -y ca-certificates libssl-dev pkg-config strace && \ + apt-get install -y ca-certificates libssl-dev pkg-config strace llvm && \ rm -rf /var/lib/apt/lists/* WORKDIR /app + +# Copy op-reth binary COPY --from=builder /app/op-reth /usr/local/bin/ RUN chmod +x /usr/local/bin/op-reth + COPY LICENSE-* ./ +# Coverage output directory +ENV LLVM_PROFILE_FILE="/coverage/%m-%p.profraw" +RUN mkdir -p /coverage + EXPOSE 30303 30303/udp 9001 8545 8546 7545 8551 -ENTRYPOINT ["/usr/local/bin/op-reth"] + +ENTRYPOINT ["/usr/local/bin/op-reth"] \ No newline at end of file diff --git a/DockerfileOpProof b/DockerfileOpProof new file mode 100644 index 00000000000..0fedaff5ab7 --- /dev/null +++ b/DockerfileOpProof @@ -0,0 +1,48 @@ +FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef +WORKDIR /app + +LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" + +RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config + +# Builds a cargo-chef plan +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json + +ARG BUILD_PROFILE=release +ENV BUILD_PROFILE=$BUILD_PROFILE + +ARG RUSTFLAGS="" +ENV RUSTFLAGS="$RUSTFLAGS" + +ARG FEATURES="" +ENV FEATURES=$FEATURES + +RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json --manifest-path /app/crates/optimism/bin/Cargo.toml + +COPY . . 
+RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth --manifest-path /app/crates/optimism/bin/Cargo.toml + +RUN ls -la /app/target/$BUILD_PROFILE/op-reth +RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth + +FROM ubuntu AS runtime + +RUN apt-get update && \ + apt-get install -y ca-certificates libssl-dev pkg-config strace && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app +COPY --from=builder /app/op-reth /usr/local/bin/ +RUN chmod +x /usr/local/bin/op-reth +COPY crates/optimism/tests/scripts/op-reth-entrypoint.sh /usr/local/bin/op-reth-entrypoint.sh +RUN chmod +x /usr/local/bin/op-reth-entrypoint.sh +COPY LICENSE-* ./ + +EXPOSE 30303 30303/udp 9001 8545 8546 7545 8551 +ENTRYPOINT ["/usr/local/bin/op-reth-entrypoint.sh"] diff --git a/README.md b/README.md index caa9d136cb5..8ba5e9c3107 100644 --- a/README.md +++ b/README.md @@ -1,149 +1,177 @@ -# reth +# op-reth historical proofs +[![codecov](https://codecov.io/gh/op-rs/op-reth/branch/main/graph/badge.svg)](https://app.codecov.io/gh/op-rs/op-reth/tree/unstable/crates%2Foptimism?components%5B0%5D=op%20historical%20proof) -[![bench status](https://github.com/paradigmxyz/reth/actions/workflows/bench.yml/badge.svg)](https://github.com/paradigmxyz/reth/actions/workflows/bench.yml) -[![CI status](https://github.com/paradigmxyz/reth/workflows/unit/badge.svg)][gh-ci] -[![cargo-lint status](https://github.com/paradigmxyz/reth/actions/workflows/lint.yml/badge.svg)][gh-lint] -[![Telegram Chat][tg-badge]][tg-url] - -**Modular, contributor-friendly and blazing-fast implementation of the Ethereum protocol** - -![](./assets/reth-prod.png) - -**[Install](https://paradigmxyz.github.io/reth/installation/installation.html)** -| [User Docs](https://reth.rs) -| [Developer Docs](./docs) -| [Crate Docs](https://reth.rs/docs) - -[gh-ci]: https://github.com/paradigmxyz/reth/actions/workflows/unit.yml -[gh-lint]: https://github.com/paradigmxyz/reth/actions/workflows/lint.yml -[tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth - -## What is Reth? +![Description](assets/op-rs-logo.png) +> **⚠️ Under Construction** +> +> This is a work in progress. Stay tuned! -Reth (short for Rust Ethereum, [pronunciation](https://x.com/kelvinfichter/status/1597653609411268608)) is a new Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient. Reth is an Execution Layer (EL) and is compatible with all Ethereum Consensus Layer (CL) implementations that support the [Engine API](https://github.com/ethereum/execution-apis/tree/a0d03086564ab1838b462befbc083f873dcf0c0f/src/engine). It is originally built and driven forward by [Paradigm](https://paradigm.xyz/), and is licensed under the Apache and MIT licenses. +## Motivation -## Goals +Reliable access to recent historical state via `eth_getProof` is a critical requirement for rollups and L2 infrastructure built on Ethereum. -As a full Ethereum node, Reth allows users to connect to the Ethereum network and interact with the Ethereum blockchain. This includes sending and receiving transactions/logs/traces, as well as accessing and interacting with smart contracts. Building a successful Ethereum node requires creating a high-quality implementation that is both secure and efficient, as well as being easy to use on consumer hardware. It also requires building a strong community of contributors who can help support and improve the software. 
+As described in Reth issue [#18070](https://github.com/paradigmxyz/reth/issues/18070), many applications on Optimism and other rollups (e.g. Base infrastructure, ENS, fault-proof systems) depend on fast and reliable `eth_getProof` queries within a bounded challenge window (typically 7 days). At present, the lack of reliable recent-state proof support is a blocker for broader Reth adoption in these environments. -More concretely, our goals are: +The core issue lies in Reth's architecture for historical state calculation. To serve `eth_getProof` for a historical block, Reth must perform an **in-memory revert**, applying state diffs backwards from the chain tip. While efficient for recent blocks, reverting state for a block 7 days ago requires loading thousands of changesets into memory. This operation is computationally expensive and often causes the node to crash due to **Out-Of-Memory (OOM)** errors, effectively making deep historical proofs impossible on a standard node. -1. **Modularity**: Every component of Reth is built to be used as a library: well-tested, heavily documented and benchmarked. We envision that developers will import the node's crates, mix and match, and innovate on top of them. Examples of such usage include but are not limited to spinning up standalone P2P networks, talking directly to a node's database, or "unbundling" the node into the components you need. To achieve that, we are licensing Reth under the Apache/MIT permissive license. You can learn more about the project's components [here](./docs/repo/layout.md). -2. **Performance**: Reth aims to be fast, so we use Rust and the [Erigon staged-sync](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) node architecture. We also use our Ethereum libraries (including [Alloy](https://github.com/alloy-rs/alloy/) and [revm](https://github.com/bluealloy/revm/)) which we've battle-tested and optimized via [Foundry](https://github.com/foundry-rs/foundry/). -3. **Free for anyone to use any way they want**: Reth is free open source software, built for the community, by the community. By licensing the software under the Apache/MIT license, we want developers to use it without being bound by business licenses, or having to think about the implications of GPL-like licenses. -4. **Client Diversity**: The Ethereum protocol becomes more antifragile when no node implementation dominates. This ensures that if there's a software bug, the network does not finalize a bad block. By building a new client, we hope to contribute to Ethereum's antifragility. -5. **Support as many EVM chains as possible**: We aspire that Reth can full-sync not only Ethereum, but also other chains like Optimism, Polygon, BNB Smart Chain, and more. If you're working on any of these projects, please reach out. -6. **Configurability**: We want to solve for node operators that care about fast historical queries, but also for hobbyists who cannot operate on large hardware. We also want to support teams and individuals who want both sync from genesis and via "fast sync". We envision that Reth will be configurable enough and provide configurable "profiles" for the tradeoffs that each team faces. +While solutions like Erigon’s compressed archive format demonstrate that full historical proofs can be stored efficiently (~5 TB), most real-world use cases do not require access to *all* historical state. Instead, the overwhelming majority of applications only require proofs over a **recent, bounded time window** (e.g. the last 7 days for challenge games).
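+For context, this is the shape of request such applications issue: an `eth_getProof` call pinned to a specific historical block (the address, empty storage-key list, and block number below are illustrative):
+```bash
+curl -s http://localhost:8545 \
+  -H 'Content-Type: application/json' \
+  -d '{"jsonrpc":"2.0","id":1,"method":"eth_getProof","params":["0x4200000000000000000000000000000000000006",[],"0x2000000"]}'
+```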
-## Status +This fork introduces a **Bounded History Sidecar** architecture for historical state proofs. The goal is to provide: +- **Crash-Free Proof Generation:** Serve `eth_getProof` for deep historical blocks without the OOM risks associated with in-memory reverts. +- **Bounded Storage Footprint:** Maintain a fixed storage size (linear in the configured window) rather than unbounded growth. +- **Zero-Overhead Sync:** Utilize Reth's Execution Extensions (ExEx) to process and index history asynchronously, ensuring the main node's sync speed and tip latency are unaffected. -Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. +## Architecture: Bounded History Sidecar -More historical context below: +This module implements a **Sidecar Storage Pattern**. Instead of burdening the main node's database with historical data, we maintain a dedicated, secondary MDBX environment optimized specifically for serving proofs. -- We released 1.0 "production-ready" stable Reth in June 2024. - - Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./audit/sigma_prime_audit_v2.pdf). - - Revm (the EVM used in Reth) underwent an audit with [Guido Vranken](https://x.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon. -- We released multiple iterative beta versions, up to [beta.9](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.9) on Monday June 3, 2024, the last beta release. -- We released [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4, 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. -- We shipped iterative improvements until the last alpha release on February 28, 2024, [0.1.0-alpha.21](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.21). -- We [initially announced](https://www.paradigm.xyz/2023/06/reth-alpha) [0.1.0-alpha.1](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.1) on June 20, 2023. +### Core Mechanism: Versioned State +Unlike standard Reth (which stores the *current* state and calculates history by reverting diffs), this module implements a **Versioned State Store**. -### Database compatibility +1. **`AccountTrieHistory` & `StorageTrieHistory`**: Stores the intermediate branch nodes of the Merkle Patricia Trie. Each node is versioned by block number, allowing us to traverse the exact trie structure as it existed at any past block. +2. **`HashedAccountHistory` & `HashedStorageHistory`**: Stores the actual account data (nonce, balance) and storage slot values at the leaves of the trie, also versioned by block number. -We do not have any breaking database changes since beta.1, and we do not plan any in the near future.
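+Conceptually, every read at a historical block `N` resolves a key to its newest version at or below `N`. A minimal sketch of that lookup rule (an illustrative in-memory stand-in; the real schema is the MDBX layout implemented in `reth-optimism-trie`):
+```rust
+use std::collections::BTreeMap;
+
+/// Illustrative versioned store: (trie node path, block number) -> encoded node.
+struct VersionedNodes {
+    nodes: BTreeMap<(Vec<u8>, u64), Vec<u8>>,
+}
+
+impl VersionedNodes {
+    /// Record the value a node takes effect at `block`.
+    fn insert(&mut self, path: Vec<u8>, block: u64, node: Vec<u8>) {
+        self.nodes.insert((path, block), node);
+    }
+
+    /// Read a node as it existed at `block`: the newest version <= `block`.
+    fn get_at(&self, path: &[u8], block: u64) -> Option<&[u8]> {
+        self.nodes
+            .range((path.to_vec(), u64::MIN)..=(path.to_vec(), block))
+            .next_back()
+            .map(|(_, node)| node.as_slice())
+    }
+}
+```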
+### Initialization: State Snapshot +To ensure the service is easy to set up on existing nodes with millions of blocks, we do not require a full chain re-sync. Instead, the module requires an **Initial State Snapshot** via the CLI: -Reth [v0.2.0-beta.1](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) includes -a [set of breaking database changes](https://github.com/paradigmxyz/reth/pull/5191) that makes it impossible to use database files produced by earlier versions. +1. **Capture:** The CLI command captures the *current* state of the blockchain (Account and Storage Tries) from the main database. +2. **Seed:** It populates the sidecar with this baseline state. +3. **Track:** Once initialized, the node begins tracking new blocks and maintaining history from that point forward. -If you had a database produced by alpha versions of Reth, you need to drop it with `reth db drop` -(using the same arguments such as `--config` or `--datadir` that you passed to `reth node`), and resync using the same `reth node` command you've used before. +This ensures that the proof window has a valid starting point immediately. -## For Users +### Data Flow -See the [Reth documentation](https://reth.rs/) for instructions on how to install and run Reth. +1. **Initialization:** The operator runs the initialization CLI command to snapshot the current main DB state and seed the sidecar. +2. **Ingestion (Write):** As the node syncs, the Execution Extension (`ExEx`) captures the `TrieUpdates` (branch nodes) and `HashedPostState` (leaf values) in each block and writes them to the sidecar DB tagged with the block number. +3. **Retrieval (Read):** When `eth_getProof` is called for a historical block, we look up the trie nodes valid at that specific block version. +4. **Maintenance (Prune):** A background process monitors the chain tip. Once a block falls outside the configured window (e.g., > 7 days old), its specific history versions are deleted to reclaim space. -## For Developers +## New Components -### Using reth as a library +### 1. `reth-optimism-exex` +This crate implements the Execution Extension (ExEx) that acts as the bridge between the main node and the sidecar storage. -You can use individual crates of reth in your project. +- Ingestion Pipeline: Subscribes to the node's canonical state notifications to capture `ExecutionOutcome`s in real time. +- Diff Extraction: Isolates the specific `TrieUpdates` (branch nodes) and `HashedPostState` (leaf values) changed in each block. +- Persistence: Writes these versioned updates to the sidecar MDBX database without blocking the main datastore. +- Lifecycle Management: Orchestrates the pruning process, ensuring the sidecar storage remains bounded by the configured window. -The crate docs can be found [here](https://reth.rs/docs/). +### 2. `reth-optimism-trie` +This crate provides the Storage Engine and Proof Logic that power the sidecar. -For a general overview of the crates, see [Project Layout](./docs/repo/layout.md). +- Versioned Storage: Implements `MdbxProofsStorage`, a specialized database schema optimized for time-series trie node retrieval. +- Proof Generation: Replaces the standard "revert-based" proof logic with a direct "lookup-based" approach. +- Pruning Logic: Implements the retention algorithm that safely deletes history older than the configured window. -### Contributing +### 3. RPC Overrides +The module injects custom handlers to intercept specific RPC calls: +- **`eth_getProof`**: Checks if the requested block is historical. If so, it fetches the account and storage proofs from the secondary Proofs DB. +- **`debug_executionWitness`**: Allows debugging and tracing against historical states.
+- **`debug_executePayload`**: Executes a payload against the historical state to generate an execution witness. -If you want to contribute, or follow along with contributor discussion, you can use our [main telegram](https://t.me/paradigm_reth) to chat with us about the development of Reth! +## Hardware Requirements -- Our contributor guidelines can be found in [`CONTRIBUTING.md`](./CONTRIBUTING.md). -- See our [contributor docs](./docs) for more information on the project. A good starting point is [Project Layout](./docs/repo/layout.md). +Recommended specifications: -### Building and testing +- **CPU**: 8-core processor with good single-core performance +- **RAM**: Minimum 16 GB (32 GB recommended) +- **Storage**: NVMe SSD with adequate capacity for chain data plus snapshots + - Calculate: `(2 × current_chain_size) + snapshot_size + 20% buffer` + - *Note*: Storing 4 weeks of full proof history on a network like Base Testnet consumes approximately **1 TB** of additional storage. +- **Network**: Stable internet connection with good bandwidth - +## Usage -The Minimum Supported Rust Version (MSRV) of this project is [1.88.0](https://blog.rust-lang.org/2025/06/26/Rust-1.88.0/). +### 1. Initialization +Before starting the node with the sidecar enabled, you must initialize the proof storage. This command snapshots the current state of the main database to seed the sidecar. -See the docs for detailed instructions on how to [build from source](https://reth.rs/installation/source/). +```bash +op-reth proofs init \ + --datadir=/path/to/reth-datadir \ + --proofs-history.storage-path=/path/to/proofs-db +``` -To fully test Reth, you will need to have [Geth installed](https://geth.ethereum.org/docs/getting-started/installing-geth), but it is possible to run a subset of tests without Geth. +### 2. Running the Node (Syncing) -First, clone the repository: +Once initialized, start the node with the `--proofs-history` flags to enable the sidecar service. -```sh -git clone https://github.com/paradigmxyz/reth -cd reth +```bash +op-reth node \ + --chain base-sepolia \ + --datadir=/path/to/reth-datadir \ + --proofs-history \ + --proofs-history.storage-path=/path/to/proofs-db \ + --proofs-history.window=600000 \ + --proofs-history.prune-interval=15s ``` -Next, run the tests: +**Configuration Flags** -```sh -cargo nextest run --workspace +| Flag | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| `--proofs-history` | Enables the historical proofs module. | `false` | No | +| `--proofs-history.storage-path` | Path to the separate MDBX database for storing proofs. | `None` | **Yes** (if enabled) | +| `--proofs-history.window` | Retention period in **blocks**. Data older than `Tip - Window` is pruned. | `1,296,000` (~30 days) | No | +| `--proofs-history.prune-interval` | How frequently the pruner runs to delete old data. | `15s` | No | -# Run the Ethereum Foundation tests -make ef-tests -``` +### 3. Management -We highly recommend using [`cargo nextest`](https://nexte.st/) to speed up testing. -Using `cargo test` to run tests may work fine, but this is not tested and does not support more advanced features like retries for spurious failures. -> **Note** -> -> Some tests use random number generators to generate test data. If you want to use a deterministic seed, you can set the `SEED` environment variable. +We provide custom CLI commands to manage the proof history manually. +`op-reth proofs prune` +Manually triggers the pruning process. Useful for reclaiming space immediately.
-## Getting Help +```bash +op-reth proofs prune \ + --datadir=/path/to/reth-datadir \ + --proofs-history.storage-path=/path/to/proofs-db \ + --proofs-history.window=600000 \ + --proofs-history.prune-batch-size=10000 +``` -If you have any questions, first see if the answer to your question can be found in the [docs][book]. +`op-reth proofs unwind` +Manually unwinds the proof history to a specific block. Useful for recovering from corrupted states. -If the answer is not there: +```bash +op-reth proofs unwind \ + --datadir=/path/to/reth-datadir \ + --proofs-history.storage-path=/path/to/proofs-db \ + --target=90 +``` -- Join the [Telegram][tg-url] to get help, or -- Open a [discussion](https://github.com/paradigmxyz/reth/discussions/new) with your question, or -- Open an issue with [the bug](https://github.com/paradigmxyz/reth/issues/new?assignees=&labels=C-bug%2CS-needs-triage&projects=&template=bug.yml) +### 4. Metrics +A comprehensive Grafana dashboard is available at `etc/grafana/dashboards/op-proof-history.json` to monitor: +- Syncing speed +- Sidecar storage size +- Pruning performance +- Proof generation latency -## Security +Sample metric snapshot available at: https://snapshots.raintank.io/dashboard/snapshot/bzYXscOCugsxO6C2bzFB1XbskxG0KFdo -See [`SECURITY.md`](./SECURITY.md). +## Performance -## Acknowledgements +We benchmarked the sidecar on Base Sepolia to validate latency and throughput under load. -Reth is a new implementation of the Ethereum protocol. In the process of developing the node we investigated the design decisions other nodes have made to understand what is done well, what is not, and where we can improve the status quo. +| Metric | Result | +| :--- | :--- | +| Avg Latency | 15 ms | +| Throughput | ~5,000 req/sec | -None of this would have been possible without them, so big shoutout to the teams below: +**Benchmark Configuration** +- Network: Base Sepolia (Local Node) +- Target: WETH Contract (0x420...0006) +- Range: ~700k blocks (34,011,476 to 34,704,213) +- Load: 10 concurrent workers, 100 requests per block iteration. -- [Geth](https://github.com/ethereum/go-ethereum/): We would like to express our heartfelt gratitude to the go-ethereum team for their outstanding contributions to Ethereum over the years. Their tireless efforts and dedication have helped to shape the Ethereum ecosystem and make it the vibrant and innovative community it is today. Thank you for your hard work and commitment to the project. -- [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art research on the performance limits of Ethereum nodes. -- [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63) and [ECIES](https://github.com/paradigmxyz/reth/pull/80). Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages. +The test script iterates through the block range, spawning 10 concurrent workers. Each worker selects an address round-robin from a pre-defined set, dynamically calculates the storage slot for `balanceOf[address]`, and sends an `eth_getProof` request.
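+A minimal sketch of the slot derivation (assuming the standard WETH9 storage layout, where the `balanceOf` mapping occupies base slot 3; the helper below is illustrative and not part of this repo):
+```rust
+use alloy_primitives::{keccak256, Address, B256, U256};
+
+/// Solidity mapping slot: keccak256(abi.encode(key, base_slot)).
+/// For WETH9, `balanceOf` is assumed to be the mapping at base slot 3.
+fn balance_of_slot(holder: Address) -> B256 {
+    let mut buf = [0u8; 64];
+    buf[12..32].copy_from_slice(holder.as_slice()); // address, left-padded to 32 bytes
+    buf[32..64].copy_from_slice(&U256::from(3u64).to_be_bytes::<32>()); // base slot 3
+    keccak256(buf)
+}
+```
+The resulting slot is what each worker passes as the storage key in its `eth_getProof` call.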
-## Warning +Visual Proof: +- [Grafana Snapshot: Proof Metrics](https://snapshots.raintank.io/dashboard/snapshot/bzYXscOCugsxO6C2bzFB1XbskxG0KFdo) +- [Grafana Snapshot: Reth Metrics](https://snapshots.raintank.io/dashboard/snapshot/hxZaChzsrez3Q3w52IHj0Wab3H1wndUg) -The `NippyJar` and `Compact` encoding formats and their implementations are designed for storing and retrieving data internally. They are not hardened to safely read potentially malicious data. +## Limitations -[book]: https://reth.rs/ -[tg-url]: https://t.me/paradigm_reth +- **High Storage Footprint**: The versioned state model trades storage space for fast, revert-free proof generation. Storing versioned Merkle Trie nodes (hashes and branch paths) for every block modification is significantly more storage-intensive than the flat state diffs used by the main node. +- **Forward-Only Availability**: The sidecar implements a "record-forward" strategy. It cannot generate proofs for blocks prior to the sidecar's initialization; it does not backfill history. +- **Pruning & IOPS**: Pruning old history is a random-write-intensive operation. High-performance NVMe storage is required to ensure the pruner can keep pace with the chain's growth on high-throughput networks. \ No newline at end of file diff --git a/assets/op-rs-logo.png b/assets/op-rs-logo.png new file mode 100644 index 00000000000..62c8a721dd6 Binary files /dev/null and b/assets/op-rs-logo.png differ diff --git a/assets/reth-alpha.png b/assets/reth-alpha.png deleted file mode 100644 index d29f95e59e2..00000000000 Binary files a/assets/reth-alpha.png and /dev/null differ diff --git a/assets/reth-beta.png b/assets/reth-beta.png deleted file mode 100644 index 61b6c24460a..00000000000 Binary files a/assets/reth-beta.png and /dev/null differ diff --git a/assets/reth-docs.png b/assets/reth-docs.png deleted file mode 100644 index c49ce28364a..00000000000 Binary files a/assets/reth-docs.png and /dev/null differ diff --git a/assets/reth-prod.png b/assets/reth-prod.png deleted file mode 100644 index d06c4579ccf..00000000000 Binary files a/assets/reth-prod.png and /dev/null differ diff --git a/assets/reth.jpg b/assets/reth.jpg deleted file mode 100644 index e8fe2b90141..00000000000 Binary files a/assets/reth.jpg and /dev/null differ diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000000..0d66d8590ef --- /dev/null +++ b/codecov.yml @@ -0,0 +1,28 @@ +coverage: + status: + patch: off + project: + default: + threshold: null + informational: true +github_checks: + annotations: false +comment: + layout: "reach, files, flags, components" + require_changes: true +component_management: + individual_components: + - component_id: reth_binary + name: reth binary + paths: + - bin/** + - crates/config/** + - crates/metrics/** + - crates/tracing/** + - component_id: op-historical-proof + name: op historical proof + paths: + - crates/optimism/exex/** + - crates/optimism/rpc/src/debug.rs + - crates/optimism/rpc/src/eth/proofs.rs + - crates/optimism/trie/** \ No newline at end of file diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 1592cf78e05..4d2af35a441 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -185,7 +185,7 @@ impl Chain { /// 2. The execution outcome representing the final state. /// 3. The trie updates map. /// 4.
The hashed state map. - #[allow(clippy::type_complexity)] + #[expect(clippy::type_complexity)] pub fn into_inner( self, ) -> ( @@ -488,6 +488,10 @@ pub(super) mod serde_bincode_compat { serde_bincode_compat::{RecoveredBlock, SerdeBincodeCompat}, Block, NodePrimitives, }; + use reth_trie_common::serde_bincode_compat::{ + hashed_state::HashedPostStateSorted, + updates::{TrieUpdates, TrieUpdatesSorted}, + }; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -517,18 +521,11 @@ pub(super) mod serde_bincode_compat { blocks: RecoveredBlocks<'a, N::Block>, execution_outcome: serde_bincode_compat::ExecutionOutcome<'a, N::Receipt>, #[serde(default, rename = "trie_updates_legacy")] - _trie_updates_legacy: - Option<BTreeMap<BlockNumber, reth_trie_common::serde_bincode_compat::updates::TrieUpdates<'a>>>, + _trie_updates_legacy: Option<BTreeMap<BlockNumber, TrieUpdates<'a>>>, #[serde(default)] - trie_updates: BTreeMap< - BlockNumber, - reth_trie_common::serde_bincode_compat::updates::TrieUpdatesSorted<'a>, - >, + trie_updates: BTreeMap<BlockNumber, TrieUpdatesSorted<'a>>, #[serde(default)] - hashed_state: BTreeMap< - BlockNumber, - reth_trie_common::serde_bincode_compat::hashed_state::HashedPostStateSorted<'a>, - >, + hashed_state: BTreeMap<BlockNumber, HashedPostStateSorted<'a>>, } #[derive(Debug)] diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 57b180eb30b..a8569754261 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -11,7 +11,7 @@ use alloy_primitives::BlockNumber; use reth_ethereum_primitives::Receipt; use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, Executor}; use reth_node_api::{Block as _, BlockBody as _, NodePrimitives}; -use reth_primitives_traits::{format_gas_throughput, RecoveredBlock, SignedTransaction}; +use reth_primitives_traits::{format_gas_throughput, ReceiptTy, RecoveredBlock, SignedTransaction}; use reth_provider::{ BlockReader, Chain, ExecutionOutcome, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, }; @@ -173,7 +173,7 @@ where { type Item = BackfillJobResult<( RecoveredBlock<<E::Primitives as NodePrimitives>::Block>, - BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>, + BlockExecutionOutput<ReceiptTy<E::Primitives>>, )>; fn next(&mut self) -> Option<Self::Item> { @@ -201,10 +201,8 @@ where pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<( - RecoveredBlock<<E::Primitives as NodePrimitives>::Block>, - BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>, - )> { + ) -> BackfillJobResult<(RecoveredBlock<<E::Primitives as NodePrimitives>::Block>, BlockExecutionOutput<ReceiptTy<E::Primitives>>)> + { // Fetch the block with senders for execution.
let block_with_senders = self .provider diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index ecaa339ab8e..c959dfa8678 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -19,9 +19,20 @@ reth-optimism-evm.workspace = true reth-optimism-payload-builder.workspace = true reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true +reth-optimism-exex = { workspace = true, features = ["metrics"] } +reth-optimism-trie = { workspace = true, features = ["metrics"] } +reth-node-builder.workspace = true +reth-db-api.workspace = true +reth-chainspec.workspace = true +reth-db.workspace = true +reth-tasks.workspace = true clap = { workspace = true, features = ["derive", "env"] } tracing.workspace = true +eyre.workspace = true +futures-util.workspace = true +tokio.workspace = true +humantime.workspace = true [lints] workspace = true @@ -33,6 +44,7 @@ otlp = ["reth-optimism-cli/otlp"] js-tracer = [ "reth-optimism-node/js-tracer", + "reth-node-builder/js-tracer", ] jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 48452326d8b..ad574c96964 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -1,8 +1,23 @@ #![allow(missing_docs, rustdoc::missing_crate_level_docs)] -use clap::Parser; +use clap::{builder::ArgPredicate, Parser}; +use eyre::ErrReport; +use futures_util::FutureExt; +use reth_db::DatabaseEnv; +use reth_db_api::database_metrics::DatabaseMetrics; +use reth_node_builder::{FullNodeComponents, NodeBuilder, WithLaunchContext}; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; +use reth_optimism_exex::OpProofsExEx; use reth_optimism_node::{args::RollupArgs, OpNode}; +use reth_optimism_rpc::{ + debug::{DebugApiExt, DebugApiOverrideServer}, + eth::proofs::{EthApiExt, EthApiOverrideServer}, +}; +use reth_optimism_trie::{db::MdbxProofsStorage, OpProofsStorage}; +use reth_tasks::TaskExecutor; +use std::{path::PathBuf, sync::Arc, time::Duration}; +use tokio::time::sleep; use tracing::info; #[global_allocator] @@ -12,6 +27,165 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::ne #[unsafe(export_name = "_rjem_malloc_conf")] static MALLOC_CONF: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0"; +#[derive(Debug, Clone, PartialEq, Eq, clap::Args)] +#[command(next_help_heading = "Proofs History")] +struct Args { + #[command(flatten)] + pub rollup_args: RollupArgs, + + /// If true, initialize external-proofs exex to save and serve trie nodes to provide proofs + /// faster. + #[arg( + long = "proofs-history", + value_name = "PROOFS_HISTORY", + default_value_ifs([ + ("proofs-history.storage-path", ArgPredicate::IsPresent, "true") + ]) + )] + pub proofs_history: bool, + + /// The path to the storage DB for proofs history. + #[arg(long = "proofs-history.storage-path", value_name = "PROOFS_HISTORY_STORAGE_PATH")] + pub proofs_history_storage_path: Option, + + /// The window to span blocks for proofs history. Value is the number of blocks. + /// Default is 1 month of blocks based on 2 seconds block time. + /// 30 * 24 * 60 * 60 / 2 = `1_296_000` + #[arg( + long = "proofs-history.window", + default_value_t = 1_296_000, + value_name = "PROOFS_HISTORY_WINDOW" + )] + pub proofs_history_window: u64, + + /// Interval between proof-storage prune runs. Accepts human-friendly durations + /// like "100s", "5m", "1h". 
Defaults to 15s. + /// + /// - Shorter intervals prune smaller batches more often, so each prune run tends to be faster + /// and the blocking pause for writes is shorter, at the cost of more frequent pauses. + /// - Longer intervals prune larger batches less often, which reduces how often pruning runs, + /// but each run can take longer and block writes for longer. + /// + /// A shorter interval is preferred so that prune + /// runs stay small and don’t stall writes for too long. + /// + /// CLI: `--proofs-history.prune-interval 10m` + #[arg( + long = "proofs-history.prune-interval", + value_name = "PROOFS_HISTORY_PRUNE_INTERVAL", + default_value = "15s", + value_parser = humantime::parse_duration + )] + pub proofs_history_prune_interval: Duration, + /// Verification interval: perform full block execution every N blocks for data integrity. + /// - 0: Disabled (default): always use the fast path with pre-computed data from notifications + /// - 1: Always verify (execute every block, slowest) + /// - N: Verify every Nth block (e.g., 100 = every 100 blocks) + /// + /// Periodic verification helps catch data corruption or consensus bugs while maintaining + /// good performance. + /// + /// CLI: `--proofs-history.verification-interval 100` + #[arg( + long = "proofs-history.verification-interval", + value_name = "PROOFS_HISTORY_VERIFICATION_INTERVAL", + default_value_t = 0 + )] + pub proofs_history_verification_interval: u64, +} + +/// Single entry point that handles: +/// - no proofs history (plain node), +/// - MDBX-backed proofs storage. +async fn launch_node( + builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, OpChainSpec>>, + args: Args, +) -> eyre::Result<(), ErrReport> { + let proofs_history_enabled = args.proofs_history; + let rollup_args = args.rollup_args.clone(); + let proofs_history_window = args.proofs_history_window; + let proofs_history_prune_interval = args.proofs_history_prune_interval; + let proofs_history_verification_interval = args.proofs_history_verification_interval; + + // Start from a plain OpNode builder + let mut node_builder = builder.node(OpNode::new(rollup_args)); + + if proofs_history_enabled { + let path = args + .proofs_history_storage_path + .clone() + .expect("storage path must be provided when proofs history is enabled"); + info!(target: "reth::cli", "Using on-disk storage for proofs history"); + + let mdbx = Arc::new( + MdbxProofsStorage::new(&path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ); + let storage: OpProofsStorage<_> = mdbx.clone().into(); + + let storage_exec = storage.clone(); + + node_builder = node_builder + .on_node_started(move |node| { + spawn_proofs_db_metrics( + node.task_executor, + mdbx, + node.config.metrics.push_gateway_interval, + ); + Ok(()) + }) + .install_exex("proofs-history", async move |exex_context| { + Ok(OpProofsExEx::new( + exex_context, + storage_exec, + proofs_history_window, + proofs_history_prune_interval, + proofs_history_verification_interval, + ) + .run() + .boxed()) + }) + .extend_rpc_modules(move |ctx| { + let api_ext = EthApiExt::new(ctx.registry.eth_api().clone(), storage.clone()); + let debug_ext = DebugApiExt::new( + ctx.node().provider().clone(), + ctx.registry.eth_api().clone(), + storage, + Box::new(ctx.node().task_executor().clone()), + ctx.node().evm_config().clone(), + ); + ctx.modules.replace_configured(api_ext.into_rpc())?; + ctx.modules.replace_configured(debug_ext.into_rpc())?; + Ok(()) + }); + } + + // In all cases (with or without proofs), launch the node.
+ let handle = node_builder.launch_with_debug_capabilities().await?; + handle.node_exit_future.await +} + +/// Spawns a task that periodically reports metrics for the proofs DB. +fn spawn_proofs_db_metrics( + executor: TaskExecutor, + storage: Arc<MdbxProofsStorage>, + metrics_report_interval: Duration, +) { + executor.spawn_critical("op-proofs-storage-metrics", async move { + info!( + target: "reth::cli", + ?metrics_report_interval, + "Starting op-proofs-storage metrics task" + ); + + loop { + sleep(metrics_report_interval).await; + storage.report_metrics(); + } + }); +} + fn main() { reth_cli_util::sigsegv_handler::install(); @@ -22,14 +196,11 @@ fn main() { } } - if let Err(err) = - Cli::<OpChainSpecParser, RollupArgs>::parse().run(async move |builder, rollup_args| { - info!(target: "reth::cli", "Launching node"); - let handle = - builder.node(OpNode::new(rollup_args)).launch_with_debug_capabilities().await?; - handle.node_exit_future.await - }) - { + if let Err(err) = Cli::<OpChainSpecParser, Args>::parse().run(async move |builder, args| { + info!(target: "reth::cli", "Launching node"); + launch_node(builder, args.clone()).await?; + Ok(()) + }) { eprintln!("Error: {err:?}"); std::process::exit(1); } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 1c56c07b94f..df90b5a0d8c 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -37,6 +37,7 @@ reth-node-metrics.workspace = true reth-optimism-primitives.workspace = true reth-optimism-chainspec = { workspace = true, features = ["superchain-configs"] } reth-optimism-consensus.workspace = true +reth-optimism-trie.workspace = true reth-chainspec.workspace = true reth-node-events.workspace = true @@ -101,10 +102,7 @@ jemalloc-symbols = [ tracy = ["reth-tracing/tracy", "reth-node-core/tracy"] -dev = [ - "dep:proptest", - "reth-cli-commands/arbitrary", -] +dev = ["dep:proptest", "reth-cli-commands/arbitrary"] serde = [ "alloy-consensus/serde", diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 8785338e5ec..683a40f3a33 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -112,6 +112,9 @@ where Commands::ReExecute(command) => { runner.run_until_ctrl_c(command.execute::<N>(components)) } + Commands::OpProofs(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<N>()) + } } } diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index 5edd55b0ccb..24efdd8959b 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -14,6 +14,7 @@ use std::{fmt, sync::Arc}; pub mod import; pub mod import_receipts; pub mod init_state; +pub mod op_proofs; #[cfg(feature = "dev")] pub mod test_vectors; @@ -61,6 +62,9 @@ pub enum Commands), + /// Manage storage of historical proofs in an expanded trie DB within the fault proof window. + #[command(name = "proofs")] + OpProofs(op_proofs::Command), } impl< @@ -85,6 +89,7 @@ impl< #[cfg(feature = "dev")] Self::TestVectors(_) => None, Self::ReExecute(cmd) => cmd.chain_spec(), + Self::OpProofs(cmd) => cmd.chain_spec(), } } } diff --git a/crates/optimism/cli/src/commands/op_proofs/init.rs b/crates/optimism/cli/src/commands/op_proofs/init.rs new file mode 100644 index 00000000000..a9aba66dc56 --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/init.rs @@ -0,0 +1,100 @@ +//! Command that initializes the OP proofs storage with the current state of the chain.
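+//!
+//! Illustrative invocation (paths are placeholders):
+//!
+//! ```text
+//! op-reth proofs init \
+//!     --datadir /path/to/reth-datadir \
+//!     --proofs-history.storage-path /path/to/proofs-db
+//! ```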
+ +use clap::Parser; +use reth_chainspec::ChainInfo; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{db::MdbxProofsStorage, BackfillJob, OpProofsStorage, OpProofsStore}; +use reth_provider::{BlockNumReader, DBProvider, DatabaseProviderFactory}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Initializes the proofs storage with the current state of the chain. +/// +/// This command must be run before starting the node with proofs history enabled. +/// It backfills the proofs storage with trie nodes from the current chain state. +#[derive(Debug, Parser)] +pub struct InitCommand<C: ChainSpecParser> { + #[command(flatten)] + env: EnvironmentArgs<C>, + + /// The path to the storage DB for proofs history. + /// + /// This should match the path used when starting the node with + /// `--proofs-history.storage-path`. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, +} + +impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> InitCommand<C> { + /// Execute the `proofs init` command + pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Initializing OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?; + + // Create the proofs storage + let storage: OpProofsStorage<Arc<MdbxProofsStorage>> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + // Check if already initialized + if let Some((block_number, block_hash)) = storage.get_earliest_block_number().await? { + info!( + target: "reth::cli", + block_number = block_number, + block_hash = ?block_hash, + "Proofs storage already initialized" + ); + return Ok(()); + } + + // Get the current chain state + let ChainInfo { best_number, best_hash, .. } = provider_factory.chain_info()?; + + info!( + target: "reth::cli", + best_number = best_number, + best_hash = ?best_hash, + "Starting backfill job for current chain state" + ); + + // Run the backfill job + { + let db_provider = + provider_factory.database_provider_ro()?.disable_long_read_transaction_safety(); + let db_tx = db_provider.into_tx(); + + BackfillJob::new(storage.clone(), &db_tx).run(best_number, best_hash).await?; + } + + info!( + target: "reth::cli", + best_number = best_number, + best_hash = ?best_hash, + "Proofs storage initialized successfully" + ); + + Ok(()) + } +} + +impl<C: ChainSpecParser> InitCommand<C> { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> { + Some(&self.env.chain) + } +} diff --git a/crates/optimism/cli/src/commands/op_proofs/mod.rs b/crates/optimism/cli/src/commands/op_proofs/mod.rs new file mode 100644 index 00000000000..44ac1fc3866 --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/mod.rs @@ -0,0 +1,61 @@ +//!
OP Proofs management commands + +use clap::{Parser, Subcommand}; +use reth_cli::chainspec::ChainSpecParser; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use std::sync::Arc; + +pub mod init; +pub mod prune; +pub mod unwind; + +/// `op-reth proofs` command +#[derive(Debug, Parser)] +pub struct Command<C: ChainSpecParser> { + #[command(subcommand)] + command: Subcommands<C>, +} + +impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> Command<C> { + /// Execute the `proofs` command + pub async fn execute< + N: reth_cli_commands::common::CliNodeTypes< + ChainSpec = C::ChainSpec, + Primitives = OpPrimitives, + >, + >( + self, + ) -> eyre::Result<()> { + match self.command { + Subcommands::Init(cmd) => cmd.execute::<N>().await, + Subcommands::Prune(cmd) => cmd.execute::<N>().await, + Subcommands::Unwind(cmd) => cmd.execute::<N>().await, + } + } +} + +impl<C: ChainSpecParser> Command<C> { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> { + match &self.command { + Subcommands::Init(cmd) => cmd.chain_spec(), + Subcommands::Prune(cmd) => cmd.chain_spec(), + Subcommands::Unwind(cmd) => cmd.chain_spec(), + } + } +} + +/// `op-reth proofs` subcommands +#[derive(Debug, Subcommand)] +pub enum Subcommands<C: ChainSpecParser> { + /// Initialize the proofs storage with the current state of the chain + #[command(name = "init")] + Init(init::InitCommand<C>), + /// Prune old proof history to reclaim space + #[command(name = "prune")] + Prune(prune::PruneCommand<C>), + /// Unwind the proofs storage to a specific block + #[command(name = "unwind")] + Unwind(unwind::UnwindCommand<C>), +} diff --git a/crates/optimism/cli/src/commands/op_proofs/prune.rs b/crates/optimism/cli/src/commands/op_proofs/prune.rs new file mode 100644 index 00000000000..48d30e1ab2c --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/prune.rs @@ -0,0 +1,90 @@ +//! Command that prunes the OP proofs storage. + +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{ + db::MdbxProofsStorage, OpProofStoragePruner, OpProofsStorage, OpProofsStore, +}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Prunes the proofs storage by removing old proof history and state updates. +#[derive(Debug, Parser)] +pub struct PruneCommand<C: ChainSpecParser> { + #[command(flatten)] + env: EnvironmentArgs<C>, + + /// The path to the storage DB for proofs history. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, + + /// The window to span blocks for proofs history. Value is the number of blocks. + /// Default is 1 month of blocks based on 2 seconds block time. + /// 30 * 24 * 60 * 60 / 2 = `1_296_000` + #[arg( + long = "proofs-history.window", + default_value_t = 1_296_000, + value_name = "PROOFS_HISTORY_WINDOW" + )] + pub proofs_history_window: u64, + + /// The batch size for pruning operations. + #[arg( + long = "proofs-history.prune-batch-size", + default_value_t = 1000, + value_name = "PROOFS_HISTORY_PRUNE_BATCH_SIZE" + )] + pub proofs_history_prune_batch_size: u64, +} + +impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> PruneCommand<C> { + /// Execute [`PruneCommand`].
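+    ///
+    /// Opens the proofs storage, logs the currently stored block range, and runs the
+    /// pruner with the configured window and batch size.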
+ pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Pruning OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?; + + let storage: OpProofsStorage<Arc<MdbxProofsStorage>> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + let earliest_block = storage.get_earliest_block_number().await?; + let latest_block = storage.get_latest_block_number().await?; + info!( + target: "reth::cli", + ?earliest_block, + ?latest_block, + "Current proofs storage block range" + ); + + let pruner = OpProofStoragePruner::new( + storage, + provider_factory, + self.proofs_history_window, + self.proofs_history_prune_batch_size, + ); + pruner.run().await; + Ok(()) + } +} + +impl<C: ChainSpecParser> PruneCommand<C> { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> { + Some(&self.env.chain) + } +} diff --git a/crates/optimism/cli/src/commands/op_proofs/unwind.rs b/crates/optimism/cli/src/commands/op_proofs/unwind.rs new file mode 100644 index 00000000000..0d4e67bf60a --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/unwind.rs @@ -0,0 +1,106 @@ +//! Command that unwinds the OP proofs storage to a specific block number. + +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{db::MdbxProofsStorage, OpProofsStorage, OpProofsStore}; +use reth_provider::{BlockReader, TransactionVariant}; +use std::{path::PathBuf, sync::Arc}; +use tracing::{info, warn}; + +/// Unwinds the proofs storage to a specific block number. +/// +/// This command removes all proof history and state updates after the target block number. +#[derive(Debug, Parser)] +pub struct UnwindCommand<C: ChainSpecParser> { + #[command(flatten)] + env: EnvironmentArgs<C>, + + /// The path to the storage DB for proofs history. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, + + /// The target block number to unwind to. + /// + /// All history *after* this block will be removed. + #[arg(long, value_name = "TARGET_BLOCK")] + pub target: u64, +} + +impl<C: ChainSpecParser> UnwindCommand<C> { + /// Validates that the target block number is within a valid range for unwinding. + async fn validate_unwind_range( + &self, + storage: &OpProofsStorage<Arc<MdbxProofsStorage>>, + ) -> eyre::Result<bool> { + let (Some((earliest, _)), Some((latest, _))) = + (storage.get_earliest_block_number().await?, storage.get_latest_block_number().await?) + else { + warn!(target: "reth::cli", "No blocks found in proofs storage. Nothing to unwind."); + return Ok(false); + }; + + if self.target <= earliest { + warn!(target: "reth::cli", unwind_target = ?self.target, ?earliest, "Target block is at or below the earliest block in proofs storage. Nothing to unwind."); + return Ok(false); + } + + if self.target > latest { + warn!(target: "reth::cli", unwind_target = ?self.target, ?latest, "Target block is beyond the latest block in proofs storage.
Nothing to unwind."); + return Ok(false); + } + + Ok(true) + } +} + +impl> UnwindCommand { + /// Execute [`UnwindCommand`]. + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Unwinding OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; + + // Create the proofs storage + let storage: OpProofsStorage> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + // Validate that the target block is within a valid range for unwinding + if !self.validate_unwind_range(&storage).await? { + return Ok(()); + } + + // Get the target block from the main database + let block = provider_factory + .recovered_block(self.target.into(), TransactionVariant::NoHash)? + .ok_or_else(|| { + eyre::eyre!("Target block {} not found in the main database", self.target) + })?; + + info!(target: "reth::cli", block_number = block.number, block_hash = %block.hash(), "Unwinding to target block"); + storage.unwind_history(block.block_with_parent()).await?; + + Ok(()) + } +} + +impl UnwindCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/crates/optimism/consensus/src/error.rs b/crates/optimism/consensus/src/error.rs index 73480a23661..7ada2e9bbaf 100644 --- a/crates/optimism/consensus/src/error.rs +++ b/crates/optimism/consensus/src/error.rs @@ -1,5 +1,6 @@ //! Optimism consensus errors +use alloc::sync::Arc; use alloy_primitives::B256; use reth_consensus::ConsensusError; use reth_storage_errors::provider::ProviderError; @@ -26,5 +27,55 @@ pub enum OpConsensusError { }, /// L1 [`ConsensusError`], that also occurs on L2. 
#[error(transparent)] - Eth(#[from] ConsensusError), + Eth(ConsensusError), +} + +impl From<OpConsensusError> for ConsensusError { + fn from(error: OpConsensusError) -> Self { + match error { + OpConsensusError::Eth(err) => err, + _ => Self::Custom(Arc::new(error)), + } + } +} + +impl From<ConsensusError> for OpConsensusError { + fn from(error: ConsensusError) -> Self { + if let ConsensusError::Custom(ref err) = error && + let Some(op_err) = err.downcast_ref::<Self>() + { + return op_err.clone() + } + Self::Eth(error) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn consensus_error_from_op_consensus_error() { + let consensus_err = ConsensusError::BaseFeeMissing; + let op_err = OpConsensusError::Eth(consensus_err); + let converted: ConsensusError = op_err.into(); + assert!(matches!(converted, ConsensusError::BaseFeeMissing)); + + let op_specific_err = OpConsensusError::WithdrawalsNonEmpty; + let converted: ConsensusError = op_specific_err.into(); + assert!(matches!(converted, ConsensusError::Custom(_))); + } + + #[test] + fn op_consensus_error_from_consensus_error() { + let consensus_err = ConsensusError::BaseFeeMissing; + let op_err: OpConsensusError = consensus_err.into(); + assert!(matches!(op_err, OpConsensusError::Eth(_))); + + let original = OpConsensusError::WithdrawalsNonEmpty; + let as_consensus: ConsensusError = original.into(); + let back_to_op: OpConsensusError = as_consensus.into(); + + assert!(matches!(back_to_op, OpConsensusError::WithdrawalsNonEmpty)); + } +} diff --git a/crates/optimism/consensus/src/validation/jovian.rs b/crates/optimism/consensus/src/validation/jovian.rs new file mode 100644 index 00000000000..d369ed5a040 --- /dev/null +++ b/crates/optimism/consensus/src/validation/jovian.rs @@ -0,0 +1,28 @@ +//! Block verification w.r.t. consensus rules new in Jovian hardfork. + +use alloy_consensus::BlockHeader; +use reth_consensus::ConsensusError; +use reth_execution_types::BlockExecutionResult; +use reth_optimism_primitives::DepositReceipt; +use reth_primitives_traits::GotExpected; + +/// Validates that the blob gas used is present and correctly computed if Jovian is active. +/// +/// After Jovian activation, blocks must include the `blob_gas_used` field in the header, +/// and it must match the computed blob gas used from execution. +pub fn validate_blob_gas_used<R: DepositReceipt>( + header: impl BlockHeader, + result: &BlockExecutionResult<R>, +) -> Result<(), ConsensusError> { + let computed_blob_gas_used = result.blob_gas_used; + let header_blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + + if computed_blob_gas_used != header_blob_gas_used { + return Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: computed_blob_gas_used, + expected: header_blob_gas_used, + })); + } + + Ok(()) +} diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 50c45f7172c..b0d6d18b6b8 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -2,6 +2,7 @@ pub mod canyon; pub mod isthmus; +pub mod jovian; // Re-export the decode_holocene_base_fee function for compatibility use reth_execution_types::BlockExecutionResult; @@ -92,16 +93,7 @@ pub fn validate_block_post_execution( ) -> Result<(), ConsensusError> { // Validate that the blob gas used is present and correctly computed if Jovian is active.
     if chain_spec.is_jovian_active_at_timestamp(header.timestamp()) {
-        let computed_blob_gas_used = result.blob_gas_used;
-        let header_blob_gas_used =
-            header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?;
-
-        if computed_blob_gas_used != header_blob_gas_used {
-            return Err(ConsensusError::BlobGasUsedDiff(GotExpected {
-                got: computed_blob_gas_used,
-                expected: header_blob_gas_used,
-            }));
-        }
+        jovian::validate_blob_gas_used(&header, result)?;
     }
 
     let receipts = &result.receipts;
diff --git a/crates/optimism/exex/Cargo.toml b/crates/optimism/exex/Cargo.toml
new file mode 100644
index 00000000000..cd5f8b2a760
--- /dev/null
+++ b/crates/optimism/exex/Cargo.toml
@@ -0,0 +1,61 @@
+[package]
+name = "reth-optimism-exex"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "Execution extensions for OP-Reth"
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-exex.workspace = true
+reth-execution-types.workspace = true
+reth-node-types.workspace = true
+reth-node-api.workspace = true
+reth-trie.workspace = true
+reth-provider.workspace = true
+
+# op-reth
+# proofs exex handles `TrieUpdates` in notifications
+reth-optimism-trie = { workspace = true, features = ["serde-bincode-compat"] }
+
+# alloy
+alloy-consensus.workspace = true
+alloy-eips.workspace = true
+
+# misc
+eyre.workspace = true
+futures-util.workspace = true
+tracing.workspace = true
+
+[dev-dependencies]
+tokio = { workspace = true, features = ["test-util", "rt-multi-thread", "macros"] }
+futures.workspace = true
+reth-db = { workspace = true, features = ["op", "test-utils"] }
+reth-node-builder.workspace = true
+reth-optimism-node.workspace = true
+reth-optimism-chainspec.workspace = true
+tempfile.workspace = true
+
+[features]
+test-utils = [
+    "reth-db/test-utils",
+    "reth-trie/test-utils",
+    "reth-node-builder/test-utils",
+    "reth-optimism-node/test-utils",
+    "reth-provider/test-utils",
+]
+metrics = ["reth-optimism-trie/metrics"]
+
+[package.metadata.cargo-udeps.ignore]
+development = [
+    "reth-node-builder",
+    "reth-optimism-node",
+    "reth-optimism-chainspec",
+    "tempfile",
+]
diff --git a/crates/optimism/exex/src/lib.rs b/crates/optimism/exex/src/lib.rs
new file mode 100644
index 00000000000..a651a98d225
--- /dev/null
+++ b/crates/optimism/exex/src/lib.rs
@@ -0,0 +1,447 @@
+//! ExEx unique to OP-Reth. See also [`reth_exex`] for the general execution extension framework.
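+//!
+//! The main export is [`OpProofsExEx`], which persists per-block trie updates within the
+//! fault proof window and serves them to the historical proofs RPC overrides.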
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+
+use alloy_consensus::BlockHeader;
+use alloy_eips::eip1898::BlockWithParent;
+use futures_util::TryStreamExt;
+use reth_execution_types::Chain;
+use reth_exex::{ExExContext, ExExEvent, ExExNotification};
+use reth_node_api::{FullNodeComponents, NodePrimitives};
+use reth_node_types::NodeTypes;
+use reth_optimism_trie::{
+    live::LiveTrieCollector, OpProofStoragePrunerTask, OpProofsStorage, OpProofsStore,
+};
+use reth_provider::{BlockReader, TransactionVariant};
+use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted};
+use std::{sync::Arc, time::Duration};
+use tracing::{debug, info};
+
+// Safety threshold for maximum blocks to prune automatically on startup.
+// If the required prune exceeds this, the node will error out and require manual pruning.
+const MAX_PRUNE_BLOCKS_STARTUP: u64 = 1000;
+
+/// OP Proofs ExEx - processes blocks and tracks state changes within fault proof window.
+///
+/// Saves and serves trie nodes to make proofs faster. This handles the process of
+/// saving the current state, new blocks as they're added, and serving proof RPCs
+/// based on the saved data.
+///
+/// # Examples
+///
+/// The following example shows how to install the ExEx with either in-memory or persistent storage.
+/// This can be used when launching an OP-Reth node via a binary.
+/// We are currently using it in optimism/bin/src/main.rs.
+///
+/// ```
+/// use futures_util::FutureExt;
+/// use reth_db::test_utils::create_test_rw_db;
+/// use reth_node_api::NodeTypesWithDBAdapter;
+/// use reth_node_builder::{NodeBuilder, NodeConfig};
+/// use reth_optimism_chainspec::BASE_MAINNET;
+/// use reth_optimism_exex::OpProofsExEx;
+/// use reth_optimism_node::{args::RollupArgs, OpNode};
+/// use reth_optimism_trie::{db::MdbxProofsStorage, InMemoryProofsStorage, OpProofsStorage};
+/// use reth_provider::providers::BlockchainProvider;
+/// use std::{sync::Arc, time::Duration};
+///
+/// let config = NodeConfig::new(BASE_MAINNET.clone());
+/// let db = create_test_rw_db();
+/// let args = RollupArgs::default();
+/// let op_node = OpNode::new(args);
+///
+/// // Create in-memory or persistent storage
+/// let storage: OpProofsStorage<Arc<InMemoryProofsStorage>> =
+///     Arc::new(InMemoryProofsStorage::new()).into();
+///
+/// // Example for creating persistent storage
+/// # let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
+/// # let storage_path = temp_dir.path().join("proofs_storage");
+///
+/// # let storage: OpProofsStorage<Arc<MdbxProofsStorage>> = Arc::new(
+/// #     MdbxProofsStorage::new(&storage_path).expect("Failed to create MdbxProofsStorage"),
+/// # )
+/// # .into();
+///
+/// let storage_exec = storage.clone();
+/// let proofs_history_window = 1_296_000u64;
+/// let proofs_history_prune_interval = Duration::from_secs(3600);
+///
+/// // Verification interval: perform full execution every N blocks
+/// let verification_interval = 0; // 0 = disabled, 100 = verify every 100 blocks
+///
+/// // Can also use install_exex_if along with a boolean flag
+/// // Set this based on your configuration or CLI args
+/// let _builder = NodeBuilder::new(config)
+///     .with_database(db)
+///     .with_types_and_provider::<OpNode, BlockchainProvider<NodeTypesWithDBAdapter<OpNode, _>>>()
+///     .with_components(op_node.components())
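+///     // `install_exex` registers the ExEx under a name; the closure returns a
+///     // future that resolves to the long-running ExEx task future (here
+///     // `OpProofsExEx::run`, boxed).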
+///     .install_exex("proofs-history", move |exex_context| async move {
+///         Ok(OpProofsExEx::new(
+///             exex_context,
+///             storage_exec,
+///             proofs_history_window,
+///             proofs_history_prune_interval,
+///             verification_interval, // 0 = no verification, 100 = every 100 blocks
+///         )
+///         .run()
+///         .boxed())
+///     })
+///     .on_node_started(|_full_node| Ok(()))
+///     .check_launch();
+/// ```
+#[derive(Debug)]
+pub struct OpProofsExEx<Node, Storage>
+where
+    Node: FullNodeComponents,
+{
+    /// The ExEx context containing the node related utilities e.g. provider, notifications,
+    /// events.
+    ctx: ExExContext<Node>,
+    /// The type of storage DB.
+    storage: OpProofsStorage<Storage>,
+    /// The window to span blocks for proofs history. Value is the number of blocks, received as
+    /// cli arg.
+    proofs_history_window: u64,
+    /// Interval between proof-storage prune runs
+    proofs_history_prune_interval: Duration,
+    /// Verification interval: perform full block execution every N blocks for data integrity.
+    /// If 0, verification is disabled (always use fast path when available).
+    /// If 1, verification is always enabled (always execute blocks).
+    verification_interval: u64,
+}
+
+impl<Node, Storage> OpProofsExEx<Node, Storage>
+where
+    Node: FullNodeComponents,
+{
+    /// Create a new `OpProofsExEx` instance.
+    pub const fn new(
+        ctx: ExExContext<Node>,
+        storage: OpProofsStorage<Storage>,
+        proofs_history_window: u64,
+        proofs_history_prune_interval: Duration,
+        verification_interval: u64,
+    ) -> Self {
+        Self {
+            ctx,
+            storage,
+            proofs_history_window,
+            proofs_history_prune_interval,
+            verification_interval,
+        }
+    }
+}
+
+impl<Node, Primitives, Storage> OpProofsExEx<Node, Storage>
+where
+    Node: FullNodeComponents<Types: NodeTypes<Primitives = Primitives>>,
+    Primitives: NodePrimitives,
+    Storage: OpProofsStore + Clone + 'static,
+{
+    /// Main execution loop for the ExEx
+    pub async fn run(mut self) -> eyre::Result<()> {
+        self.ensure_initialized().await?;
+
+        let prune_task = OpProofStoragePrunerTask::new(
+            self.storage.clone(),
+            self.ctx.provider().clone(),
+            self.proofs_history_window,
+            self.proofs_history_prune_interval,
+        );
+        self.ctx
+            .task_executor()
+            .spawn_with_graceful_shutdown_signal(|signal| Box::pin(prune_task.run(signal)));
+
+        let collector = LiveTrieCollector::new(
+            self.ctx.evm_config().clone(),
+            self.ctx.provider().clone(),
+            &self.storage,
+        );
+
+        while let Some(notification) = self.ctx.notifications.try_next().await? {
+            self.handle_notification(notification, &collector).await?;
+        }
+
+        Ok(())
+    }
+
+    /// Ensure proofs storage is initialized
+    async fn ensure_initialized(&self) -> eyre::Result<()> {
+        // Check if proofs storage is initialized
+        #[cfg_attr(not(feature = "metrics"), expect(unused_variables))]
+        let earliest_block_number = match self.storage.get_earliest_block_number().await? {
+            Some((n, _)) => n,
+            None => {
+                return Err(eyre::eyre!(
+                    "Proofs storage not initialized. Please run 'op-reth initialize-op-proofs --proofs-history.storage-path <path>' first."
+                ));
+            }
+        };
+
+        let latest_block_number = match self.storage.get_latest_block_number().await? {
+            Some((n, _)) => n,
+            None => {
+                return Err(eyre::eyre!(
+                    "Proofs storage not initialized. Please run 'op-reth initialize-op-proofs --proofs-history.storage-path <path>' first."
+                ));
+            }
+        };
+
+        // Check if we have accumulated too much history for the configured window.
+        // If the gap between what we have and what we want to keep is too large, the auto-pruner
+        // will stall the node.
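+        // Worked example (hypothetical numbers): with latest_block_number = 5_000 and
+        // proofs_history_window = 3_000, target_earliest = 2_000; if earliest_block_number
+        // is 500, the required prune of 1_500 blocks exceeds MAX_PRUNE_BLOCKS_STARTUP
+        // and startup is aborted.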
+        let target_earliest = latest_block_number.saturating_sub(self.proofs_history_window);
+        if target_earliest > earliest_block_number {
+            let blocks_to_prune = target_earliest - earliest_block_number;
+            if blocks_to_prune > MAX_PRUNE_BLOCKS_STARTUP {
+                return Err(eyre::eyre!(
+                    "Configuration requires pruning {} blocks, which exceeds the safety threshold of {}. \
+                    Huge prune operations can stall the node. \
+                    Please run 'op-reth proofs prune' manually before starting the node.",
+                    blocks_to_prune,
+                    MAX_PRUNE_BLOCKS_STARTUP
+                ));
+            }
+        }
+
+        // Need to update the earliest block metric on startup as this is not called frequently and
+        // can show outdated info. When metrics are disabled, this is a no-op.
+        #[cfg(feature = "metrics")]
+        {
+            self.storage
+                .metrics()
+                .block_metrics()
+                .earliest_number
+                .set(earliest_block_number as f64);
+        }
+
+        Ok(())
+    }
+
+    async fn handle_notification(
+        &self,
+        notification: ExExNotification<Primitives>,
+        collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>,
+    ) -> eyre::Result<()> {
+        let latest_stored = match self.storage.get_latest_block_number().await? {
+            Some((n, _)) => n,
+            None => {
+                return Err(eyre::eyre!("No blocks stored in proofs storage"));
+            }
+        };
+
+        match &notification {
+            ExExNotification::ChainCommitted { new } => {
+                self.handle_chain_committed(new.clone(), latest_stored, collector).await?
+            }
+            ExExNotification::ChainReorged { old, new } => {
+                self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector)
+                    .await?
+            }
+            ExExNotification::ChainReverted { old } => {
+                self.handle_chain_reverted(old.clone(), latest_stored, collector).await?
+            }
+        }
+
+        if let Some(committed_chain) = notification.committed_chain() {
+            self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
+        }
+
+        Ok(())
+    }
+
+    async fn handle_chain_committed(
+        &self,
+        new: Arc<Chain<Primitives>>,
+        latest_stored: u64,
+        collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>,
+    ) -> eyre::Result<()> {
+        debug!(
+            target: "optimism::exex",
+            block_number = new.tip().number(),
+            block_hash = ?new.tip().hash(),
+            "ChainCommitted notification received",
+        );
+
+        // If tip is not newer than what we have, nothing to do.
+        if new.tip().number() <= latest_stored {
+            debug!(
+                target: "optimism::exex",
+                block_number = new.tip().number(),
+                latest_stored,
+                "Already processed, skipping"
+            );
+            return Ok(());
+        }
+
+        // Process each block from latest_stored + 1 to tip
+        let start = latest_stored.saturating_add(1);
+        for block_number in start..=new.tip().number() {
+            self.process_block(block_number, &new, collector).await?;
+        }
+
+        Ok(())
+    }
+
+    /// Process a single block - either from chain or provider
+    async fn process_block(
+        &self,
+        block_number: u64,
+        chain: &Chain<Primitives>,
+        collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>,
+    ) -> eyre::Result<()> {
+        // Check if this block should be verified via full execution
+        let should_verify = self.verification_interval > 0 &&
+            block_number.is_multiple_of(self.verification_interval);
+
+        // Try to get block data from the chain first
+        // 1. Fast Path: Try to use pre-computed state from the notification
+        if let Some(block) = chain.blocks().get(&block_number) {
+            // Check if we have BOTH trie updates and hashed state.
+            // If either is missing, we fall back to execution to ensure data integrity.
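+            // Even when both are present, every `verification_interval`-th block (e.g.
+            // every 100th block with an interval of 100) is re-executed instead, as a
+            // periodic integrity check.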
+            if let (Some(trie_updates), Some(hashed_state)) =
+                (chain.trie_updates_at(block_number), chain.hashed_state_at(block_number))
+            {
+                // Use fast path only if we're not scheduled to verify this block
+                if !should_verify {
+                    debug!(
+                        target: "optimism::exex",
+                        block_number,
+                        "Using pre-computed state updates from notification"
+                    );
+
+                    collector
+                        .store_block_updates(
+                            block.block_with_parent(),
+                            (**trie_updates).clone(),
+                            (**hashed_state).clone(),
+                        )
+                        .await?;
+
+                    return Ok(());
+                }
+
+                info!(
+                    target: "optimism::exex",
+                    block_number,
+                    verification_interval = self.verification_interval,
+                    "Periodic verification: performing full block execution"
+                );
+            }
+
+            debug!(
+                target: "optimism::exex",
+                block_number,
+                "Block present in notification but state updates missing, falling back to execution"
+            );
+        }
+
+        // 2. Slow Path: Block not in chain (or state missing), fetch from provider and execute
+        debug!(
+            target: "optimism::exex",
+            block_number,
+            "Fetching block from provider for execution",
+        );
+
+        let block = self
+            .ctx
+            .provider()
+            .recovered_block(block_number.into(), TransactionVariant::NoHash)?
+            .ok_or_else(|| eyre::eyre!("Missing block {} in provider", block_number))?;
+
+        collector.execute_and_store_block_updates(&block).await?;
+        Ok(())
+    }
+
+    async fn handle_chain_reorged(
+        &self,
+        old: Arc<Chain<Primitives>>,
+        new: Arc<Chain<Primitives>>,
+        latest_stored: u64,
+        collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>,
+    ) -> eyre::Result<()> {
+        info!(
+            old_block_number = old.tip().number(),
+            old_block_hash = ?old.tip().hash(),
+            new_block_number = new.tip().number(),
+            new_block_hash = ?new.tip().hash(),
+            "ChainReorged notification received",
+        );
+
+        if old.first().number() > latest_stored {
+            debug!(target: "optimism::exex", "Reorg beyond stored blocks, skipping");
+            return Ok(());
+        }
+
+        // find the common ancestor
+        let mut block_updates: Vec<(
+            BlockWithParent,
+            Arc<TrieUpdatesSorted>,
+            Arc<HashedPostStateSorted>,
+        )> = Vec::with_capacity(new.len());
+        for block_number in new.blocks().keys() {
+            // verify if the fork point matches
+            if old.fork_block() != new.fork_block() {
+                return Err(eyre::eyre!(
+                    "Fork blocks do not match: old fork block {:?}, new fork block {:?}",
+                    old.fork_block(),
+                    new.fork_block()
+                ));
+            }
+
+            let block = new
+                .blocks()
+                .get(block_number)
+                .ok_or_else(|| eyre::eyre!("Missing block {} in new chain", block_number))?;
+            let trie_updates = new.trie_updates_at(*block_number).ok_or_else(|| {
+                eyre::eyre!("Missing Trie updates for block {} in new chain", block_number)
+            })?;
+            let hashed_state = new.hashed_state_at(*block_number).ok_or_else(|| {
+                eyre::eyre!("Missing Hashed state for block {} in new chain", block_number)
+            })?;
+
+            block_updates.push((
+                block.block_with_parent(),
+                trie_updates.clone(),
+                hashed_state.clone(),
+            ));
+        }
+
+        collector.unwind_and_store_block_updates(block_updates).await?;
+
+        Ok(())
+    }
+
+    async fn handle_chain_reverted(
+        &self,
+        old: Arc<Chain<Primitives>>,
+        latest_stored: u64,
+        collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>,
+    ) -> eyre::Result<()> {
+        info!(
+            target: "optimism::exex",
+            old_block_number = old.tip().number(),
+            old_block_hash = ?old.tip().hash(),
+            "ChainReverted notification received",
+        );
+
+        if old.first().number() > latest_stored {
+            debug!(
+                target: "optimism::exex",
+                first_block_number = old.first().number(),
+                latest_stored = latest_stored,
+                "Fork block number is greater than latest stored, skipping",
+            );
+            return Ok(());
+        }
+
+        collector.unwind_history(old.first().block_with_parent()).await?;
+        Ok(())
+    }
+}
diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs
index 0c3fc9136e8..386026f234f 100644
--- a/crates/optimism/node/src/engine.rs
+++ b/crates/optimism/node/src/engine.rs
@@ -306,8 +306,7 @@ mod test {
     use alloy_op_hardforks::BASE_SEPOLIA_JOVIAN_TIMESTAMP;
     use alloy_primitives::{b64, Address, B256, B64};
     use alloy_rpc_types_engine::PayloadAttributes;
-    use reth_chainspec::ChainSpec;
-    use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA};
+    use reth_optimism_chainspec::BASE_SEPOLIA;
     use reth_provider::noop::NoopProvider;
     use reth_trie_common::KeccakKeyHasher;
 
@@ -323,24 +322,6 @@ mod test {
         }};
     }
 
-    fn get_chainspec() -> Arc<OpChainSpec> {
-        let base_sepolia_spec = BASE_SEPOLIA.inner.clone();
-
-        Arc::new(OpChainSpec {
-            inner: ChainSpec {
-                chain: base_sepolia_spec.chain,
-                genesis: base_sepolia_spec.genesis,
-                genesis_header: base_sepolia_spec.genesis_header,
-                paris_block_and_final_difficulty: base_sepolia_spec
-                    .paris_block_and_final_difficulty,
-                hardforks: base_sepolia_spec.hardforks,
-                base_fee_params: base_sepolia_spec.base_fee_params,
-                prune_delete_limit: 10000,
-                ..Default::default()
-            },
-        })
-    }
-
     const fn get_attributes(
         eip_1559_params: Option<B64>,
         min_base_fee: Option<u64>,
@@ -364,8 +345,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_pre_holocene() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes = get_attributes(None, None, 1732633199);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -378,8 +361,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_holocene_no_eip1559_params() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes = get_attributes(None, None, 1732633200);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -392,8 +377,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes = get_attributes(Some(b64!("0000000000000008")), None, 1732633200);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -406,8 +393,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_holocene_eip1559_params_zero_elasticity() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
        let attributes = get_attributes(Some(b64!("0000000800000000")), None, 1732633200);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -420,8 +409,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_holocene_valid() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes = get_attributes(Some(b64!("0000000800000008")), None, 1732633200);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -434,8 +425,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_holocene_valid_all_zero() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes =
            get_attributes(Some(b64!("0000000000000000")), None, 1732633200);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -448,8 +441,10 @@ mod test {
 
     #[test]
     fn test_well_formed_attributes_jovian_valid() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes =
             get_attributes(Some(b64!("0000000000000000")), Some(1), BASE_SEPOLIA_JOVIAN_TIMESTAMP);
 
@@ -464,8 +459,10 @@ mod test {
 
     /// After Jovian (and holocene), eip1559 params must be Some
     #[test]
     fn test_malformed_attributes_jovian_with_eip_1559_params_none() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes = get_attributes(None, Some(1), BASE_SEPOLIA_JOVIAN_TIMESTAMP);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -479,8 +476,10 @@ mod test {
 
     /// Before Jovian, min base fee must be None
     #[test]
     fn test_malformed_attributes_pre_jovian_with_min_base_fee() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes = get_attributes(Some(b64!("0000000000000000")), Some(1), 1732633200);
 
         let result = <OpEngineValidator<_, _, _> as EngineApiValidator<
@@ -494,8 +493,10 @@ mod test {
 
     /// After Jovian, min base fee must be Some
     #[test]
     fn test_malformed_attributes_post_jovian_with_min_base_fee_none() {
-        let validator =
-            OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
+        let validator = OpEngineValidator::new::<KeccakKeyHasher>(
+            BASE_SEPOLIA.clone(),
+            NoopProvider::default(),
+        );
         let attributes =
             get_attributes(Some(b64!("0000000000000000")), None, BASE_SEPOLIA_JOVIAN_TIMESTAMP);
diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml
index 5d926caf159..d9e6aa8fc1f 100644
--- a/crates/optimism/rpc/Cargo.toml
+++ b/crates/optimism/rpc/Cargo.toml
@@ -13,6 +13,7 @@ workspace = true
 
 [dependencies]
 # reth
+reth-basic-payload-builder.workspace = true
 reth-evm.workspace = true
 reth-primitives-traits = { workspace = true, features = ["op"] }
 reth-storage-api.workspace = true
@@ -21,6 +22,7 @@
 reth-rpc-eth-types.workspace = true
 reth-rpc-server-types.workspace = true
 reth-tasks = { workspace = true, features = ["rayon"] }
 reth-transaction-pool.workspace = true
+reth-revm.workspace = true
 reth-rpc.workspace = true
 reth-rpc-api.workspace = true
 reth-node-api.workspace = true
@@ -28,6 +30,8 @@
 reth-node-builder.workspace = true
 reth-chainspec.workspace = true
 reth-chain-state.workspace = true
 reth-rpc-engine-api.workspace = true
+reth-payload-util.workspace = true
+reth-provider.workspace = true
 
 # op-reth
 reth-optimism-evm.workspace = true
@@ -37,14 +41,17 @@
 reth-optimism-txpool.workspace = true
 # TODO remove node-builder import
 reth-optimism-primitives = { workspace = true, features = ["reth-codec", "serde-bincode-compat", "serde"] }
 reth-optimism-forks.workspace = true
+reth-optimism-trie.workspace = true
 
 # ethereum
 alloy-eips.workspace = true
 alloy-json-rpc.workspace = true
 alloy-primitives.workspace = true
+alloy-rlp.workspace = true
 alloy-rpc-client.workspace = true
 alloy-rpc-types-eth.workspace = true
 alloy-rpc-types-debug.workspace = true
+alloy-serde.workspace = true
 alloy-transport.workspace = true
 alloy-transport-http.workspace = true
 alloy-consensus.workspace = true
@@ -58,6 +65,7 @@
 revm.workspace = true
 op-revm.workspace = true
 
 # async
+serde.workspace = true
 tokio.workspace = true
 futures.workspace = true
 tokio-stream.workspace = true
@@ -81,6 +89,9 @@ derive_more = { workspace = true, features = ["constructor"] }
 reth-metrics.workspace = true
 metrics.workspace = true
 
+# enum
+strum.workspace = true
+
 [dev-dependencies]
 reth-optimism-chainspec.workspace = true
 alloy-op-hardforks.workspace = true
diff --git a/crates/optimism/rpc/src/debug.rs b/crates/optimism/rpc/src/debug.rs
new file mode 100644
index 00000000000..48dcb75dc88
--- /dev/null
+++ b/crates/optimism/rpc/src/debug.rs
@@ -0,0 +1,337 @@
+//! Historical proofs RPC server implementation for `debug_` namespace.
+
+use crate::{
+    metrics::{DebugApiExtMetrics, DebugApis},
+    state::OpStateProviderFactory,
+};
+use alloy_consensus::BlockHeader;
+use alloy_eips::{BlockId, BlockNumberOrTag};
+use alloy_primitives::B256;
+use alloy_rlp::Encodable;
+use alloy_rpc_types_debug::ExecutionWitness;
+use async_trait::async_trait;
+use jsonrpsee::proc_macros::rpc;
+use jsonrpsee_core::RpcResult;
+use jsonrpsee_types::error::ErrorObject;
+use reth_basic_payload_builder::PayloadConfig;
+use reth_evm::{execute::Executor, ConfigureEvm};
+use reth_node_api::{BuildNextEnv, NodePrimitives, PayloadBuilderError};
+use reth_optimism_forks::OpHardforks;
+use reth_optimism_payload_builder::{
+    builder::{OpBuilder, OpPayloadBuilderCtx},
+    OpAttributes, OpPayloadPrimitives,
+};
+use reth_optimism_trie::{OpProofsStorage, OpProofsStore};
+use reth_optimism_txpool::OpPooledTransaction as OpPooledTx2;
+use reth_payload_util::NoopPayloadTransactions;
+use reth_primitives_traits::{SealedHeader, TxTy};
+use reth_provider::{
+    BlockReaderIdExt, ChainSpecProvider, HeaderProvider, NodePrimitivesProvider, ProviderError,
+    ProviderResult, StateProviderFactory,
+};
+use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord, State};
+use reth_rpc_api::eth::helpers::FullEthApi;
+use reth_rpc_eth_types::EthApiError;
+use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
+use reth_tasks::TaskSpawner;
+use serde::{Deserialize, Serialize};
+use std::{marker::PhantomData, sync::Arc};
+use tokio::sync::{oneshot, Semaphore};
+
+/// Represents the current proofs sync status.
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
+pub struct ProofsSyncStatus {
+    /// The earliest block number for which proofs are available.
+    earliest: Option<u64>,
+    /// The latest block number for which proofs are available.
+    latest: Option<u64>,
+}
+
+#[cfg_attr(not(test), rpc(server, namespace = "debug"))]
+#[cfg_attr(test, rpc(server, client, namespace = "debug"))]
+pub trait DebugApiOverride<Attributes> {
+    /// Executes a payload and returns the execution witness.
+    #[method(name = "executePayload")]
+    async fn execute_payload(
+        &self,
+        parent_block_hash: B256,
+        attributes: Attributes,
+    ) -> RpcResult<ExecutionWitness>;
+
+    /// Returns the execution witness for a given block.
+    #[method(name = "executionWitness")]
+    async fn execution_witness(&self, block: BlockNumberOrTag) -> RpcResult<ExecutionWitness>;
+
+    /// Returns the current proofs sync status.
+    #[method(name = "proofsSyncStatus")]
+    async fn proofs_sync_status(&self) -> RpcResult<ProofsSyncStatus>;
+}
+
+#[derive(Debug)]
+/// Overrides applied to the `debug_` namespace of the RPC API for the OP Proofs ExEx.
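+/// Serves `debug_executePayload`, `debug_executionWitness` and `debug_proofsSyncStatus`,
+/// backed by the node's state plus the external proofs storage.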
+pub struct DebugApiExt<Provider, Eth, P, EvmConfig, Attrs> {
+    inner: Arc<DebugApiExtInner<Provider, Eth, P, EvmConfig, Attrs>>,
+}
+
+impl<Provider, Eth, Storage, EvmConfig, Attrs> DebugApiExt<Provider, Eth, Storage, EvmConfig, Attrs>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    Storage: OpProofsStore + Clone + 'static,
+    Provider: BlockReaderIdExt + NodePrimitivesProvider,
+    EvmConfig: ConfigureEvm + 'static,
+{
+    /// Creates a new instance of the `DebugApiExt`.
+    pub fn new(
+        provider: Provider,
+        eth_api: Eth,
+        preimage_store: OpProofsStorage<Storage>,
+        task_spawner: Box<dyn TaskSpawner>,
+        evm_config: EvmConfig,
+    ) -> Self {
+        Self {
+            inner: Arc::new(DebugApiExtInner::new(
+                provider,
+                eth_api,
+                preimage_store,
+                task_spawner,
+                evm_config,
+            )),
+        }
+    }
+}
+
+#[derive(Debug)]
+/// Overrides applied to the `debug_` namespace of the RPC API for historical proofs ExEx.
+pub struct DebugApiExtInner<Provider, Eth, P, EvmConfig, Attrs> {
+    provider: Provider,
+    eth_api: Eth,
+    storage: OpProofsStorage<P>,
+    state_provider_factory: OpStateProviderFactory<Eth, P>,
+    evm_config: EvmConfig,
+    task_spawner: Box<dyn TaskSpawner>,
+    semaphore: Semaphore,
+    _attrs: PhantomData<Attrs>,
+    metrics: DebugApiExtMetrics,
+}
+
+impl<Provider, Eth, P, EvmConfig, Attrs> DebugApiExtInner<Provider, Eth, P, EvmConfig, Attrs>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'static,
+    Provider: NodePrimitivesProvider,
+{
+    fn new(
+        provider: Provider,
+        eth_api: Eth,
+        storage: OpProofsStorage<P>,
+        task_spawner: Box<dyn TaskSpawner>,
+        evm_config: EvmConfig,
+    ) -> Self {
+        Self {
+            provider,
+            storage: storage.clone(),
+            state_provider_factory: OpStateProviderFactory::new(eth_api.clone(), storage),
+            eth_api,
+            evm_config,
+            task_spawner,
+            semaphore: Semaphore::new(3),
+            _attrs: PhantomData,
+            metrics: DebugApiExtMetrics::new(),
+        }
+    }
+}
+
+impl<Provider, Eth, P, EvmConfig, Attrs> DebugApiExt<Provider, Eth, P, EvmConfig, Attrs>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'static,
+    Provider: BlockReaderIdExt
+        + NodePrimitivesProvider
+        + HeaderProvider<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
+{
+    fn parent_header(
+        &self,
+        parent_block_hash: B256,
+    ) -> ProviderResult<SealedHeader<Provider::Header>> {
+        self.inner
+            .provider
+            .sealed_header_by_hash(parent_block_hash)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into()))
+    }
+}
+
+#[async_trait]
+impl<N, Provider, Eth, P, EvmConfig, Attrs> DebugApiOverrideServer<Attrs::RpcPayloadAttributes>
+    for DebugApiExt<Provider, Eth, P, EvmConfig, Attrs>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'static,
+    Attrs: OpAttributes<Transaction = TxTy<N>>,
+    N: OpPayloadPrimitives,
+    EvmConfig: ConfigureEvm<
+            Primitives = N,
+            NextBlockEnvCtx: BuildNextEnv<Attrs, N::BlockHeader, Provider::ChainSpec>,
+        > + 'static,
+    Provider: BlockReaderIdExt<Block = N::Block>
+        + StateProviderFactory
+        + ChainSpecProvider<ChainSpec: OpHardforks>
+        + NodePrimitivesProvider<Primitives = N>
+        + HeaderProvider<Header = N::BlockHeader>
+        + Clone
+        + 'static,
+    op_alloy_consensus::OpPooledTransaction:
+        TryFrom<<N as OpPayloadPrimitives>::_TX, Error: core::error::Error>,
+    <N as OpPayloadPrimitives>::_TX: From<op_alloy_consensus::OpPooledTransaction>,
+{
+    async fn execute_payload(
+        &self,
+        parent_block_hash: B256,
+        attributes: Attrs::RpcPayloadAttributes,
+    ) -> RpcResult<ExecutionWitness> {
+        self.inner
+            .metrics
+            .record_operation_async(DebugApis::DebugExecutePayload, async {
+                let _permit = self.inner.semaphore.acquire().await;
+
+                let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?;
+
+                let (tx, rx) = oneshot::channel();
+                let this = self.inner.clone();
+                self.inner.task_spawner.spawn_blocking(Box::pin(async move {
+                    let result = async {
+                        let parent_hash = parent_header.hash();
+                        let attributes = Attrs::try_new(parent_hash, attributes, 3)
+                            .map_err(PayloadBuilderError::other)?;
+
+                        let config =
+                            PayloadConfig { parent_header: Arc::new(parent_header), attributes };
+                        let ctx = OpPayloadBuilderCtx {
+                            evm_config: this.evm_config.clone(),
+                            chain_spec: this.provider.chain_spec(),
+                            config,
+                            cancel: Default::default(),
+                            best_payload: Default::default(),
+                            builder_config: Default::default(),
+                        };
+
+                        let state_provider = this
+                            .state_provider_factory
+                            .state_provider(Some(BlockId::Hash(parent_hash.into())))
+                            .await
+                            .map_err(PayloadBuilderError::other)?;
+
+                        let builder = OpBuilder::new(|_| {
+                            NoopPayloadTransactions::<
+                                OpPooledTx2<
+                                    <N as OpPayloadPrimitives>::_TX,
+                                    op_alloy_consensus::OpPooledTransaction,
+                                >,
+                            >::default()
+                        });
+
+                        builder.witness(state_provider, &ctx).map_err(PayloadBuilderError::other)
+                    };
+
+                    let _ = tx.send(result.await);
+                }));
+
+                rx.await
+                    .map_err(|err| internal_rpc_err(err.to_string()))?
+                    .map_err(|err| internal_rpc_err(err.to_string()))
+            })
+            .await
+    }
+
+    async fn execution_witness(&self, block_id: BlockNumberOrTag) -> RpcResult<ExecutionWitness> {
+        self.inner
+            .metrics
+            .record_operation_async(DebugApis::DebugExecutionWitness, async {
+                let _permit = self.inner.semaphore.acquire().await;
+
+                let block = self
+                    .inner
+                    .eth_api
+                    .recovered_block(block_id.into())
+                    .await?
+                    .ok_or(EthApiError::HeaderNotFound(block_id.into()))?;
+
+                let this = self.inner.clone();
+                let block_number = block.header().number();
+
+                let state_provider = this
+                    .state_provider_factory
+                    .state_provider(Some(BlockId::Number(block.parent_num_hash().number.into())))
+                    .await
+                    .map_err(EthApiError::from)?;
+                let db = StateProviderDatabase::new(&state_provider);
+                let block_executor = this.eth_api.evm_config().executor(db);
+
+                let mut witness_record = ExecutionWitnessRecord::default();
+
+                let _ = block_executor
+                    .execute_with_state_closure(&block, |statedb: &State<_>| {
+                        witness_record.record_executed_state(statedb);
+                    })
+                    .map_err(EthApiError::from)?;
+
+                let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } =
+                    witness_record;
+
+                let state = state_provider
+                    .witness(Default::default(), hashed_state)
+                    .map_err(EthApiError::from)?;
+                let mut exec_witness =
+                    ExecutionWitness { state, codes, keys, ..Default::default() };
+
+                let smallest = match lowest_block_number {
+                    Some(smallest) => smallest,
+                    None => {
+                        // Return only the parent header, if there were no calls to the
+                        // BLOCKHASH opcode.
+                        block_number.saturating_sub(1)
+                    }
+                };
+
+                let range = smallest..block_number;
+                exec_witness.headers = self
+                    .inner
+                    .provider
+                    .headers_range(range)
+                    .map_err(EthApiError::from)?
+                    .into_iter()
+                    .map(|header| {
+                        let mut serialized_header = Vec::new();
+                        header.encode(&mut serialized_header);
+                        serialized_header.into()
+                    })
+                    .collect();
+
+                Ok(exec_witness)
+            })
+            .await
+    }
+
+    async fn proofs_sync_status(&self) -> RpcResult<ProofsSyncStatus> {
+        let earliest = self
+            .inner
+            .storage
+            .get_earliest_block_number()
+            .await
+            .map_err(|err| internal_rpc_err(err.to_string()))?;
+        let latest = self
+            .inner
+            .storage
+            .get_latest_block_number()
+            .await
+            .map_err(|err| internal_rpc_err(err.to_string()))?;
+
+        Ok(ProofsSyncStatus {
+            earliest: earliest.map(|(block_number, _)| block_number),
+            latest: latest.map(|(block_number, _)| block_number),
+        })
+    }
+}
diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs
index f4b9fcb08fd..421349ec028 100644
--- a/crates/optimism/rpc/src/eth/mod.rs
+++ b/crates/optimism/rpc/src/eth/mod.rs
@@ -1,6 +1,7 @@
 //! OP-Reth `eth_` endpoint implementation.
 
 pub mod ext;
+pub mod proofs;
 pub mod receipt;
 pub mod transaction;
 
diff --git a/crates/optimism/rpc/src/eth/proofs.rs b/crates/optimism/rpc/src/eth/proofs.rs
new file mode 100644
index 00000000000..07522c96b20
--- /dev/null
+++ b/crates/optimism/rpc/src/eth/proofs.rs
@@ -0,0 +1,95 @@
+//! Historical proofs RPC server implementation.
+
+use crate::{metrics::EthApiExtMetrics, state::OpStateProviderFactory};
+use alloy_eips::BlockId;
+use alloy_primitives::Address;
+use alloy_rpc_types_eth::EIP1186AccountProofResponse;
+use alloy_serde::JsonStorageKey;
+use async_trait::async_trait;
+use jsonrpsee::proc_macros::rpc;
+use jsonrpsee_core::RpcResult;
+use jsonrpsee_types::error::ErrorObject;
+use reth_optimism_trie::{OpProofsStorage, OpProofsStore};
+use reth_provider::StateProofProvider;
+use reth_rpc_api::eth::helpers::FullEthApi;
+use std::time::Instant;
+
+#[cfg_attr(not(test), rpc(server, namespace = "eth"))]
+#[cfg_attr(test, rpc(server, client, namespace = "eth"))]
+pub trait EthApiOverride {
+    /// Returns the account and storage values of the specified account including the Merkle-proof.
+    /// This call can be used to verify that the data you are pulling is not tampered with.
+    #[method(name = "getProof")]
+    async fn get_proof(
+        &self,
+        address: Address,
+        keys: Vec<JsonStorageKey>,
+        block_number: Option<BlockId>,
+    ) -> RpcResult<EIP1186AccountProofResponse>;
+}
+
+#[derive(Debug)]
+/// Overrides applied to the `eth_` namespace of the RPC API for historical proofs ExEx.
+pub struct EthApiExt<Eth, P> {
+    state_provider_factory: OpStateProviderFactory<Eth, P>,
+    metrics: EthApiExtMetrics,
+}
+
+impl<Eth, P> EthApiExt<Eth, P>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'static,
+{
+    /// Creates a new instance of the `EthApiExt`.
+    pub fn new(eth_api: Eth, preimage_store: OpProofsStorage<P>) -> Self {
+        let metrics = EthApiExtMetrics::default();
+        Self {
+            state_provider_factory: OpStateProviderFactory::new(eth_api, preimage_store),
+            metrics,
+        }
+    }
+}
+
+#[async_trait]
+impl<Eth, P> EthApiOverrideServer for EthApiExt<Eth, P>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'static,
+{
+    async fn get_proof(
+        &self,
+        address: Address,
+        keys: Vec<JsonStorageKey>,
+        block_number: Option<BlockId>,
+    ) -> RpcResult<EIP1186AccountProofResponse> {
+        let start = Instant::now();
+        self.metrics.get_proof_requests.increment(1);
+
+        let storage_keys = keys.iter().map(|key| key.as_b256()).collect::<Vec<_>>();
+
+        let result = async {
+            let proof = self
+                .state_provider_factory
+                .state_provider(block_number)
+                .await
+                .map_err(Into::into)?
+                .proof(Default::default(), address, &storage_keys)
+                .map_err(Into::into)?;
+
+            Ok(proof.into_eip1186_response(keys))
+        }
+        .await;
+
+        match &result {
+            Ok(_) => {
+                self.metrics.get_proof_latency.record(start.elapsed().as_secs_f64());
+                self.metrics.get_proof_successful_responses.increment(1);
+            }
+            Err(_) => self.metrics.get_proof_failures.increment(1),
+        }
+
+        result
+    }
+}
diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs
index 10f8ad5dccd..08233550a63 100644
--- a/crates/optimism/rpc/src/lib.rs
+++ b/crates/optimism/rpc/src/lib.rs
@@ -8,6 +8,7 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![cfg_attr(docsrs, feature(doc_cfg))]
 
+pub mod debug;
 pub mod engine;
 pub mod error;
 pub mod eth;
@@ -15,6 +16,7 @@
 pub mod historical;
 pub mod metrics;
 pub mod miner;
 pub mod sequencer;
+pub mod state;
 pub mod witness;
 
 #[cfg(feature = "client")]
@@ -22,5 +24,5 @@
 pub use engine::OpEngineApiClient;
 pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES};
 pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError};
 pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder};
-pub use metrics::SequencerMetrics;
+pub use metrics::{EthApiExtMetrics, SequencerMetrics};
 pub use sequencer::SequencerClient;
diff --git a/crates/optimism/rpc/src/metrics.rs b/crates/optimism/rpc/src/metrics.rs
index 5aa5e3eff3d..17ac94f6aa6 100644
--- a/crates/optimism/rpc/src/metrics.rs
+++ b/crates/optimism/rpc/src/metrics.rs
@@ -1,8 +1,11 @@
 //! RPC metrics unique for OP-stack.
 
+use alloy_primitives::map::HashMap;
 use core::time::Duration;
-use metrics::Histogram;
+use metrics::{Counter, Histogram};
 use reth_metrics::Metrics;
+use std::time::Instant;
+use strum::{EnumCount, EnumIter, IntoEnumIterator};
 
 /// Optimism sequencer metrics
 #[derive(Metrics, Clone)]
@@ -19,3 +22,114 @@ impl SequencerMetrics {
         self.sequencer_forward_latency.record(duration.as_secs_f64());
     }
 }
+
+/// Optimism ETH API extension metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "optimism_rpc.eth_api_ext")]
+pub struct EthApiExtMetrics {
+    /// How long it takes to handle a `eth_getProof` request successfully
+    pub(crate) get_proof_latency: Histogram,
+
+    /// Total number of `eth_getProof` requests
+    pub(crate) get_proof_requests: Counter,
+
+    /// Total number of successful `eth_getProof` responses
+    pub(crate) get_proof_successful_responses: Counter,
+
+    /// Total number of failures handling `eth_getProof` requests
+    pub(crate) get_proof_failures: Counter,
+}
+
+/// Types of debug apis
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, EnumCount, EnumIter)]
+pub enum DebugApis {
+    /// `DebugExecutePayload` Api
+    DebugExecutePayload,
+    /// `DebugExecutionWitness` Api
+    DebugExecutionWitness,
+}
+
+impl DebugApis {
+    /// Returns the operation as a string for metrics labels.
+    pub const fn as_str(&self) -> &'static str {
+        match self {
+            Self::DebugExecutePayload => "debug_execute_payload",
+            Self::DebugExecutionWitness => "debug_execution_witness",
+        }
+    }
+}
+
+/// Metrics for Debug API extension calls.
+#[derive(Debug)]
+pub struct DebugApiExtMetrics {
+    /// Per-api metrics handles
+    apis: HashMap<DebugApis, DebugApiExtRpcMetrics>,
+}
+
+impl DebugApiExtMetrics {
+    /// Initializes new `DebugApiExtMetrics`
+    pub fn new() -> Self {
+        let mut apis = HashMap::default();
+        for api in DebugApis::iter() {
+            apis.insert(api, DebugApiExtRpcMetrics::new_with_labels(&[("api", api.as_str())]));
+        }
+        Self { apis }
+    }
+
+    /// Record a Debug API call async (tracks latency, requests, success, failures).
+    pub async fn record_operation_async<F, T, E>(&self, api: DebugApis, f: F) -> Result<T, E>
+    where
+        F: Future<Output = Result<T, E>>,
+    {
+        if let Some(metrics) = self.apis.get(&api) {
+            metrics.record_async(f).await
+        } else {
+            f.await
+        }
+    }
+}
+
+impl Default for DebugApiExtMetrics {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Optimism Debug API extension metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "optimism_rpc.debug_api_ext")]
+pub struct DebugApiExtRpcMetrics {
+    /// End-to-end time to handle this API call
+    pub(crate) latency: Histogram,
+
+    /// Total number of requests for this API
+    pub(crate) requests: Counter,
+
+    /// Total number of successful responses for this API
+    pub(crate) successful_responses: Counter,
+
+    /// Total number of failures for this API
+    pub(crate) failures: Counter,
+}
+
+impl DebugApiExtRpcMetrics {
+    /// Record rpc api call async.
+    async fn record_async<F, T, E>(&self, f: F) -> Result<T, E>
+    where
+        F: Future<Output = Result<T, E>>,
+    {
+        let start = Instant::now();
+        let result = f.await;
+
+        self.latency.record(start.elapsed().as_secs_f64());
+        self.requests.increment(1);
+
+        if result.is_ok() {
+            self.successful_responses.increment(1);
+        } else {
+            self.failures.increment(1);
+        }
+
+        result
+    }
+}
diff --git a/crates/optimism/rpc/src/state.rs b/crates/optimism/rpc/src/state.rs
new file mode 100644
index 00000000000..a4b683f6fe9
--- /dev/null
+++ b/crates/optimism/rpc/src/state.rs
@@ -0,0 +1,64 @@
+//! State provider factory for OP Proofs ExEx.
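+//!
+//! For a requested block id, [`OpStateProviderFactory`] returns the node's regular
+//! historical state provider, overlaid with the external proofs storage whenever the
+//! block falls between the stored earliest and latest blocks.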
+
+use alloy_eips::BlockId;
+use derive_more::Constructor;
+use jsonrpsee_types::error::ErrorObject;
+use reth_optimism_trie::{provider::OpProofsStateProviderRef, OpProofsStorage, OpProofsStore};
+use reth_provider::{BlockIdReader, ProviderError, ProviderResult, StateProvider};
+use reth_rpc_api::eth::helpers::FullEthApi;
+use reth_rpc_eth_types::EthApiError;
+
+/// Creates a factory for state providers using OP Proofs external proofs storage.
+#[derive(Debug, Constructor)]
+pub struct OpStateProviderFactory<Eth, P> {
+    eth_api: Eth,
+    preimage_store: OpProofsStorage<P>,
+}
+
+impl<'a, Eth, P> OpStateProviderFactory<Eth, P>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'a,
+{
+    /// Creates a state provider for the given block id.
+    pub async fn state_provider(
+        &'a self,
+        block_id: Option<BlockId>,
+    ) -> ProviderResult<Box<dyn StateProvider + 'a>> {
+        let block_id = block_id.unwrap_or_default();
+        // Resolve the block number for the requested block id.
+        let block_number = self
+            .eth_api
+            .provider()
+            .block_number_for_id(block_id)?
+            .ok_or(EthApiError::HeaderNotFound(block_id))
+            .map_err(ProviderError::other)?;
+
+        let historical_provider =
+            self.eth_api.state_at_block_id(block_id).await.map_err(ProviderError::other)?;
+
+        let (Some((latest_block_number, _)), Some((earliest_block_number, _))) = (
+            self.preimage_store
+                .get_latest_block_number()
+                .await
+                .map_err(|e| ProviderError::Database(e.into()))?,
+            self.preimage_store
+                .get_earliest_block_number()
+                .await
+                .map_err(|e| ProviderError::Database(e.into()))?,
+        ) else {
+            // if no earliest block, db is empty - use historical provider
+            return Ok(historical_provider);
+        };
+
+        if block_number < earliest_block_number || block_number > latest_block_number {
+            return Ok(historical_provider);
+        }
+
+        let external_overlay_provider =
+            OpProofsStateProviderRef::new(historical_provider, &self.preimage_store, block_number);
+
+        Ok(Box::new(external_overlay_provider))
+    }
+}
diff --git a/crates/optimism/tests/Makefile b/crates/optimism/tests/Makefile
new file mode 100644
index 00000000000..c6fa968aaa2
--- /dev/null
+++ b/crates/optimism/tests/Makefile
@@ -0,0 +1,93 @@
+# Variables
+DOCKER_IMAGE_NAME := op-reth
+DOCKER_TAG := local
+DOCKERFILE_PATH := ../../../DockerfileOpProof
+KURTOSIS_PACKAGE := github.com/ethpandaops/optimism-package@998796c0f3bb478d63d729e65f0b76e24112e00d
+DEVNET ?= opgeth-seq-opreth-val
+GO_PKG_NAME ?= proofs/core
+SOURCE_DIR := $(shell pwd)
+OP_DEVSTACK_PROOF_SEQUENCER_EL ?= op-geth
+OP_DEVSTACK_PROOF_VALIDATOR_EL ?= op-reth
+
+.PHONY: all build build-docker build-docker-with-cov build-contracts unzip-contract-artifacts update-packages run test-e2e-kurtosis test-e2e-sysgo clean
+
+# Default target
+all: build-docker run
+
+# Build op-reth
+build:
+	@echo "Building op-reth binary..."
+	cd ../../../ && cargo build --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml
+
+# Build the op-reth Docker image
+build-docker:
+	@echo "Building $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) Docker image..."
+	cd ../../../ && docker build -f $(notdir $(DOCKERFILE_PATH)) -t $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) .
+
+# Build coverage-enabled op-reth Docker image
+build-docker-with-cov:
+	@echo "Building coverage-enabled $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) Docker image..."
+	cd ../../../ && docker build \
+		--build-arg RUSTFLAGS="-Cinstrument-coverage" \
+		--build-arg CARGO_INCREMENTAL=0 \
+		--build-arg LLVM_PROFILE_FILE="/coverage/%m-%p.profraw" \
+		-f $(notdir $(DOCKERFILE_PATH)) \
+		-t $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) .
+
+# Run Kurtosis with the optimism devnet
+run:
+	@echo "Starting Optimism devnet with historical proof configuration..."
+	@DEVNET_PATH="./devnets/$(DEVNET).yaml"; \
+	if [ ! -z "$(DEVNET_CUSTOM_PATH)" ]; then \
+		DEVNET_PATH="$(DEVNET_CUSTOM_PATH)"; \
+	fi; \
+	kurtosis run $(KURTOSIS_PACKAGE) --args-file $$DEVNET_PATH --enclave $(DEVNET)
+
+# Build smart contract artifacts with Foundry
+build-contracts:
+	@echo "Building contracts with forge..."
+	@cd "$(SOURCE_DIR)/proofs/contracts" && forge build || { echo "forge build failed"; exit 1; }
+
+# Unzip contract artifacts
+unzip-contract-artifacts:
+	@echo "Unzipping contract artifacts..."
+	mkdir -p "$(SOURCE_DIR)/artifacts/src"; \
+	tar --zstd -xf "$(SOURCE_DIR)/artifacts/compressed/artifacts.tzst" -C "$(SOURCE_DIR)/artifacts/src"
+
+# Update contract artifacts from the optimism submodule
+update-packages:
+	@echo "Updating contract artifacts from optimism submodule..."
+	cd "$(SOURCE_DIR)/optimism/op-deployer" && just build-contracts copy-contract-artifacts
+	mkdir -p "$(SOURCE_DIR)/artifacts/compressed"
+	cp "$(SOURCE_DIR)/optimism/op-deployer/pkg/deployer/artifacts/forge-artifacts/artifacts.tzst" "$(SOURCE_DIR)/artifacts/compressed/artifacts.tzst"
+
+# Run E2E tests using Kurtosis
+test-e2e-kurtosis: build-contracts
+	@echo "Running E2E tests with Kurtosis for $(DEVNET)"
+	@DEVNET_PATH="$(SOURCE_DIR)/devnets/$(DEVNET).yaml"; \
+	if [ ! -z "$(DEVNET_CUSTOM_PATH)" ]; then \
+		DEVNET_PATH="$(DEVNET_CUSTOM_PATH)"; \
+	fi; \
+	export OP_DEPLOYER_ARTIFACTS="$(SOURCE_DIR)/artifacts/src/forge-artifacts"; \
+	export DEVNET_ENV_URL="ktnative://$(DEVNET)$$DEVNET_PATH"; \
+	export DISABLE_OP_E2E_LEGACY=true; \
+	export DEVSTACK_ORCHESTRATOR=sysext; \
+	go test -count=1 -timeout 40m -v ./$(GO_PKG_NAME)
+
+# Run E2E tests using Sysgo
+test-e2e-sysgo: unzip-contract-artifacts build-contracts
+	@echo "Running E2E tests with Sysgo"
+	export OP_DEPLOYER_ARTIFACTS="$(SOURCE_DIR)/artifacts/src/forge-artifacts"; \
+	export DISABLE_OP_E2E_LEGACY=true; \
+	export DEVSTACK_ORCHESTRATOR=sysgo; \
+	export OP_RETH_ENABLE_PROOF_HISTORY=true; \
+	export SKIP_P2P_CONNECTION_CHECK=true; \
+	export OP_RETH_EXEC_PATH="${SOURCE_DIR}/../../../target/debug/op-reth"; \
+	export OP_DEVSTACK_PROOF_SEQUENCER_EL=$(OP_DEVSTACK_PROOF_SEQUENCER_EL); \
+	export OP_DEVSTACK_PROOF_VALIDATOR_EL=$(OP_DEVSTACK_PROOF_VALIDATOR_EL); \
+	go test -count=1 -timeout 40m -v ./$(GO_PKG_NAME)
+
+# Stop and clean Kurtosis services
+clean:
+	@echo "Cleaning up Kurtosis services..."
+	kurtosis clean -a
diff --git a/crates/optimism/tests/README.md b/crates/optimism/tests/README.md
new file mode 100644
index 00000000000..7169c09da70
--- /dev/null
+++ b/crates/optimism/tests/README.md
@@ -0,0 +1,67 @@
+# E2E tests for op-reth
+
+This folder contains the end-to-end testing resources for op-reth. Tests use the Optimism "devstack" (from the Optimism monorepo) and Kurtosis to deploy ephemeral devnets.
+
+This README documents common workflows and Makefile commands used to build the local Docker image, start the devnet with Kurtosis, run e2e tests, and clean up resources.
+
+## Prerequisites
+
+- Docker (Desktop) running on your machine
+- Kurtosis CLI installed and able to reach the Kurtosis engine
+- Go (to run Go-based e2e tests)
+
+## Commands (Makefile targets)
+
+Build the Docker image used by the devnet (tags `op-reth:local`):
+
+```sh
+make build-docker
+```
+
+Start the Optimism devnet (default: `opgeth-seq-opreth-val`):
+
+```sh
+# uses the Makefile's DEVNET variable (devnets/<DEVNET>.yaml)
+# OPTIONAL. Default: opgeth-seq-opreth-val
+make run DEVNET=<devnet-name>
+
+# or with a custom devnet YAML path
+make run DEVNET_CUSTOM_PATH=/absolute/path/to/devnet.yaml
+```
+
+Run the e2e test suite that exercises the deployed devnet (Go tests):
+
+```sh
+# runs go test with a long timeout; set GO_PKG_NAME to the package to test
+make test-e2e-kurtosis
+
+# run a specific test or package
+make test-e2e-kurtosis GO_PKG_NAME=path/to/pkg
+```
+
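+Run the e2e test suite without Kurtosis, using the devstack's in-process `sysgo`
+orchestrator (build the local `op-reth` binary first, since the tests point
+`OP_RETH_EXEC_PATH` at `target/debug/op-reth`):
+
+```sh
+make build
+make test-e2e-sysgo
+```
+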
+Stop and remove Kurtosis resources (cleanup):
+
+```sh
+make clean
+```
+
+## Implementation notes
+
+- The Makefile in this directory calls the repository root `DockerfileOpProof` to build an op-reth image tagged `op-reth:local`.
+- The default Kurtosis package used is `github.com/ethpandaops/optimism-package@998796c0f3bb478d63d729e65f0b76e24112e00d`. The Makefile passes the YAML under `devnets/$(DEVNET).yaml` to `kurtosis run`.
+
+## Quick workflow example
+
+```sh
+# build image
+make build-docker
+
+# start devnet
+make run
+
+# run tests (set GO_PKG_NAME if needed)
+make test-e2e-kurtosis GO_PKG_NAME=proofs
+
+# cleanup
+make clean
+```
diff --git a/crates/optimism/tests/artifacts/.gitignore b/crates/optimism/tests/artifacts/.gitignore
new file mode 100644
index 00000000000..e1b09822c79
--- /dev/null
+++ b/crates/optimism/tests/artifacts/.gitignore
@@ -0,0 +1,2 @@
+forge-artifacts
+src
\ No newline at end of file
diff --git a/crates/optimism/tests/artifacts/compressed/README.md b/crates/optimism/tests/artifacts/compressed/README.md
new file mode 100644
index 00000000000..cc09a725873
--- /dev/null
+++ b/crates/optimism/tests/artifacts/compressed/README.md
@@ -0,0 +1,2 @@
+Artifacts in this directory will be embedded inside the `op-deployer` binary. The directory can be populated by running
+`make update-packages`.
\ No newline at end of file
diff --git a/crates/optimism/tests/artifacts/compressed/artifacts.tzst b/crates/optimism/tests/artifacts/compressed/artifacts.tzst
new file mode 100644
index 00000000000..3be176ef01f
Binary files /dev/null and b/crates/optimism/tests/artifacts/compressed/artifacts.tzst differ
diff --git a/crates/optimism/tests/devnets/opgeth-seq-opreth-val.yaml b/crates/optimism/tests/devnets/opgeth-seq-opreth-val.yaml
new file mode 100644
index 00000000000..65aaa2f3203
--- /dev/null
+++ b/crates/optimism/tests/devnets/opgeth-seq-opreth-val.yaml
@@ -0,0 +1,74 @@
+# A simple network configuration for kurtosis (https://github.com/ethpandaops/optimism-package)
+# Spins up a chain with two participating EL/CL pairs.
+# One with op-geth/op-node (sequencer role) and one with op-reth/op-node (verifier role).
+
+optimism_package:
+  observability:
+    enabled: true
+    grafana_params:
+      # Will load the dashboards from default branch.
+      dashboard_sources:
+        - github.com/op-rs/op-reth/etc/grafana
+      image: "grafana/grafana:12.3.0"
+  faucet:
+    enabled: true
+  test-sequencers:
+    sequencer:
+      image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-test-sequencer:9243bb0452efa3fd255556631688d1255723384a
+      enabled: true
+  chains:
+    chain0:
+      # Chain with only two nodes
+      participants:
+        sequencer:
+          el:
+            type: op-geth
+            log_level: "debug"
+          cl:
+            type: op-node
+            log_level: "debug"
+            extra_params: [--experimental.sequencer-api=true]
+          sequencer: true
+        verifier:
+          el:
+            type: op-reth
+            # Note: we use the local image for now. This allows us to run the tests in CI pipelines without publishing new docker images every time.
+            image: op-reth:local
+            extra_params: [
+              --proofs-history,
+              --proofs-history.window=200,
+              --proofs-history.prune-interval=1m,
+              --proofs-history.storage-path=/data/proofs-history
+            ]
+          cl:
+            type: op-node
+            log_level: "debug"
+            extra_params: [--experimental.sequencer-api=true]
+          sequencer: false
+      network_params:
+        network: "kurtosis"
+        network_id: "2151908"
+        seconds_per_slot: 2
+
+  global_log_level: "info"
+  global_node_selectors: {}
+  global_tolerations: []
+  persistent: false
+ethereum_package:
+  participants:
+    - el_type: geth
+      cl_type: teku
+      cl_image: consensys/teku:25.7.1
+  network_params:
+    preset: minimal
+    genesis_delay: 5
+  additional_preloaded_contracts: '
+    {
+      "0x4e59b44847b379578588920cA78FbF26c0B4956C": {
+        "balance": "0ETH",
+        "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
+        "storage": {},
+        "nonce": "1"
+      }
+    }
+  '
diff --git a/crates/optimism/tests/devnets/opreth-seq-opgeth-val.yaml b/crates/optimism/tests/devnets/opreth-seq-opgeth-val.yaml
new file mode 100644
index 00000000000..2b43aa72d17
--- /dev/null
+++ b/crates/optimism/tests/devnets/opreth-seq-opgeth-val.yaml
@@ -0,0 +1,74 @@
+# A simple network configuration for kurtosis (https://github.com/ethpandaops/optimism-package)
+# Spins up a chain with two participating EL/CL pairs.
+# One with op-geth/op-node (verifier role) and one with op-reth/op-node (sequencer role).
+
+optimism_package:
+  observability:
+    enabled: true
+    grafana_params:
+      # Will load the dashboards from default branch.
+      dashboard_sources:
+        - github.com/op-rs/op-reth/etc/grafana
+      image: "grafana/grafana:12.3.0"
+  faucet:
+    enabled: true
+  test-sequencers:
+    sequencer:
+      image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-test-sequencer:9243bb0452efa3fd255556631688d1255723384a
+      enabled: true
+  chains:
+    chain0:
+      # Chain with only two nodes
+      participants:
+        sequencer:
+          el:
+            type: op-reth
+            # Note: we use the local image for now. This allows us to run the tests in CI pipelines without publishing new docker images every time.
+        image: op-reth:local
+        extra_params: [
+          --proofs-history,
+          --proofs-history.window=200,
+          --proofs-history.prune-interval=1m,
+          --proofs-history.storage-path=/data/proofs-history
+        ]
+      cl:
+        type: op-node
+        log_level: "debug"
+        extra_params: [--experimental.sequencer-api=true]
+      sequencer: true
+    verifier:
+      el:
+        type: op-geth
+        log_level: "debug"
+      cl:
+        type: op-node
+        log_level: "debug"
+        extra_params: [--experimental.sequencer-api=true]
+      sequencer: false
+
+  network_params:
+    network: "kurtosis"
+    network_id: "2151908"
+    seconds_per_slot: 2
+  global_log_level: "info"
+  global_node_selectors: {}
+  global_tolerations: []
+  persistent: false
+ethereum_package:
+  participants:
+    - el_type: geth
+      cl_type: teku
+      cl_image: consensys/teku:25.7.1
+  network_params:
+    preset: minimal
+    genesis_delay: 5
+  additional_preloaded_contracts: '
+    {
+      "0x4e59b44847b379578588920cA78FbF26c0B4956C": {
+        "balance": "0ETH",
+        "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
+        "storage": {},
+        "nonce": "1"
+      }
+    }
+  '
diff --git a/crates/optimism/tests/go.mod b/crates/optimism/tests/go.mod
new file mode 100644
index 00000000000..bc1a3751bcc
--- /dev/null
+++ b/crates/optimism/tests/go.mod
@@ -0,0 +1,285 @@
+module github.com/op-rs/op-geth
+
+go 1.24.0
+
+// We're using the "develop" branch of the Optimism repo to include the latest changes to the `devstack` package.
+require github.com/ethereum-optimism/optimism v1.16.4
+
+require (
+    github.com/BurntSushi/toml v1.5.0
+    github.com/bmatcuk/doublestar/v4 v4.8.1
+    github.com/chelnak/ysmrr v0.6.0
+    github.com/ethereum/go-ethereum v1.16.3
+    github.com/stretchr/testify v1.10.0
+    golang.org/x/sync v0.14.0
+    gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+    github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+    github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect
+    github.com/Masterminds/semver/v3 v3.3.1 // indirect
+    github.com/Microsoft/go-winio v0.6.2 // indirect
+    github.com/VictoriaMetrics/fastcache v1.13.0 // indirect
+    github.com/adrg/xdg v0.4.0 // indirect
+    github.com/andybalholm/brotli v1.1.0 // indirect
+    github.com/armon/go-metrics v0.4.1 // indirect
+    github.com/base/go-bip39 v1.1.0 // indirect
+    github.com/benbjohnson/clock v1.3.5 // indirect
+    github.com/beorn7/perks v1.0.1 // indirect
+    github.com/bits-and-blooms/bitset v1.20.0 // indirect
+    github.com/boltdb/bolt v1.3.1 // indirect
+    github.com/btcsuite/btcd v0.24.2 // indirect
+    github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
+    github.com/btcsuite/btcd/btcutil v1.1.5 // indirect
+    github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
+    github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
+    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+    github.com/cespare/xxhash/v2 v2.3.0 // indirect
+    github.com/cockroachdb/errors v1.11.3 // indirect
+    github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect
+    github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
+    github.com/cockroachdb/pebble v1.1.5 // indirect
+    github.com/cockroachdb/redact v1.1.5 // indirect
+    github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
+    github.com/coder/websocket v1.8.13 // indirect
+    github.com/consensys/gnark-crypto v0.18.0 // indirect
+    github.com/containerd/cgroups v1.1.0 // indirect
+    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+    github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
+    github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
+    github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
+    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+    github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+    github.com/dchest/siphash v1.2.3 // indirect
+    github.com/deckarep/golang-set/v2 v2.6.0 // indirect
+    github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
+    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
+    github.com/deepmap/oapi-codegen v1.8.2 // indirect
+    github.com/distribution/reference v0.6.0 // indirect
+    github.com/dlclark/regexp2 v1.7.0 // indirect
+    github.com/docker/docker v27.5.1+incompatible // indirect
+    github.com/docker/go-connections v0.5.0 // indirect
+    github.com/docker/go-units v0.5.0 // indirect
+    github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
+    github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
+    github.com/elastic/gosigar v0.14.3 // indirect
+    github.com/emicklei/dot v1.6.2 // indirect
+    github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e // indirect
+    github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20251121143344-5ac16e0fbb00 // indirect
+    github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
+    github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect
+    github.com/ethereum/go-verkle v0.2.2 // indirect
+    github.com/fatih/color v1.18.0 // indirect
+    github.com/felixge/httpsnoop v1.0.4 // indirect
+    github.com/ferranbt/fastssz v0.1.4 // indirect
+    github.com/flynn/noise v1.1.0 // indirect
+    github.com/francoispqt/gojay v1.2.13 // indirect
+    github.com/fsnotify/fsnotify v1.9.0 // indirect
+    github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
+    github.com/getsentry/sentry-go v0.27.0 // indirect
+    github.com/ghodss/yaml v1.0.0 // indirect
+    github.com/go-logr/logr v1.4.2 // indirect
+    github.com/go-logr/stdr v1.2.2 // indirect
+    github.com/go-ole/go-ole v1.3.0 // indirect
+    github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
+    github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+    github.com/go-yaml/yaml v2.1.0+incompatible // indirect
+    github.com/godbus/dbus/v5 v5.1.0 // indirect
+    github.com/gofrs/flock v0.12.1 // indirect
+    github.com/gogo/protobuf v1.3.2 // indirect
+    github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
+    github.com/golang/snappy v1.0.0 // indirect
+    github.com/google/gopacket v1.1.19 // indirect
+    github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect
+    github.com/google/uuid v1.6.0 // indirect
+    github.com/gorilla/websocket v1.5.3 // indirect
+    github.com/graph-gophers/graphql-go v1.3.0 // indirect
+    github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
+    github.com/hashicorp/errwrap v1.1.0 // indirect
+    github.com/hashicorp/go-bexpr v0.1.11 // indirect
+    github.com/hashicorp/go-hclog v1.6.2 // indirect
+    github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
+    github.com/hashicorp/go-metrics v0.5.4 // indirect
+    github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
+    github.com/hashicorp/go-multierror v1.1.1 // indirect
+    github.com/hashicorp/golang-lru v0.5.0 // indirect
+    github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
+    github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+    github.com/hashicorp/raft v1.7.3 // indirect
+    github.com/hashicorp/raft-boltdb/v2 v2.3.1 // indirect
+    github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db // indirect
+    github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
+    github.com/holiman/uint256 v1.3.2 // indirect
+    github.com/honeycombio/otel-config-go v1.17.0 // indirect
+    github.com/huin/goupnp v1.3.0 // indirect
+    github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
+    github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
+    github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
+    github.com/ipfs/go-cid v0.4.1 // indirect
+    github.com/ipfs/go-datastore v0.6.0 // indirect
+    github.com/ipfs/go-ds-leveldb v0.5.0 // indirect
+    github.com/ipfs/go-log/v2 v2.5.1 // indirect
+    github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+    github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+    github.com/jbenet/goprocess v0.1.4 // indirect
+    github.com/klauspost/compress v1.18.0 // indirect
+    github.com/klauspost/cpuid/v2 v2.2.9 // indirect
+    github.com/koron/go-ssdp v0.0.4 // indirect
+    github.com/kr/pretty v0.3.1 // indirect
+    github.com/kr/text v0.2.0 // indirect
+    github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 // indirect
+    github.com/kurtosis-tech/kurtosis/api/golang v1.8.2-0.20250602144112-2b7d06430e48 // indirect
+    github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b // indirect
+    github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc // indirect
+    github.com/kurtosis-tech/kurtosis/path-compression v0.0.0-20250108161014-0819b8ca912f // indirect
+    github.com/kurtosis-tech/stacktrace v0.0.0-20211028211901-1c67a77b5409 // indirect
+    github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+    github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+    github.com/libp2p/go-libp2p v0.36.2 // indirect
+    github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+    github.com/libp2p/go-libp2p-mplex v0.9.0 // indirect
+    github.com/libp2p/go-libp2p-pubsub v0.12.0 // indirect
+    github.com/libp2p/go-libp2p-testing v0.12.0 // indirect
+    github.com/libp2p/go-mplex v0.7.0 // indirect
+    github.com/libp2p/go-msgio v0.3.0 // indirect
+    github.com/libp2p/go-nat v0.2.0 // indirect
+    github.com/libp2p/go-netroute v0.2.1 // indirect
+    github.com/libp2p/go-reuseport v0.4.0 // indirect
+    github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
+    github.com/lmittmann/w3 v0.19.5 // indirect
+    github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect
+    github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+    github.com/mattn/go-colorable v0.1.14 // indirect
+    github.com/mattn/go-isatty v0.0.20 // indirect
+    github.com/mattn/go-runewidth v0.0.16 // indirect
+    github.com/mholt/archiver v3.1.1+incompatible // indirect
+    github.com/miekg/dns v1.1.62 // indirect
+    github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+    github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+    github.com/minio/sha256-simd v1.0.1 // indirect
+    github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
+    github.com/mitchellh/mapstructure v1.5.0 // indirect
+    github.com/mitchellh/pointerstructure v1.2.1 // indirect
+    github.com/moby/docker-image-spec v1.3.1 // indirect
+    github.com/mr-tron/base58 v1.2.0 // indirect
+    github.com/multiformats/go-base32 v0.1.0 // indirect
+    github.com/multiformats/go-base36 v0.2.0 // indirect
+    github.com/multiformats/go-multiaddr v0.14.0 // indirect
+    github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
+    github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+    github.com/multiformats/go-multibase v0.2.0 // indirect
+    github.com/multiformats/go-multicodec v0.9.0 // indirect
+    github.com/multiformats/go-multihash v0.2.3 // indirect
+    github.com/multiformats/go-multistream v0.5.0 // indirect
+    github.com/multiformats/go-varint v0.0.7 // indirect
+    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+    github.com/naoina/go-stringutil v0.1.0 // indirect
+    github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
+    github.com/nwaples/rardecode v1.1.3 // indirect
+    github.com/olekukonko/tablewriter v0.0.5 // indirect
+    github.com/onsi/ginkgo/v2 v2.20.0 // indirect
+    github.com/opencontainers/go-digest v1.0.0 // indirect
+    github.com/opencontainers/image-spec v1.1.0 // indirect
+    github.com/opencontainers/runtime-spec v1.2.0 // indirect
+    github.com/opentracing/opentracing-go v1.2.0 // indirect
+    github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+    github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
+    github.com/pierrec/lz4 v2.6.1+incompatible // indirect
+    github.com/pion/datachannel v1.5.8 // indirect
+    github.com/pion/dtls/v2 v2.2.12 // indirect
+    github.com/pion/ice/v2 v2.3.34 // indirect
+    github.com/pion/interceptor v0.1.30 // indirect
+    github.com/pion/logging v0.2.2 // indirect
+    github.com/pion/mdns v0.0.12 // indirect
+    github.com/pion/randutil v0.1.0 // indirect
+    github.com/pion/rtcp v1.2.14 // indirect
+    github.com/pion/rtp v1.8.9 // indirect
+    github.com/pion/sctp v1.8.33 // indirect
+    github.com/pion/sdp/v3 v3.0.9 // indirect
+    github.com/pion/srtp/v2 v2.0.20 // indirect
+    github.com/pion/stun v0.6.1 // indirect
+    github.com/pion/stun/v2 v2.0.0 // indirect
+    github.com/pion/transport/v2 v2.2.10 // indirect
+    github.com/pion/transport/v3 v3.0.7 // indirect
+    github.com/pion/turn/v2 v2.1.6 // indirect
+    github.com/pion/webrtc/v3 v3.3.0 // indirect
+    github.com/pkg/errors v0.9.1 // indirect
+    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+    github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+    github.com/prometheus/client_golang v1.22.0 // indirect
+    github.com/prometheus/client_model v0.6.2 // indirect
+    github.com/prometheus/common v0.62.0 // indirect
+    github.com/prometheus/procfs v0.15.1 // indirect
+    github.com/protolambda/ctxlock v0.1.0 // indirect
+    github.com/quic-go/qpack v0.4.0 // indirect
+    github.com/quic-go/quic-go v0.46.0 // indirect
+    github.com/quic-go/webtransport-go v0.8.0 // indirect
+    github.com/raulk/go-watchdog v1.3.0 // indirect
+    github.com/rivo/uniseg v0.4.7 // indirect
+    github.com/rogpeppe/go-internal v1.13.1 // indirect
+    github.com/rs/cors v1.11.0 // indirect
+    github.com/russross/blackfriday/v2 v2.1.0 // indirect
+    github.com/schollz/progressbar/v3 v3.18.0 // indirect
+    github.com/sethvargo/go-envconfig v1.1.0 // indirect
+    github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+    github.com/shirou/gopsutil/v4 v4.24.6 // indirect
+    github.com/shoenig/go-m1cpu v0.1.6 // indirect
+    github.com/sirupsen/logrus v1.9.3 // indirect
+    github.com/spaolacci/murmur3 v1.1.0 // indirect
+    github.com/spf13/afero v1.12.0 // indirect
+    github.com/stretchr/objx v0.5.2 // indirect
+    github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect
+    github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
+    github.com/tklauser/go-sysconf v0.3.14 // indirect
+    github.com/tklauser/numcpus v0.8.0 // indirect
+    github.com/ulikunitz/xz v0.5.12 // indirect
+    github.com/urfave/cli/v2 v2.27.6 // indirect
+    github.com/wlynxg/anet v0.0.4 // indirect
+    github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
+    github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
+    github.com/yusufpapurcu/wmi v1.2.4 // indirect
+    go.etcd.io/bbolt v1.3.5 // indirect
+    go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/host v0.53.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 // indirect
+    go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect
+    go.opentelemetry.io/contrib/propagators/ot v1.28.0 // indirect
+    go.opentelemetry.io/otel v1.34.0 // indirect
+    go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect
+    go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 // indirect
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 // indirect
+    go.opentelemetry.io/otel/metric v1.34.0 // indirect
+    go.opentelemetry.io/otel/sdk v1.34.0 // indirect
+    go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
+    go.opentelemetry.io/otel/trace v1.34.0 // indirect
+    go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+    go.uber.org/dig v1.18.0 // indirect
+    go.uber.org/fx v1.22.2 // indirect
+    go.uber.org/mock v0.4.0 // indirect
+    go.uber.org/multierr v1.11.0 // indirect
+    go.uber.org/zap v1.27.0 // indirect
+    golang.org/x/crypto v0.36.0 // indirect
+    golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
+    golang.org/x/mod v0.22.0 // indirect
+    golang.org/x/net v0.38.0 // indirect
+    golang.org/x/sys v0.36.0 // indirect
+    golang.org/x/term v0.30.0 // indirect
+    golang.org/x/text v0.25.0 // indirect
+    golang.org/x/time v0.11.0 // indirect
+    golang.org/x/tools v0.29.0 // indirect
+    google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
+    google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
+    google.golang.org/grpc v1.69.4 // indirect
+    google.golang.org/protobuf v1.36.6 // indirect
+    gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+    gopkg.in/yaml.v2 v2.4.0 // indirect
+    lukechampine.com/blake3 v1.3.0 // indirect
+)
+
+replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101605.0-rc.1
+
+replace github.com/ethereum-optimism/optimism => ./optimism
diff --git a/crates/optimism/tests/go.sum b/crates/optimism/tests/go.sum
new file mode 100644
index 00000000000..7c7baeae102
--- /dev/null
+++ b/crates/optimism/tests/go.sum
@@ -0,0 +1,1190 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod
h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= +github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= +github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= +github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= +github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/base/go-bip39 v1.1.0 h1:ely6zK09KaQbfX8wpcmN4pRXy0SbbqMT2QF45P1BNh0= +github.com/base/go-bip39 v1.1.0/go.mod 
h1:grZZXX8gYycovDC4cLS/RS0DmctofwHN+MUhedYCbO0= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= +github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod 
h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chelnak/ysmrr v0.6.0 h1:kMhO0oI02tl/9szvxrOE0yeImtrK4KQhER0oXu1K/iM= +github.com/chelnak/ysmrr v0.6.0/go.mod h1:56JSrmQgb7/7xoMvuD87h3PE/qW6K1+BQcrgWtVLTUo= +github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= +github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo 
v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= +github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= +github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= +github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= +github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= +github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= +github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= +github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e h1:iy1vBIzACYUyOVyoADUwvAiq2eOPC0yVsDUdolPwQjk= +github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e/go.mod h1:DYj7+vYJ4cIB7zera9mv4LcAynCL5u4YVfoeUu6Wa+w= +github.com/ethereum-optimism/op-geth v1.101605.0-rc.1 h1:rzmwuBKOMZnQc4QNBm5iEqBrnEo1M5cbklWHkC5Oszo= +github.com/ethereum-optimism/op-geth v1.101605.0-rc.1/go.mod h1:9J7De8kDwXE/lrMgVEHc0F33TZqcN1Lb5nYaW6UZt38= +github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20251121143344-5ac16e0fbb00 h1:TR5Y7B+5m63V0Dno7MHcFqv/XZByQzx/4THV1T1A7+U= +github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20251121143344-5ac16e0fbb00/go.mod h1:NZ816PzLU1TLv1RdAvYAb6KWOj4Zm5aInT0YpDVml2Y= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= +github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= +github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 
h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 
h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 h1:Ep/joEub9YwcjRY6ND3+Y/w0ncE540RtGatVhtZL0/Q= +github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA= +github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.11 h1:6DqdA/KBjurGby9yTY0bmkathya0lfwF2SeuubCI7dY= +github.com/hashicorp/go-bexpr v0.1.11/go.mod h1:f03lAo0duBlDIUMGCuad8oLcgejw4m7U+N8T+6Kz1AE= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/arc/v2 
v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/raft v1.7.3 h1:DxpEqZJysHN0wK+fviai5mFcSYsCkNpFUl1xpAW8Rbo= +github.com/hashicorp/raft v1.7.3/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= +github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e h1:SK4y8oR4ZMHPvwVHryKI88kJPJda4UyWYvG5A6iEQxc= +github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e/go.mod h1:EMz/UIuG93P0MBeHh6CbXQAEe8ckVJLZjhD17lBzK5Q= +github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc= +github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/honeycombio/otel-config-go v1.17.0 h1:3/zig0L3IGnfgiCrEfAwBsM0rF57+TKTyJ/a8yqW2eM= +github.com/honeycombio/otel-config-go v1.17.0/go.mod h1:g2mMdfih4sYKfXBtz2mNGvo3HiQYqX4Up4pdA8JOF2s= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs= +github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-detect-race v0.0.1 
h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod 
h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 h1:izciXrFyFR+ihJ7nLTOkoIX5GzBPIp8gVKlw94gIc98= +github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2/go.mod h1:bWSMQK3WHVTGHX9CjxPAb/LtzcmfOxID2wdzakSWQxo= +github.com/kurtosis-tech/kurtosis/api/golang v1.8.2-0.20250602144112-2b7d06430e48 h1:iBbwJrQQ+9Erq9FiEJAp/Rk4ZdMBvA8UX+irXleWu+c= +github.com/kurtosis-tech/kurtosis/api/golang v1.8.2-0.20250602144112-2b7d06430e48/go.mod h1:VZXj/IVyUGVSFy27sD6BJp+6dhZgveuOLPT/crpGjxg= +github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b h1:hMoIM99QKcYQqsnK4AF7Lovi9ZD9ac6lZLZ5D/jx2x8= +github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b/go.mod h1:4pFdrRwDz5R+Fov2ZuTaPhAVgjA2jhGh1Izf832sX7A= +github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc h1:7IlEpSehmWcNXOFpNP24Cu5HQI3af7GCBQw//m+LnvQ= +github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc/go.mod h1:TOWMQgvAJH/NiWWERGXg/plT9lS7aFcXFxCa0M5sfHo= +github.com/kurtosis-tech/kurtosis/path-compression v0.0.0-20250108161014-0819b8ca912f h1:kys3RDy0uHk+VwYS1mVh48YnogkRTAxUUCV7kpwMNOQ= +github.com/kurtosis-tech/kurtosis/path-compression v0.0.0-20250108161014-0819b8ca912f/go.mod h1:aDMrPeS7Gii8W6SDKSKyrBNgEQAUYidriyeKGf+Ml3I= +github.com/kurtosis-tech/stacktrace v0.0.0-20211028211901-1c67a77b5409 h1:YQTATifMUwZEtZYb0LVA7DK2pj8s71iY8rzweuUQ5+g= +github.com/kurtosis-tech/stacktrace v0.0.0-20211028211901-1c67a77b5409/go.mod h1:y5weVs5d9wXXHcDA1awRxkIhhHC1xxYJN8a7aXnE6S8= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= 
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= +github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-mplex v0.9.0 h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8= +github.com/libp2p/go-libp2p-mplex v0.9.0/go.mod h1:ro1i4kuwiFT+uMPbIDIFkcLs1KRbNp0QwnUXM+P64Og= +github.com/libp2p/go-libp2p-pubsub v0.12.0 h1:PENNZjSfk8KYxANRlpipdS7+BfLmOl3L2E/6vSNjbdI= +github.com/libp2p/go-libp2p-pubsub v0.12.0/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/lmittmann/w3 v0.19.5 h1:WwVRyIwhRLfIahmpB1EglsB3o1XWsgydgrxIUp5upFQ= +github.com/lmittmann/w3 v0.19.5/go.mod h1:pN97sGGYGvsbqOYj/ms3Pd+7k/aiK/9OpNcxMmmzSOI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= 
+github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU= +github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod 
h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream 
v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc= +github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= 
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= +github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= +github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.12/go.mod 
h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= +github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= +github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= +github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/protolambda/ctxlock v0.1.0 h1:rCUY3+vRdcdZXqT07iXgyr744J2DU2LCBIXowYAjBCE= +github.com/protolambda/ctxlock v0.1.0/go.mod h1:vefhX6rIZH8rsg5ZpOJfEDYQOppZi19SfPiGOFrNnwM= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= +github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= +github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= +github.com/quic-go/webtransport-go v0.8.0/go.mod 
h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= +github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sethvargo/go-envconfig v1.1.0 h1:cWZiJxeTm7AlCvzGXrEXaSTCNgip5oJepekh/BOQuog= +github.com/sethvargo/go-envconfig v1.1.0/go.mod h1:JLd0KFWQYzyENqnEPWWZ49i4vzZo/6nRidxI8YvGiHw= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v4 v4.24.6 h1:9qqCSYF2pgOU+t+NgJtp7Co5+5mHF/HyKBUckySQL64= +github.com/shirou/gopsutil/v4 v4.24.6/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= 
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g= +github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wlynxg/anet v0.0.3/go.mod 
h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0 h1:KG6fOUk3EwSH1dEpsAbsLKFbn3cFwN9xDu8plGu55zI= +go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0/go.mod h1:bSd579exEkh/P5msRcom8YzVB6NsUxYKyV+D/FYOY7Y= +go.opentelemetry.io/contrib/instrumentation/host v0.53.0 h1:X4r+5n6bSqaQUbPlSO5baoM7tBvipkT0mJFyuPFnPAU= +go.opentelemetry.io/contrib/instrumentation/host v0.53.0/go.mod h1:NTaDj8VCnJxWleEcRQRQaN36+aCZjO9foNIdJunEjUQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 h1:nOlJEAJyrcy8hexK65M+dsCHIx7CVVbybcFDNkcTcAc= +go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0/go.mod h1:u79lGGIlkg3Ryw425RbMjEkGYNxSnXRyR286O840+u4= +go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= +go.opentelemetry.io/contrib/propagators/b3 v1.28.0/go.mod h1:DWRkzJONLquRz7OJPh2rRbZ7MugQj62rk7g6HRnEqh0= +go.opentelemetry.io/contrib/propagators/ot v1.28.0 h1:rmlG+2pc5k5M7Y7izDrxAHZUIwDERdGMTD9oMV7llMk= +go.opentelemetry.io/contrib/propagators/ot v1.28.0/go.mod h1:MNgXIn+UrMbNGpd7xyckyo2LCHIgCdmdjEE7YNZGG+w= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod 
h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod 
h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= +lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/crates/optimism/tests/optimism b/crates/optimism/tests/optimism new file mode 160000 index 00000000000..920f5c334d0 --- /dev/null +++ b/crates/optimism/tests/optimism @@ -0,0 +1 @@ +Subproject commit 920f5c334d096a1be009a31b7a4f56c2f1fa24e3 diff --git a/crates/optimism/tests/proofs/contracts/foundry.toml b/crates/optimism/tests/proofs/contracts/foundry.toml new file mode 100644 index 00000000000..cad32fb0c14 --- /dev/null +++ b/crates/optimism/tests/proofs/contracts/foundry.toml @@ -0,0 +1,3 @@ +[profile.default] +src = "src" +out = "artifacts" diff --git a/crates/optimism/tests/proofs/contracts/src/MultiStorage.sol b/crates/optimism/tests/proofs/contracts/src/MultiStorage.sol new file mode 100644 index 00000000000..fc0bac34e74 --- /dev/null +++ b/crates/optimism/tests/proofs/contracts/src/MultiStorage.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +contract MultiStorage { 
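+    // Storage layout relied on by the proof tests below: slotA -> slot 0,
+    // slotB -> slot 1, owner -> slot 2 (Solidity assigns value-type slots sequentially).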
+    uint256 public slotA;
+    uint256 public slotB;
+    address public owner;
+
+    constructor() {
+        owner = msg.sender;
+    }
+
+    function setValues(uint256 _a, uint256 _b) external {
+        slotA = _a;
+        slotB = _b;
+    }
+}
\ No newline at end of file
diff --git a/crates/optimism/tests/proofs/contracts/src/SimpleStorage.sol b/crates/optimism/tests/proofs/contracts/src/SimpleStorage.sol
new file mode 100644
index 00000000000..a975eab781f
--- /dev/null
+++ b/crates/optimism/tests/proofs/contracts/src/SimpleStorage.sol
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.20;
+
+contract SimpleStorage {
+    uint256 public value;
+
+    function setValue(uint256 newValue) external {
+        value = newValue;
+    }
+}
\ No newline at end of file
diff --git a/crates/optimism/tests/proofs/contracts/src/TokenVault.sol b/crates/optimism/tests/proofs/contracts/src/TokenVault.sol
new file mode 100644
index 00000000000..3ee6f256413
--- /dev/null
+++ b/crates/optimism/tests/proofs/contracts/src/TokenVault.sol
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+/// @title TokenVault - realistic contract for eth_getProof testing
+/// @notice Demonstrates mappings, nested mappings, and dynamic arrays
+contract TokenVault {
+    struct Allowance {
+        uint256 amount;
+        bool active;
+    }
+
+    // Mapping: user => balance
+    mapping(address => uint256) public balances;
+
+    // Nested mapping: owner => spender => allowance info
+    mapping(address => mapping(address => Allowance)) public allowances;
+
+    // Dynamic array: list of all depositors
+    address[] public depositors;
+
+    constructor() {
+        // initialize the contract with a few entries
+        address alice = address(0xA11CE);
+        address bob = address(0xB0B);
+
+        balances[alice] = 1000;
+        balances[bob] = 2000;
+
+        allowances[alice][bob] = Allowance({amount: 300, active: true});
+        allowances[bob][alice] = Allowance({amount: 150, active: false});
+
+        depositors.push(alice);
+        depositors.push(bob);
+    }
+
+    function deposit() external payable {
+        balances[msg.sender] += msg.value;
+        depositors.push(msg.sender);
+    }
+
+    function approve(address spender, uint256 amount) external {
+        allowances[msg.sender][spender] = Allowance({
+            amount: amount,
+            active: true
+        });
+    }
+
+    function deactivateAllowance(address spender) external {
+        allowances[msg.sender][spender].active = false;
+    }
+
+    function getDepositors() external view returns (address[] memory) {
+        return depositors;
+    }
+}
\ No newline at end of file
diff --git a/crates/optimism/tests/proofs/core/account_proofs_test.go b/crates/optimism/tests/proofs/core/account_proofs_test.go
new file mode 100644
index 00000000000..15772bdafab
--- /dev/null
+++ b/crates/optimism/tests/proofs/core/account_proofs_test.go
@@ -0,0 +1,119 @@
+package core
+
+import (
+	"testing"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/devtest"
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/op-rs/op-geth/proofs/utils"
+	"github.com/stretchr/testify/require"
+)
+
+// TestL2MultipleTransactionsInDifferentBlocks sends transactions from different accounts
+// on L2 across multiple blocks, verifying account state changes at each height.
+// Check that the proofs retrieved from geth and reth match for each account at each block height,
+// and verify the proofs against the respective block state roots.
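+// The proofs compared are assumed to be EIP-1186 eth_getProof responses, fetched roughly as
+//
+//	var res map[string]any
+//	err := client.RPC().CallContext(ctx, &res, "eth_getProof", addr, []common.Hash{}, blockNumberHex)
+//
+// (client and blockNumberHex are illustrative names), so a mismatch in any field
+// (accountProof, balance, nonce, codeHash, storageHash, storageProof) indicates
+// divergent state handling between the two clients.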
+func TestL2MultipleTransactionsInDifferentBlocks(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + + const numAccounts = 2 + const initialFunding = 10 + accounts := sys.FunderL2.NewFundedEOAs(numAccounts, eth.Ether(initialFunding)) + + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + + // Block 1: Send transaction from first account + currentBlock := sys.L2ELSequencerNode().WaitForBlock() + t.Logf("Current L2 block number: %d", currentBlock.Number) + + transferAmount := eth.Ether(1) + tx1 := accounts[0].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 0: %s", accounts[0].Address().Hex()) + receipt1, err := tx1.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) + t.Logf("Transaction 1 included in block: %d", receipt1.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt1.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt1.BlockNumber.Uint64()) + sys.L2ELSequencerNode().WaitForBlockNumber(currentBlock.Number + 1) + + // Block 2: Send transaction from second account + currentBlock = sys.L2ELSequencerNode().WaitForBlock() + t.Logf("Current L2 block number: %d", currentBlock.Number) + + tx2 := accounts[1].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 1: %s", accounts[1].Address().Hex()) + receipt2, err := tx2.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt2.Status) + t.Logf("Transaction 2 included in block: %d", receipt2.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt2.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, receipt2.BlockNumber.Uint64()) + + // Also verify we can get proofs for account 0 at block 2 (different block height) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt2.BlockNumber.Uint64()) +} + +// TestL2MultipleTransactionsInSingleBlock tests 2 different accounts sending transactions +// that get included in the same L2 block. +// It verifies that the account proofs for both accounts can be retrieved and verified +// against the same block's state root, and that the proofs from geth and reth match. 
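+// Both transfers are submitted back-to-back without waiting for inclusion, so with the
+// devnet's short block times they usually land in the same block; the branch in the
+// test body still handles the case where they do not.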
+func TestL2MultipleTransactionsInSingleBlock(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + + const numAccounts = 2 + const initialFunding = 10 + accounts := sys.FunderL2.NewFundedEOAs(numAccounts, eth.Ether(initialFunding)) + + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + + transferAmount := eth.Ether(1) + + t.Log("Sending transactions from both accounts") + tx0 := accounts[0].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 0: %s", accounts[0].Address().Hex()) + + tx1 := accounts[1].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 1: %s", accounts[1].Address().Hex()) + + // Wait for both transactions to be included + receipt0, err := tx0.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt0.Status) + t.Logf("Transaction 0 included in block %d", receipt0.BlockNumber.Uint64()) + + receipt1, err := tx1.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) + t.Logf("Transaction 1 included in block %d", receipt1.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt1.BlockNumber.Uint64()) + // Txns can land in the same or different blocks depending on timing. + if receipt0.BlockNumber.Uint64() == receipt1.BlockNumber.Uint64() { + t.Logf("Both transactions included in the same L2 block: %d", receipt0.BlockNumber.Uint64()) + + // Verify both proofs against the same block state root + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt0.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, receipt0.BlockNumber.Uint64()) + + } else { + t.Logf("Transactions in different blocks: %d and %d", + receipt0.BlockNumber.Uint64(), receipt1.BlockNumber.Uint64()) + + // Different blocks: verify each proof's merkle root matches its respective block's state root + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt0.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, receipt1.BlockNumber.Uint64()) + } + + t.Logf("Proof for account 0 and 1 verified successfully") +} diff --git a/crates/optimism/tests/proofs/core/execute_payload_test.go b/crates/optimism/tests/proofs/core/execute_payload_test.go new file mode 100644 index 00000000000..0216eae68f4 --- /dev/null +++ b/crates/optimism/tests/proofs/core/execute_payload_test.go @@ -0,0 +1,118 @@ +package core + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/op-rs/op-geth/proofs/utils" +) + +func TestExecutePayloadSuccess(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + opRethELNode := sys.RethL2ELNode() + + plannedTxOption := user.PlanTransfer(user.Address(), eth.OneWei) + plannedTx := txplan.NewPlannedTx(plannedTxOption) + signedTx, err := plannedTx.Signed.Eval(ctx) + if err != nil { + gt.Fatal(err) + } + + raw, err := signedTx.MarshalBinary() + if err != nil { + gt.Fatal(err) + } + + lastBlock, err := opRethELNode.Escape().L2EthClient().InfoByLabel(ctx, eth.Unsafe) + if err != nil { + gt.Fatal(err) + } 
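+	// The attributes below are assembled by hand on top of the unsafe head: a
+	// one-second-later timestamp, the parent's prevRandao and gas limit, and
+	// NoTxPool=true so the payload contains only the raw transaction built above.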
+ + blockTime := lastBlock.Time() + 1 + gasLimit := eth.Uint64Quantity(lastBlock.GasLimit()) + + var prevRandao eth.Bytes32 + copy(prevRandao[:], lastBlock.MixDigest().Bytes()) + + var zero1559 eth.Bytes8 + minBaseFee := uint64(10) + + attrs := eth.PayloadAttributes{ + Timestamp: eth.Uint64Quantity(blockTime), + PrevRandao: prevRandao, + SuggestedFeeRecipient: lastBlock.Coinbase(), + Withdrawals: nil, + ParentBeaconBlockRoot: lastBlock.ParentBeaconRoot(), + Transactions: []eth.Data{eth.Data(raw)}, + NoTxPool: true, + GasLimit: &gasLimit, + EIP1559Params: &zero1559, + MinBaseFee: &minBaseFee, + } + + witness, err := opRethELNode.Escape().L2EthClient().PayloadExecutionWitness(ctx, lastBlock.Hash(), attrs) + if err != nil { + gt.Fatal(err) + } + if witness == nil { + gt.Fatal("empty witness") + } +} + +func TestExecutePayloadWithInvalidParentHash(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + opRethELNode := sys.RethL2ELNode() + + plannedTxOption := user.PlanTransfer(user.Address(), eth.OneWei) + plannedTx := txplan.NewPlannedTx(plannedTxOption) + signedTx, err := plannedTx.Signed.Eval(ctx) + if err != nil { + gt.Fatal(err) + } + + raw, err := signedTx.MarshalBinary() + if err != nil { + gt.Fatal(err) + } + + lastBlock, err := opRethELNode.Escape().L2EthClient().InfoByLabel(ctx, eth.Unsafe) + if err != nil { + gt.Fatal(err) + } + + blockTime := lastBlock.Time() + 1 + gasLimit := eth.Uint64Quantity(lastBlock.GasLimit()) + + var prevRandao eth.Bytes32 + copy(prevRandao[:], lastBlock.MixDigest().Bytes()) + + var zero1559 eth.Bytes8 + minBaseFee := uint64(10) + + attrs := eth.PayloadAttributes{ + Timestamp: eth.Uint64Quantity(blockTime), + PrevRandao: prevRandao, + SuggestedFeeRecipient: lastBlock.Coinbase(), + Withdrawals: nil, + ParentBeaconBlockRoot: lastBlock.ParentBeaconRoot(), + Transactions: []eth.Data{eth.Data(raw)}, + NoTxPool: true, + GasLimit: &gasLimit, + EIP1559Params: &zero1559, + MinBaseFee: &minBaseFee, + } + + _, err = opRethELNode.Escape().L2EthClient().PayloadExecutionWitness(ctx, common.Hash{}, attrs) + if err == nil { + gt.Fatal("expected error") + } +} diff --git a/crates/optimism/tests/proofs/core/execution_witness_test.go b/crates/optimism/tests/proofs/core/execution_witness_test.go new file mode 100644 index 00000000000..2cbbc2b73e8 --- /dev/null +++ b/crates/optimism/tests/proofs/core/execution_witness_test.go @@ -0,0 +1,126 @@ +package core + +import ( + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +// ExecutionWitness represents the response from debug_executionWitness +type ExecutionWitness struct { + Keys []hexutil.Bytes `json:"keys"` + Codes []hexutil.Bytes `json:"codes"` + State []hexutil.Bytes `json:"state"` + Headers []hexutil.Bytes `json:"headers"` +} + +// TestDebugExecutionWitness tests the debug_executionWitness RPC method on Reth L2. +// This verifies that the execution witness can be retrieved for a block containing transactions +// and that the response contains valid state, codes, keys, and headers data. 
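+// The `state` entries are expected to be RLP-encoded trie nodes, `codes` the touched
+// contract bytecodes, `keys` the accessed account/storage keys, and `headers` the
+// ancestor headers, with the direct parent assumed to be the last entry (the test
+// decodes it and cross-checks its state root against the canonical parent block).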
+func TestDebugExecutionWitness(gt *testing.T) { + t := devtest.SerialT(gt) + sys := utils.NewMixedOpProofPreset(t) + opRethELNode := sys.RethL2ELNode() + + // Create a funded account and recipient + account := sys.FunderL2.NewFundedEOA(eth.Ether(10)) + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + + // Wait for current block + currentBlock := sys.L2ELSequencerNode().WaitForBlock() + t.Logf("Current L2 block number: %d", currentBlock.Number) + + // Send a transaction to create some state changes + transferAmount := eth.Ether(1) + tx := account.Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account: %s to recipient: %s", account.Address().Hex(), recipientAddr.Hex()) + + receipt, err := tx.Included.Eval(t.Ctx()) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + t.Logf("Transaction included in block: %d", receipt.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt.BlockNumber.Uint64()) + l2RethClient := opRethELNode.Escape().L2EthClient() + + // Get the block to inspect the state changes + block, err := l2RethClient.InfoByNumber(t.Ctx(), receipt.BlockNumber.Uint64()) + require.NoError(t, err) + t.Logf("Block %d has state root: %s", block.NumberU64(), block.Root().Hex()) + + // Call debug_executionWitness via RPC + var witness ExecutionWitness + blockNumberHex := hexutil.EncodeUint64(block.NumberU64()) + + // Use the RPC client's CallContext method directly + err = l2RethClient.RPC().CallContext(t.Ctx(), &witness, "debug_executionWitness", blockNumberHex) + require.NoError(t, err, "debug_executionWitness RPC call should succeed") + + // Verify the witness contains expected data + require.NotEmpty(t, witness.Keys, "Witness should contain keys data") + require.NotEmpty(t, witness.Codes, "Witness should contain codes data") + require.NotEmpty(t, witness.State, "State should not be empty") + require.NotNil(t, witness.Headers, "Witness should contain headers data") + + // Verify the parent header is present and decode it + require.NotEmpty(t, witness.Headers, "Headers should contain at least the parent block") + parentHeaderBytes := witness.Headers[len(witness.Headers)-1] + require.NotEmpty(t, parentHeaderBytes, "Parent header should not be empty") + t.Logf("Parent header size: %d bytes", len(parentHeaderBytes)) + + // Decode the parent header to verify it's valid RLP and extract state root + var parentHeader types.Header + err = rlp.DecodeBytes(parentHeaderBytes, &parentHeader) + require.NoError(t, err, "Parent header should be valid RLP-encoded") + + // Verify the parent header matches the expected parent block + expectedParentNumber := block.NumberU64() - 1 + require.Equal(t, expectedParentNumber, parentHeader.Number.Uint64(), + "Parent header should be for block %d", expectedParentNumber) + + // Get the actual parent block from the chain to verify state root + actualParentBlock, err := l2RethClient.InfoByNumber(t.Ctx(), expectedParentNumber) + require.NoError(t, err, "Should be able to fetch parent block from chain") + + // Verify the parent header's state root matches the actual parent block's state root + require.Equal(t, actualParentBlock.Root(), parentHeader.Root, + "Parent header state root in witness should match actual parent block state root") + t.Logf("Verified parent header state root matches chain: %s", parentHeader.Root.Hex()) + + // Verify that the witness contains keys for the accounts involved in the transaction + senderAddrHex := 
strings.ToLower(account.Address().Hex())
+	recipientAddrHex := strings.ToLower(recipientAddr.Hex())
+
+	// Check whether the witness keys contain the accounts.
+	// The witness key format may vary, so we do a substring match on the lower-cased address.
+	foundSender := false
+	foundRecipient := false
+
+	for _, value := range witness.Keys {
+		keyLower := strings.ToLower(value.String())
+		if strings.Contains(keyLower, senderAddrHex) {
+			foundSender = true
+		}
+		if strings.Contains(keyLower, recipientAddrHex) {
+			foundRecipient = true
+		}
+	}
+
+	// We should find at least the sender since they initiated the transaction
+	require.True(t, foundSender, "Witness should contain state data for the transaction sender")
+	t.Logf("Verified sender account is present in execution witness")
+
+	// The recipient might not always be in the witness depending on the implementation
+	if foundRecipient {
+		t.Logf("Verified recipient account is present in execution witness")
+	}
+	t.Log("Successfully retrieved and validated execution witness from Reth")
+}
diff --git a/crates/optimism/tests/proofs/core/init_test.go b/crates/optimism/tests/proofs/core/init_test.go
new file mode 100644
index 00000000000..df6fe1ec27d
--- /dev/null
+++ b/crates/optimism/tests/proofs/core/init_test.go
@@ -0,0 +1,14 @@
+package core
+
+import (
+	"testing"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/presets"
+	"github.com/op-rs/op-geth/proofs/utils"
+)
+
+// TestMain creates the test-setups against the shared backend
+func TestMain(m *testing.M) {
+	// Other setups may be added here, hydrated from the same orchestrator
+	presets.DoMain(m, utils.WithMixedOpProofPreset())
+}
diff --git a/crates/optimism/tests/proofs/core/resyncing_test.go b/crates/optimism/tests/proofs/core/resyncing_test.go
new file mode 100644
index 00000000000..1e7722a3055
--- /dev/null
+++ b/crates/optimism/tests/proofs/core/resyncing_test.go
@@ -0,0 +1,64 @@
+package core
+
+import (
+	"testing"
+	"time"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/devtest"
+	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/op-rs/op-geth/proofs/utils"
+	"github.com/stretchr/testify/require"
+)
+
+func TestResyncing(gt *testing.T) {
+	t := devtest.SerialT(gt)
+	ctx := t.Ctx()
+
+	sys := utils.NewMixedOpProofPreset(t)
+
+	alice := sys.FunderL2.NewFundedEOA(eth.OneEther)
+	bob := sys.FunderL2.NewFundedEOA(eth.OneEther)
+
+	tx := alice.Transfer(bob.Address(), eth.OneHundredthEther)
+	receipt, err := tx.Included.Eval(ctx)
+	require.NoError(gt, err)
+	require.Equal(gt, types.ReceiptStatusSuccessful, receipt.Status)
+
+	t.Logf("Stopping validator L2 CL and EL to simulate downtime")
+	// According to the devnet config, `B` will be the validator node.
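+	// While the validator pair is down the sequencer keeps producing blocks; after the
+	// restart the validator has to re-derive those blocks (and, presumably, rebuild its
+	// proof storage for them) before the per-block proof checks at the end can pass.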
+ sys.L2ELValidatorNode().Stop() + sys.L2CLValidator.Stop() + + var blockNumbers []uint64 + // produce some transactions while the node is down + for i := 0; i < 5; i++ { + tx := alice.Transfer(bob.Address(), eth.OneHundredthEther) + receipt, err := tx.Included.Eval(ctx) + require.NoError(gt, err) + require.Equal(gt, types.ReceiptStatusSuccessful, receipt.Status) + blockNumbers = append(blockNumbers, receipt.BlockNumber.Uint64()) + } + + // restart the node and ensure it can sync the missing blocks + t.Logf("Restarting validator L2 CL and EL to resync") + sys.L2ELValidatorNode().Start() + sys.L2CLValidator.Start() + + time.Sleep(3 * time.Second) + + err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { + status := sys.L2CLValidator.SyncStatus() + return status.UnsafeL2.Number > blockNumbers[len(blockNumbers)-1], nil + }) + require.NoError(gt, err, "Validator L2 CL failed to resync to latest block") + + t.Logf("Fetching and verifying proofs for transactions produced while node was down") + // verify the proofs for the transactions produced while the node was down + for _, blockNumber := range blockNumbers { + utils.FetchAndVerifyProofs(t, sys, bob.Address(), []common.Hash{}, blockNumber) + utils.FetchAndVerifyProofs(t, sys, alice.Address(), []common.Hash{}, blockNumber) + } +} diff --git a/crates/optimism/tests/proofs/core/simple_storage_test.go b/crates/optimism/tests/proofs/core/simple_storage_test.go new file mode 100644 index 00000000000..e2c59f69000 --- /dev/null +++ b/crates/optimism/tests/proofs/core/simple_storage_test.go @@ -0,0 +1,172 @@ +package core + +import ( + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/op-rs/op-geth/proofs/utils" +) + +func TestStorageProofUsingSimpleStorageContract(gt *testing.T) { + t := devtest.SerialT(gt) + + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + + // deploy contract via helper + contract, receipt := utils.DeploySimpleStorage(t, user) + t.Logf("contract deployed at address %s in L2 block %d", contract.Address().Hex(), receipt.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt.BlockNumber.Uint64()) + // fetch and verify initial proof (should be zeroed storage) + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0")}, receipt.BlockNumber.Uint64()) + + type caseEntry struct { + Block uint64 + Value *big.Int + } + var cases []caseEntry + for i := 1; i <= 5; i++ { + writeVal := big.NewInt(int64(i * 10)) + callRes := contract.SetValue(user, writeVal) + + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + Value: writeVal, + }) + t.Logf("setValue transaction included in L2 block %d", callRes.BlockNumber) + } + + // test reset storage to zero + callRes := contract.SetValue(user, big.NewInt(0)) + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + Value: big.NewInt(0), + }) + t.Logf("reset setValue transaction included in L2 block %d", callRes.BlockNumber) + + sys.L2ELValidatorNode().WaitForBlockNumber(callRes.BlockNumber.Uint64()) + // for each case, get proof and verify + for _, c := range cases { + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0")}, c.Block) + } + + // test with non-existent storage slot + nonExistentSlot := common.HexToHash("0xdeadbeef") + utils.FetchAndVerifyProofs(t, 
sys, contract.Address(), []common.Hash{nonExistentSlot}, cases[len(cases)-1].Block)
+}
+
+func TestStorageProofUsingMultiStorageContract(gt *testing.T) {
+	t := devtest.SerialT(gt)
+
+	sys := utils.NewMixedOpProofPreset(t)
+	user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther)
+
+	// deploy contract via helper
+	contract, receipt := utils.DeployMultiStorage(t, user)
+	t.Logf("contract deployed at address %s in L2 block %d", contract.Address().Hex(), receipt.BlockNumber.Uint64())
+
+	sys.L2ELValidatorNode().WaitForBlockNumber(receipt.BlockNumber.Uint64())
+	// fetch and verify initial proof (should be zeroed storage)
+	utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0"), common.HexToHash("0x1")}, receipt.BlockNumber.Uint64())
+
+	// set multiple storage slots
+	type caseEntry struct {
+		Block      uint64
+		SlotValues map[common.Hash]*big.Int
+	}
+	var cases []caseEntry
+
+	for i := 1; i <= 5; i++ {
+		aVal := big.NewInt(int64(i * 10))
+		bVal := big.NewInt(int64(i * 20))
+		callRes := contract.SetValues(user, aVal, bVal)
+
+		cases = append(cases, caseEntry{
+			Block: callRes.BlockNumber.Uint64(),
+			SlotValues: map[common.Hash]*big.Int{
+				common.HexToHash("0x0"): aVal,
+				common.HexToHash("0x1"): bVal,
+			},
+		})
+		t.Logf("setValues transaction included in L2 block %d", callRes.BlockNumber)
+	}
+
+	// test resetting the storage slots to zero
+	callRes := contract.SetValues(user, big.NewInt(0), big.NewInt(0))
+	cases = append(cases, caseEntry{
+		Block: callRes.BlockNumber.Uint64(),
+		SlotValues: map[common.Hash]*big.Int{
+			common.HexToHash("0x0"): big.NewInt(0),
+			common.HexToHash("0x1"): big.NewInt(0),
+		},
+	})
+	t.Logf("reset setValues transaction included in L2 block %d", callRes.BlockNumber)
+
+	sys.L2ELValidatorNode().WaitForBlockNumber(callRes.BlockNumber.Uint64())
+	// for each case, get the proof and verify it
+	for _, c := range cases {
+		var slots []common.Hash
+		for slot := range c.SlotValues {
+			slots = append(slots, slot)
+		}
+
+		utils.FetchAndVerifyProofs(t, sys, contract.Address(), slots, c.Block)
+	}
+}
+
+func TestTokenVaultStorageProofs(gt *testing.T) {
+	t := devtest.SerialT(gt)
+
+	sys := utils.NewMixedOpProofPreset(t)
+	// funder EOAs that will deploy / interact
+	alice := sys.FunderL2.NewFundedEOA(eth.OneEther)
+	bob := sys.FunderL2.NewFundedEOA(eth.OneEther)
+
+	// deploy contract
+	contract, deployBlock := utils.DeployTokenVault(t, alice)
+	t.Logf("TokenVault deployed at %s block=%d", contract.Address().Hex(), deployBlock.BlockNumber.Uint64())
+
+	userAddr := alice.Address()
+
+	// call deposit (payable)
+	depositAmount := eth.OneHundredthEther
+	depRes := contract.Deposit(alice, depositAmount)
+	depositBlock := depRes.BlockNumber.Uint64()
+	t.Logf("deposit included in block %d", depositBlock)
+
+	// call approve(spender, amount), using a second funded EOA (bob) as the spender
+	approveAmount := big.NewInt(100)
+	spenderAddr := bob.Address()
+	approveRes := contract.Approve(alice, spenderAddr, approveAmount)
+	approveBlock := approveRes.BlockNumber.Uint64()
+	t.Logf("approve included in block %d", approveBlock)
+
+	// call deactivateAllowance(spender)
+	deactRes := contract.DeactivateAllowance(alice, spenderAddr)
+	deactBlock := deactRes.BlockNumber.Uint64()
+	t.Logf("deactivateAllowance included in block %d", deactBlock)
+
+	sys.L2ELValidatorNode().WaitForBlockNumber(deactBlock)
+
+	// balance slot for the user
+	balanceSlot := contract.GetBalanceSlot(userAddr)
+	// nested allowance slot owner=user, spender=spenderAddr
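+	// For reference, the standard Solidity derivations these slot helpers are assumed
+	// to implement: balances[user] (mapping at slot 0) lives at
+	// keccak256(pad32(user) . pad32(0)); allowances[owner][spender] (nested mapping at
+	// slot 1) stores its `amount` field at keccak256(pad32(spender) . keccak256(pad32(owner) . pad32(1)))
+	// and `active` in the slot after it; depositors[i] (dynamic array at slot 2) lives
+	// at keccak256(pad32(2)) + i.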
+	allowanceSlot := contract.GetAllowanceSlot(userAddr, spenderAddr)
+	// depositors[0] element slot
+	depositor0Slot := contract.GetDepositorSlot(0)
+
+	// fetch & verify proofs at the appropriate blocks
+	// balance after deposit (depositBlock)
+	t.Logf("Verifying balance slot %s at deposit block %d", balanceSlot.Hex(), depositBlock)
+	utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{balanceSlot, depositor0Slot}, depositBlock)
+	// allowance after approve (approveBlock)
+	t.Logf("Verifying allowance slot %s at approve block %d", allowanceSlot.Hex(), approveBlock)
+	utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{allowanceSlot}, approveBlock)
+	// after deactivation, the allowance should be zero at deactBlock
+	t.Logf("Verifying allowance slot %s at deactivate block %d", allowanceSlot.Hex(), deactBlock)
+	utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{allowanceSlot}, deactBlock)
+}
diff --git a/crates/optimism/tests/proofs/prune/init_test.go b/crates/optimism/tests/proofs/prune/init_test.go
new file mode 100644
index 00000000000..bd9082e259d
--- /dev/null
+++ b/crates/optimism/tests/proofs/prune/init_test.go
@@ -0,0 +1,14 @@
+package prune
+
+import (
+	"testing"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/presets"
+	"github.com/op-rs/op-geth/proofs/utils"
+)
+
+// TestMain creates the test-setups against the shared backend
+func TestMain(m *testing.M) {
+	// Other setups may be added here, hydrated from the same orchestrator
+	presets.DoMain(m, utils.WithMixedOpProofPreset())
+}
diff --git a/crates/optimism/tests/proofs/prune/prune_test.go b/crates/optimism/tests/proofs/prune/prune_test.go
new file mode 100644
index 00000000000..9ba50d1ac88
--- /dev/null
+++ b/crates/optimism/tests/proofs/prune/prune_test.go
@@ -0,0 +1,138 @@
+package prune
+
+import (
+	"testing"
+	"time"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/devtest"
+	"github.com/ethereum-optimism/optimism/op-service/apis"
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/op-rs/op-geth/proofs/utils"
+	"github.com/stretchr/testify/require"
+)
+
+// Steps:
+// 1) Create some txs and validate proofs for a block (pre-prune).
+// 2) Wait for that specific block to be pruned:
+//   - Ensure the chain advances enough so the pruner *can* move `earliest` past `targetBlock`
+//     (i.e. latest >= targetBlock + proofWindow).
+//   - Poll debug_proofsSyncStatus until earliest > targetBlock (meaning targetBlock is now pruned).
+//
+// 3) Re-run the getProof validation checks and confirm everything is consistent for the new earliest block.
+func TestPruneProofStorageWithGetProofConsistency(gt *testing.T) {
+	t := devtest.SerialT(gt)
+	ctx := t.Ctx()
+
+	sys := utils.NewMixedOpProofPreset(t)
+
+	// Defined in the devnet yaml
+	var proofWindow = uint64(200)
+
+	// The time window within which the prune should be detected.
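+	// With the devnet's short block times the chain must advance roughly proofWindow
+	// blocks past the target before `earliest` can move, hence the generous timeout.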
+	var pruneDetectTimeout = 5 * time.Minute
+
+	opRethELNode := sys.RethL2ELNode()
+	ethClient := opRethELNode.Escape().EthClient()
+
+	// -----------------------------
+	// (1) Create tx + validate proof pre-prune
+	// -----------------------------
+	const numAccounts = 2
+	const initialFunding = 10
+
+	accounts := sys.FunderL2.NewFundedEOAs(numAccounts, eth.Ether(initialFunding))
+	recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1))
+	recipientAddr := recipient.Address()
+	transferAmount := eth.Ether(1)
+
+	t.Log("Sending transactions from both accounts (to create state changes)")
+	tx0 := accounts[0].Transfer(recipientAddr, transferAmount)
+	tx1 := accounts[1].Transfer(recipientAddr, transferAmount)
+
+	receipt0, err := tx0.Included.Eval(ctx)
+	require.NoError(t, err)
+	require.Equal(t, types.ReceiptStatusSuccessful, receipt0.Status)
+
+	receipt1, err := tx1.Included.Eval(ctx)
+	require.NoError(t, err)
+	require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status)
+
+	// Choose a deterministic target block: the later of the two inclusion blocks.
+	targetBlock := receipt0.BlockNumber.Uint64()
+	if receipt1.BlockNumber.Uint64() > targetBlock {
+		targetBlock = receipt1.BlockNumber.Uint64()
+	}
+	t.Logf("Target block for proof validation (pre-prune): %d", targetBlock)
+
+	// Make sure the validator has the block too (keeps the test stable).
+	sys.L2ELValidatorNode().WaitForBlockNumber(targetBlock)
+
+	// Pre-prune proof verification at targetBlock.
+	// This verifies the proof against the block's state root (efficient correctness check).
+	t.Logf("Pre-prune: verifying getProof proofs at block %d", targetBlock)
+	utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, targetBlock)
+	utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, targetBlock)
+	t.Log("Pre-prune: proofs verified successfully")
+
+	// -----------------------------
+	// (2) Wait until targetBlock is pruned (earliest > targetBlock)
+	// -----------------------------
+	initialStatus := getProofSyncStatus(t, ethClient)
+	t.Logf("Initial proofs sync status: earliest=%d latest=%d", initialStatus.Earliest, initialStatus.Latest)
+
+	// Ensure we advance far enough that pruning *can* move earliest past targetBlock.
+	// If latest < targetBlock + proofWindow, earliest cannot advance beyond targetBlock yet.
+	requiredLatest := targetBlock + proofWindow
+	if initialStatus.Latest < requiredLatest {
+		t.Logf("Waiting for chain to advance to at least block %d so pruning can pass targetBlock", requiredLatest)
+		opRethELNode.WaitForBlockNumber(requiredLatest)
+	}
+
+	t.Logf("Waiting for pruner to advance earliest past targetBlock=%d ...", targetBlock)
+	waitUntil := time.Now().Add(pruneDetectTimeout)
+
+	var prunedStatus proofSyncStatus
+	for {
+		if time.Now().After(waitUntil) {
+			t.Errorf("Timed out waiting for prune: earliest did not advance past targetBlock=%d within %s", targetBlock, pruneDetectTimeout)
+			// Errorf alone does not stop the test, so bail out explicitly rather than polling forever.
+			return
+		}
+
+		prunedStatus = getProofSyncStatus(t, ethClient)
+		t.Logf("Polling proofs sync status: earliest=%d latest=%d (target=%d)", prunedStatus.Earliest, prunedStatus.Latest, targetBlock)
+
+		// This is the key condition: the specific block we validated earlier is now out of the window.
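+		// Strict inequality: earliest == targetBlock would mean the target is still the
+		// oldest retained block, i.e. not yet pruned.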
+ if prunedStatus.Earliest > targetBlock { + break + } + + time.Sleep(5 * time.Second) + } + + currentProofWindow := prunedStatus.Latest - prunedStatus.Earliest + require.GreaterOrEqual(t, currentProofWindow, proofWindow, "pruner should maintain at least the configured proof window") + t.Logf("Detected prune past targetBlock. Now earliest=%d latest=%d window=%d", prunedStatus.Earliest, prunedStatus.Latest, currentProofWindow) + + // ----------------------------- + // (3) Post-prune consistency checks for getProof + // ----------------------------- + t.Logf("Post-prune: expecting getProof verification to succeed at new earliest block=%d", prunedStatus.Earliest) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, prunedStatus.Earliest) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, prunedStatus.Earliest) + t.Log("Post-prune: getProof consistency checks passed") +} + +type proofSyncStatus struct { + Earliest uint64 `json:"earliest"` + Latest uint64 `json:"latest"` +} + +func getProofSyncStatus(t devtest.T, client apis.EthClient) proofSyncStatus { + var result proofSyncStatus + err := client.RPC().CallContext(t.Ctx(), &result, "debug_proofsSyncStatus") + if err != nil { + t.Errorf("debug_proofsSyncStatus call failed: %v", err) + } + return result +} diff --git a/crates/optimism/tests/proofs/reorg/init_test.go b/crates/optimism/tests/proofs/reorg/init_test.go new file mode 100644 index 00000000000..cfcf49cbe55 --- /dev/null +++ b/crates/optimism/tests/proofs/reorg/init_test.go @@ -0,0 +1,14 @@ +package reorg + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/op-rs/op-geth/proofs/utils" +) + +// TestMain creates the test-setups against the shared backend +func TestMain(m *testing.M) { + // Other setups may be added here, hydrated from the same orchestrator + presets.DoMain(m, utils.WithMixedOpProofPreset()) +} diff --git a/crates/optimism/tests/proofs/reorg/reorg_test.go b/crates/optimism/tests/proofs/reorg/reorg_test.go new file mode 100644 index 00000000000..fd8cd3174a8 --- /dev/null +++ b/crates/optimism/tests/proofs/reorg/reorg_test.go @@ -0,0 +1,195 @@ +package reorg + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +func TestReorgUsingAccountProof(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + + sys := utils.NewMixedOpProofPreset(t) + l := sys.Log + + ia := sys.TestSequencer.Escape().ControlAPI(sys.L2Chain.ChainID()) + + // stop batcher on chain A + sys.L2Batcher.Stop() + + // two EOAs for a sample transfer tx used later in a conflicting block + alice := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + bob := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + + user := sys.FunderL2.NewFundedEOA(eth.OneEther) + contract, deployBlock := utils.DeploySimpleStorage(t, user) + t.Logf("SimpleStorage deployed at %s block=%d", contract.Address().Hex(), deployBlock.BlockNumber.Uint64()) + + time.Sleep(12 * time.Second) + divergenceHead := sys.L2Chain.WaitForBlock() + // build up some blocks 
that will be reorged away
+
+	type caseEntry struct {
+		Block uint64
+		addr  common.Address
+		slots []common.Hash
+	}
+	var cases []caseEntry
+	for i := 0; i < 3; i++ {
+		tx := alice.Transfer(bob.Address(), eth.OneGWei)
+		receipt, err := tx.Included.Eval(ctx)
+		require.NoError(gt, err)
+		require.Equal(gt, types.ReceiptStatusSuccessful, receipt.Status)
+
+		cases = append(cases, caseEntry{
+			Block: receipt.BlockNumber.Uint64(),
+			addr:  alice.Address(),
+			slots: []common.Hash{},
+		})
+		cases = append(cases, caseEntry{
+			Block: receipt.BlockNumber.Uint64(),
+			addr:  bob.Address(),
+			slots: []common.Hash{},
+		})
+
+		// also include the contract account in the proofs to verify
+		val := big.NewInt(int64(i * 10))
+		callRes := contract.SetValue(user, val)
+
+		cases = append(cases, caseEntry{
+			Block: callRes.BlockNumber.Uint64(),
+			addr:  contract.Address(),
+			slots: []common.Hash{common.HexToHash("0x0")},
+		})
+	}
+
+	// deploy another contract in the soon-to-be-reorged blocks
+	{
+		rContract, rDeployBlock := utils.DeploySimpleStorage(t, user)
+		t.Logf("Reorg SimpleStorage deployed at %s block=%d", rContract.Address().Hex(), rDeployBlock.BlockNumber.Uint64())
+
+		cases = append(cases, caseEntry{
+			Block: rDeployBlock.BlockNumber.Uint64(),
+			addr:  rContract.Address(),
+			slots: []common.Hash{common.HexToHash("0x0")},
+		})
+	}
+
+	sys.L2CLSequencer.StopSequencer()
+
+	var divergenceBlockNumber uint64
+	var originalRef eth.L2BlockRef
+	// prepare and sequence a conflicting block for the L2A chain
+	{
+		divergenceBlockRef := sys.L2ELSequencerNode().BlockRefByNumber(divergenceHead.Number)
+
+		l.Info("Expect to reorg the chain on block", "number", divergenceBlockRef.Number, "head", divergenceHead, "parent", divergenceBlockRef.ParentID().Hash)
+		divergenceBlockNumber = divergenceBlockRef.Number
+		originalRef = divergenceBlockRef
+
+		parentOfDivergenceHead := divergenceBlockRef.ParentID()
+
+		l.Info("Sequencing a conflicting block", "divergenceBlockRef", divergenceBlockRef, "parent", parentOfDivergenceHead)
+
+		// sequence a conflicting block with a simple transfer tx, built on the parent of
+		// the divergence head so it competes with the existing block at that height
+		{
+			err := ia.New(ctx, seqtypes.BuildOpts{
+				Parent:   parentOfDivergenceHead.Hash,
+				L1Origin: nil,
+			})
+			require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error")
+
+			// include a simple transfer tx in the opened block
+			{
+				to := bob.PlanTransfer(alice.Address(), eth.OneGWei)
+				opt := txplan.Combine(to)
+				ptx := txplan.NewPlannedTx(opt)
+				signedTx, err := ptx.Signed.Eval(ctx)
+				require.NoError(t, err, "Expected to be able to evaluate a planned transaction on op-test-sequencer, but got error")
+				txdata, err := signedTx.MarshalBinary()
+				require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error")
+
+				err = ia.IncludeTx(ctx, txdata)
+				require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error")
+
+				cases = append(cases, caseEntry{
+					Block: divergenceHead.Number,
+					addr:  alice.Address(),
+					slots: []common.Hash{},
+				})
+				cases = append(cases, caseEntry{
+					Block: divergenceHead.Number,
+					addr:  bob.Address(),
+					slots: []common.Hash{},
+				})
+			}
+
+			err = ia.Next(ctx)
+			require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error")
+		}
+	}
+
+	// start batcher on chain A
+	sys.L2Batcher.Start()
+
+	// sequence a second block with op-test-sequencer (no L1 origin override)
+	{
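+		// This second block builds on the current unsafe head, which now includes the
+		// conflicting block, extending the alternative chain once more before sequencing
+		// is handed back to op-node.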
l.Info("Sequencing with op-test-sequencer (no L1 origin override)") + err := ia.New(ctx, seqtypes.BuildOpts{ + Parent: sys.L2ELSequencerNode().BlockRefByLabel(eth.Unsafe).Hash, + L1Origin: nil, + }) + require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") + time.Sleep(2 * time.Second) + + err = ia.Next(ctx) + require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") + time.Sleep(2 * time.Second) + } + + // continue sequencing with consensus node (op-node) + sys.L2CLSequencer.StartSequencer() + + for i := 0; i < 3; i++ { + sys.L2Chain.WaitForBlock() + } + + latestBlock := sys.L2Chain.WaitForBlock() + sys.L2ELValidatorNode().WaitForBlockNumber(latestBlock.Number) + + // verify that the L2A validator has reorged and reached the latest block + err := wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { + blockRef, err := sys.L2ELValidatorNode().Escape().EthClient().BlockRefByNumber(ctx, latestBlock.Number) + if err != nil { + // this could happen if the validator is still syncing after reorg + l.Warn("Error fetching block reference from validator", "error", err) + return false, nil + } + return blockRef.Hash == latestBlock.Hash, nil + }) + require.NoError(t, err, "Expected block hash to match latest block hash on validator") + + reorgedRef_A, err := sys.L2ELSequencerNode().Escape().EthClient().BlockRefByNumber(ctx, divergenceBlockNumber) + require.NoError(t, err, "Expected to be able to call BlockRefByNumber API, but got error") + + l.Info("Reorged chain on divergence block number (prior the reorg)", "number", divergenceBlockNumber, "head", originalRef.Hash, "parent", originalRef.ParentID().Hash) + l.Info("Reorged chain on divergence block number (after the reorg)", "number", divergenceBlockNumber, "head", reorgedRef_A.Hash, "parent", reorgedRef_A.ParentID().Hash) + require.NotEqual(t, originalRef.Hash, reorgedRef_A.Hash, "Expected to get different heads on divergence block number, but got the same hash, so no reorg happened on chain A") + require.Equal(t, originalRef.ParentID().Hash, reorgedRef_A.ParentHash, "Expected to get same parent hashes on divergence block number, but got different hashes") + + // verify that the accounts involved in the conflicting blocks + for _, c := range cases { + utils.FetchAndVerifyProofs(t, sys, c.addr, c.slots, c.Block) + } +} diff --git a/crates/optimism/tests/proofs/utils/contract.go b/crates/optimism/tests/proofs/utils/contract.go new file mode 100644 index 00000000000..e4e5cffa491 --- /dev/null +++ b/crates/optimism/tests/proofs/utils/contract.go @@ -0,0 +1,26 @@ +package utils + +import ( + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" +) + +type Contract struct { + address common.Address + parsedABI abi.ABI +} + +func NewContract(address common.Address, parsedABI abi.ABI) *Contract { + return &Contract{ + address: address, + parsedABI: parsedABI, + } +} + +func (c *Contract) Address() common.Address { + return c.address +} + +func (c *Contract) ABI() abi.ABI { + return c.parsedABI +} diff --git a/crates/optimism/tests/proofs/utils/multistorage.go b/crates/optimism/tests/proofs/utils/multistorage.go new file mode 100644 index 00000000000..9c3ae260f16 --- /dev/null +++ b/crates/optimism/tests/proofs/utils/multistorage.go @@ -0,0 +1,45 @@ +package utils + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + 
"github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +const MultiStorageArtifact = "../contracts/artifacts/MultiStorage.sol/MultiStorage.json" + +type MultiStorage struct { + *Contract + t devtest.T +} + +func (c *MultiStorage) SetValues(user *dsl.EOA, a, b *big.Int) *types.Receipt { + ctx := c.t.Ctx() + callData, err := c.parsedABI.Pack("setValues", a, b) + if err != nil { + require.NoError(c.t, err, "failed to pack set call data") + } + + callTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(callData)) + callRes, err := callTx.Included.Eval(ctx) + if err != nil { + require.NoError(c.t, err, "failed to create set tx") + } + + if callRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "set transaction failed") + } + + return callRes +} + +func DeployMultiStorage(t devtest.T, user *dsl.EOA) (*MultiStorage, *types.Receipt) { + parsedABI, bin := LoadArtifact(t, MultiStorageArtifact) + contractAddress, receipt := DeployContract(t, user, bin) + contract := NewContract(contractAddress, parsedABI) + return &MultiStorage{contract, t}, receipt +} diff --git a/crates/optimism/tests/proofs/utils/preset.go b/crates/optimism/tests/proofs/utils/preset.go new file mode 100644 index 00000000000..7e284d111d5 --- /dev/null +++ b/crates/optimism/tests/proofs/utils/preset.go @@ -0,0 +1,327 @@ +package utils + +import ( + "os" + "strings" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type L2ELClient string + +const ( + L2ELClientGeth L2ELClient = "geth" + L2ELClientReth L2ELClient = "reth" +) + +type L2ELNodeID struct { + stack.L2ELNodeID + Client L2ELClient +} + +type L2ELNode struct { + *dsl.L2ELNode + Client L2ELClient +} + +type MixedOpProofPreset struct { + Log log.Logger + T devtest.T + ControlPlane stack.ControlPlane + + L1Network *dsl.L1Network + L1EL *dsl.L1ELNode + + L2Chain *dsl.L2Network + L2Batcher *dsl.L2Batcher + + L2ELSequencer *L2ELNode + L2CLSequencer *dsl.L2CLNode + + L2ELValidator *L2ELNode + L2CLValidator *dsl.L2CLNode + + Wallet *dsl.HDWallet + + FaucetL1 *dsl.Faucet + FaucetL2 *dsl.Faucet + FunderL1 *dsl.Funder + FunderL2 *dsl.Funder + + TestSequencer *dsl.TestSequencer +} + +func (m *MixedOpProofPreset) L2Network() *dsl.L2Network { + return m.L2Chain +} + +func (m *MixedOpProofPreset) L2ELSequencerNode() *dsl.L2ELNode { + return m.L2ELSequencer.L2ELNode +} + +func (m *MixedOpProofPreset) L2ELValidatorNode() *dsl.L2ELNode { + return m.L2ELValidator.L2ELNode +} + +// GethL2ELNode returns first L2 EL nodes that are running op-geth +func (m *MixedOpProofPreset) GethL2ELNode() *dsl.L2ELNode { + if m.L2ELSequencer.Client == L2ELClientGeth { + return m.L2ELSequencer.L2ELNode + } + + if 
m.L2ELValidator.Client == L2ELClientGeth { + return m.L2ELValidator.L2ELNode + } + + return nil +} + +// RethL2ELNode returns first L2 EL nodes that are running op-reth +func (m *MixedOpProofPreset) RethL2ELNode() *dsl.L2ELNode { + if m.L2ELSequencer.Client == L2ELClientReth { + return m.L2ELSequencer.L2ELNode + } + + if m.L2ELValidator.Client == L2ELClientReth { + return m.L2ELValidator.L2ELNode + } + return nil +} + +func WithMixedOpProofPreset() stack.CommonOption { + return stack.MakeCommon(DefaultMixedOpProofSystem(&DefaultMixedOpProofSystemIDs{})) +} + +func L2NodeMatcher[ + I interface { + comparable + Key() string + }, E stack.Identifiable[I]](value ...string) stack.Matcher[I, E] { + return match.MatchElemFn[I, E](func(elem E) bool { + for _, v := range value { + if !strings.Contains(elem.ID().Key(), v) { + return false + } + } + return true + }) +} + +func NewMixedOpProofPreset(t devtest.T) *MixedOpProofPreset { + system := shim.NewSystem(t) + orch := presets.Orchestrator() + orch.Hydrate(system) + + t.Gate().Equal(len(system.L2Networks()), 1, "expected exactly one L2 network") + t.Gate().Equal(len(system.L1Networks()), 1, "expected exactly one L1 network") + + l1Net := system.L1Network(match.FirstL1Network) + l2Net := system.L2Network(match.Assume(t, match.L2ChainA)) + + t.Gate().GreaterOrEqual(len(l2Net.L2CLNodes()), 2, "expected at least two L2CL nodes") + + sequencerCL := l2Net.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) + sequencerELInner := l2Net.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) + var sequencerEL *L2ELNode + if strings.Contains(sequencerELInner.ID().String(), "op-reth") { + sequencerEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), + Client: L2ELClientReth, + } + } else if strings.Contains(sequencerELInner.ID().String(), "op-geth") { + sequencerEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), + Client: L2ELClientGeth, + } + } else { + t.Error("unexpected L2EL client for sequencer") + t.FailNow() + } + + verifierCL := l2Net.L2CLNode(match.Assume(t, + match.And( + match.Not(match.WithSequencerActive(t.Ctx())), + match.Not(sequencerCL.ID()), + ))) + verifierELInner := l2Net.L2ELNode(match.Assume(t, + match.And( + match.EngineFor(verifierCL), + match.Not(sequencerEL.ID()), + ))) + var verifierEL *L2ELNode + if strings.Contains(verifierELInner.ID().String(), "op-reth") { + verifierEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), + Client: L2ELClientReth, + } + } else if strings.Contains(verifierELInner.ID().String(), "op-geth") { + verifierEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), + Client: L2ELClientGeth, + } + } else { + t.Error("unexpected L2EL client for verifier") + t.FailNow() + } + + out := &MixedOpProofPreset{ + Log: t.Logger(), + T: t, + ControlPlane: orch.ControlPlane(), + L1Network: dsl.NewL1Network(l1Net), + L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), + L2Chain: dsl.NewL2Network(l2Net, orch.ControlPlane()), + L2Batcher: dsl.NewL2Batcher(l2Net.L2Batcher(match.Assume(t, match.FirstL2Batcher))), + L2ELSequencer: sequencerEL, + L2CLSequencer: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), + L2ELValidator: verifierEL, + L2CLValidator: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), + Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation + FaucetL2: dsl.NewFaucet(l2Net.Faucet(match.Assume(t, match.FirstFaucet))), + + TestSequencer: 
dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), + } + out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2ELSequencer) + return out +} + +type DefaultMixedOpProofSystemIDs struct { + L1 stack.L1NetworkID + L1EL stack.L1ELNodeID + L1CL stack.L1CLNodeID + + L2 stack.L2NetworkID + + L2CLSequencer stack.L2CLNodeID + L2ELSequencer L2ELNodeID + + L2CLValidator stack.L2CLNodeID + L2ELValidator L2ELNodeID + + L2Batcher stack.L2BatcherID + L2Proposer stack.L2ProposerID + L2Challenger stack.L2ChallengerID + + TestSequencer stack.TestSequencerID +} + +func NewDefaultMixedOpProofSystemIDs(l1ID, l2ID eth.ChainID) DefaultMixedOpProofSystemIDs { + ids := DefaultMixedOpProofSystemIDs{ + L1: stack.L1NetworkID(l1ID), + L1EL: stack.NewL1ELNodeID("l1", l1ID), + L1CL: stack.NewL1CLNodeID("l1", l1ID), + L2: stack.L2NetworkID(l2ID), + L2CLSequencer: stack.NewL2CLNodeID("sequencer", l2ID), + L2CLValidator: stack.NewL2CLNodeID("validator", l2ID), + L2Batcher: stack.NewL2BatcherID("main", l2ID), + L2Proposer: stack.NewL2ProposerID("main", l2ID), + L2Challenger: stack.NewL2ChallengerID("main", l2ID), + TestSequencer: "test-sequencer", + } + + // default to op-geth for sequencer and op-reth for validator + if os.Getenv("OP_DEVSTACK_PROOF_SEQUENCER_EL") == "op-reth" { + ids.L2ELSequencer = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-reth", l2ID), + Client: L2ELClientReth, + } + } else { + ids.L2ELSequencer = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-geth", l2ID), + Client: L2ELClientGeth, + } + } + + if os.Getenv("OP_DEVSTACK_PROOF_VALIDATOR_EL") == "op-geth" { + ids.L2ELValidator = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("validator-op-geth", l2ID), + Client: L2ELClientGeth, + } + } else { + ids.L2ELValidator = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("validator-op-reth", l2ID), + Client: L2ELClientReth, + } + } + + return ids +} + +func DefaultMixedOpProofSystem(dest *DefaultMixedOpProofSystemIDs) stack.Option[*sysgo.Orchestrator] { + ids := NewDefaultMixedOpProofSystemIDs(sysgo.DefaultL1ID, sysgo.DefaultL2AID) + return defaultMixedOpProofSystemOpts(&ids, dest) +} + +func defaultMixedOpProofSystemOpts(src, dest *DefaultMixedOpProofSystemIDs) stack.CombinedOption[*sysgo.Orchestrator] { + opt := stack.Combine[*sysgo.Orchestrator]() + opt.Add(stack.BeforeDeploy(func(o *sysgo.Orchestrator) { + o.P().Logger().Info("Setting up") + })) + + opt.Add(sysgo.WithMnemonicKeys(devkeys.TestMnemonic)) + + // Get artifacts path + artifactsPath := os.Getenv("OP_DEPLOYER_ARTIFACTS") + if artifactsPath == "" { + panic("OP_DEPLOYER_ARTIFACTS is not set") + } + + opt.Add(sysgo.WithDeployer(), + sysgo.WithDeployerPipelineOption( + sysgo.WithDeployerCacheDir(artifactsPath), + ), + sysgo.WithDeployerOptions( + func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + builder.WithL1ContractsLocator(artifacts.MustNewFileLocator(artifactsPath)) + builder.WithL2ContractsLocator(artifacts.MustNewFileLocator(artifactsPath)) + }, + sysgo.WithCommons(src.L1.ChainID()), + sysgo.WithPrefundedL2(src.L1.ChainID(), src.L2.ChainID()), + ), + ) + + opt.Add(sysgo.WithL1Nodes(src.L1EL, src.L1CL)) + + // Spawn L2 sequencer nodes + if src.L2ELSequencer.Client == L2ELClientReth { + opt.Add(sysgo.WithOpReth(src.L2ELSequencer.L2ELNodeID)) + } else { + 
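+		// Fallback branch: per NewDefaultMixedOpProofSystemIDs, the sequencer EL
+		// defaults to op-geth unless OP_DEVSTACK_PROOF_SEQUENCER_EL=op-reth.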
opt.Add(sysgo.WithOpGeth(src.L2ELSequencer.L2ELNodeID)) + } + opt.Add(sysgo.WithL2CLNode(src.L2CLSequencer, src.L1CL, src.L1EL, src.L2ELSequencer.L2ELNodeID, sysgo.L2CLSequencer())) + + // Spawn L2 validator nodes + if src.L2ELValidator.Client == L2ELClientReth { + opt.Add(sysgo.WithOpReth(src.L2ELValidator.L2ELNodeID)) + } else { + opt.Add(sysgo.WithOpGeth(src.L2ELValidator.L2ELNodeID)) + } + opt.Add(sysgo.WithL2CLNode(src.L2CLValidator, src.L1CL, src.L1EL, src.L2ELValidator.L2ELNodeID)) + + opt.Add(sysgo.WithBatcher(src.L2Batcher, src.L1EL, src.L2CLSequencer, src.L2ELSequencer.L2ELNodeID)) + opt.Add(sysgo.WithProposer(src.L2Proposer, src.L1EL, &src.L2CLSequencer, nil)) + + opt.Add(sysgo.WithFaucets([]stack.L1ELNodeID{src.L1EL}, []stack.L2ELNodeID{src.L2ELSequencer.L2ELNodeID})) + + opt.Add(sysgo.WithTestSequencer(src.TestSequencer, src.L1CL, src.L2CLSequencer, src.L1EL, src.L2ELSequencer.L2ELNodeID)) + + opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { + *dest = *src + })) + + return opt +} diff --git a/crates/optimism/tests/proofs/utils/proof.go b/crates/optimism/tests/proofs/utils/proof.go new file mode 100644 index 00000000000..16a6287b9be --- /dev/null +++ b/crates/optimism/tests/proofs/utils/proof.go @@ -0,0 +1,137 @@ +package utils + +import ( + "bytes" + "fmt" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/require" +) + +// NormalizeProofResponse standardizes an AccountResult obtained from eth_getProof +// across different client implementations (e.g., Geth, Reth) so that they can be +// compared meaningfully in tests. +// +// Ethereum clients may encode empty or zeroed data structures differently while +// still representing the same logical state. For example: +// - An empty storage proof may appear as [] (Geth) or ["0x80"] (Reth). +// +// This function normalizes such differences by: +// - Converting single-element proofs containing "0x80" to an empty proof slice. +func NormalizeProofResponse(res *eth.AccountResult) { + for i := range res.StorageProof { + if len(res.StorageProof[i].Proof) == 1 && bytes.Equal(res.StorageProof[i].Proof[0], []byte{0x80}) { + res.StorageProof[i].Proof = []hexutil.Bytes{} + } + } +} + +// VerifyProof verifies an account and its storage proofs against a given state root. +// +// This function extends the standard behavior of go-ethereum’s AccountResult.Verify() +// by gracefully handling the case where the account’s storage trie root is empty +// (0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421). +func VerifyProof(res *eth.AccountResult, stateRoot common.Hash) error { + // Skip storage proof verification if the storage trie is empty. 
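+	// types.EmptyRootHash is keccak256(rlp("")) =
+	// 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421,
+	// i.e. the root of an empty MPT; such an account has no storage entries
+	// to prove, so only the account proof below needs checking.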
+ if res.StorageHash != types.EmptyRootHash { + for i, entry := range res.StorageProof { + // load all MPT nodes into a DB + db := memorydb.New() + for j, encodedNode := range entry.Proof { + nodeKey := encodedNode + if len(encodedNode) >= 32 { // small MPT nodes are not hashed + nodeKey = crypto.Keccak256(encodedNode) + } + if err := db.Put(nodeKey, encodedNode); err != nil { + return fmt.Errorf("failed to load storage proof node %d of storage value %d into mem db: %w", j, i, err) + } + } + path := crypto.Keccak256(entry.Key) + val, err := trie.VerifyProof(res.StorageHash, path, db) + if err != nil { + return fmt.Errorf("failed to verify storage value %d with key %s (path %x) in storage trie %s: %w", i, entry.Key.String(), path, res.StorageHash, err) + } + if val == nil && entry.Value.ToInt().Cmp(common.Big0) == 0 { // empty storage is zero by default + continue + } + comparison, err := rlp.EncodeToBytes(entry.Value.ToInt().Bytes()) + if err != nil { + return fmt.Errorf("failed to encode storage value %d with key %s (path %x) in storage trie %s: %w", i, entry.Key.String(), path, res.StorageHash, err) + } + if !bytes.Equal(val, comparison) { + return fmt.Errorf("value %d in storage proof does not match proven value at key %s (path %x)", i, entry.Key.String(), path) + } + } + } + + accountClaimed := []any{uint64(res.Nonce), res.Balance.ToInt().Bytes(), res.StorageHash, res.CodeHash} + accountClaimedValue, err := rlp.EncodeToBytes(accountClaimed) + if err != nil { + return fmt.Errorf("failed to encode account from retrieved values: %w", err) + } + + // create a db with all account trie nodes + db := memorydb.New() + for i, encodedNode := range res.AccountProof { + nodeKey := encodedNode + if len(encodedNode) >= 32 { // small MPT nodes are not hashed + nodeKey = crypto.Keccak256(encodedNode) + } + if err := db.Put(nodeKey, encodedNode); err != nil { + return fmt.Errorf("failed to load account proof node %d into mem db: %w", i, err) + } + } + path := crypto.Keccak256(res.Address[:]) + accountProofValue, err := trie.VerifyProof(stateRoot, path, db) + if err != nil { + return fmt.Errorf("failed to verify account value with key %s (path %x) in account trie %s: %w", res.Address, path, stateRoot, err) + } + + if !bytes.Equal(accountClaimedValue, accountProofValue) { + return fmt.Errorf("L1 RPC is tricking us, account proof does not match provided deserialized values:\n"+ + " claimed: %x\n"+ + " proof: %x", accountClaimedValue, accountProofValue) + } + return nil +} + +// FetchAndVerifyProofs fetches account proofs from both L2EL and L2ELB for the given address +func FetchAndVerifyProofs(t devtest.T, sys *MixedOpProofPreset, address common.Address, slots []common.Hash, block uint64) { + ctx := t.Ctx() + gethProofRes, err := sys.GethL2ELNode().Escape().L2EthClient().GetProof(ctx, address, slots, hexutil.Uint64(block).String()) + if err != nil { + require.NoError(t, err, "failed to get proof from L2EL at block %d", block) + } + + rethProofRes, err := sys.RethL2ELNode().Escape().L2EthClient().GetProof(ctx, address, slots, hexutil.Uint64(block).String()) + if err != nil { + require.NoError(t, err, "failed to get proof from L2ELB at block %d", block) + } + NormalizeProofResponse(rethProofRes) + NormalizeProofResponse(gethProofRes) + + require.Equal(t, gethProofRes, rethProofRes, "geth and reth proofs should match") + + blockInfo, err := sys.GethL2ELNode().Escape().L2EthClient().InfoByNumber(ctx, block) + if err != nil { + require.NoError(t, err, "failed to get block info for block %d", block) + } + + err 
= VerifyProof(gethProofRes, blockInfo.Root())
+	require.NoError(t, err, "geth proof verification failed at block %d", block)
+
+	err = VerifyProof(rethProofRes, blockInfo.Root())
+	require.NoError(t, err, "reth proof verification failed at block %d", block)
+}
diff --git a/crates/optimism/tests/proofs/utils/simplestorage.go b/crates/optimism/tests/proofs/utils/simplestorage.go
new file mode 100644
index 00000000000..2e080ff9ad9
--- /dev/null
+++ b/crates/optimism/tests/proofs/utils/simplestorage.go
@@ -0,0 +1,47 @@
+package utils
+
+import (
+	"math/big"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/devtest"
+	"github.com/ethereum-optimism/optimism/op-devstack/dsl"
+	"github.com/ethereum-optimism/optimism/op-service/txplan"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/stretchr/testify/require"
+)
+
+const SimpleStorageArtifact = "../contracts/artifacts/SimpleStorage.sol/SimpleStorage.json"
+
+type SimpleStorage struct {
+	*Contract
+	t devtest.T
+}
+
+func (c *SimpleStorage) SetValue(user *dsl.EOA, value *big.Int) *types.Receipt {
+	ctx := c.t.Ctx()
+	callData, err := c.parsedABI.Pack("setValue", value)
+	require.NoError(c.t, err, "failed to pack set call data")
+
+	callTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(callData))
+	callRes, err := callTx.Included.Eval(ctx)
+	require.NoError(c.t, err, "failed to create set tx")
+
+	// err is nil once Eval succeeds, so assert the receipt status directly.
+	require.Equal(c.t, types.ReceiptStatusSuccessful, callRes.Status, "set transaction failed")
+	return callRes
+}
+
+func (c *SimpleStorage) PlanSetValue(user *dsl.EOA, value *big.Int) *txplan.PlannedTx {
+	callData, err := c.parsedABI.Pack("setValue", value)
+	require.NoError(c.t, err, "failed to pack set call data")
+
+	callTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(callData))
+	return callTx
+}
+
+func DeploySimpleStorage(t devtest.T, user *dsl.EOA) (*SimpleStorage, *types.Receipt) {
+	parsedABI, bin := LoadArtifact(t, SimpleStorageArtifact)
+	contractAddress, receipt := DeployContract(t, user, bin)
+	contract := NewContract(contractAddress, parsedABI)
+	return &SimpleStorage{contract, t}, receipt
+}
diff --git a/crates/optimism/tests/proofs/utils/tokenvault.go b/crates/optimism/tests/proofs/utils/tokenvault.go
new file mode 100644
index 00000000000..1c1fbe2d12a
--- /dev/null
+++ b/crates/optimism/tests/proofs/utils/tokenvault.go
@@ -0,0 +1,106 @@
+package utils
+
+import (
+	"math/big"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/devtest"
+	"github.com/ethereum-optimism/optimism/op-devstack/dsl"
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-service/txplan"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/stretchr/testify/require"
+)
+
+const TokenVaultArtifact = "../contracts/artifacts/TokenVault.sol/TokenVault.json"
+const BalanceSlotIndex = 0
+const AllowanceSlotIndex = 1
+const DepositorSlotIndex = 2
+
+type TokenVault struct {
+	*Contract
+	t devtest.T
+}
+
+func (c *TokenVault) Deposit(user *dsl.EOA, amount eth.ETH) *types.Receipt {
+	depositCalldata, err := c.Contract.parsedABI.Pack("deposit")
+	if err != nil {
+		require.NoError(c.t, err, "failed to pack deposit calldata")
+	}
+	depTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address),
txplan.WithData(depositCalldata), txplan.WithValue(amount)) + depRes, err := depTx.Included.Eval(c.t.Ctx()) + if err != nil { + require.NoError(c.t, err, "deposit tx failed") + } + + if depRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "deposit transaction failed") + } + + return depRes +} + +func (c *TokenVault) Approve(user *dsl.EOA, spender common.Address, amount *big.Int) *types.Receipt { + approveCalldata, err := c.Contract.parsedABI.Pack("approve", spender, amount) + if err != nil { + require.NoError(c.t, err, "failed to pack approve calldata") + } + + approveTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(approveCalldata)) + approveRes, err := approveTx.Included.Eval(c.t.Ctx()) + if err != nil { + require.NoError(c.t, err, "approve tx failed") + } + + if approveRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "approve transaction failed") + } + return approveRes +} + +func (c *TokenVault) DeactivateAllowance(user *dsl.EOA, spender common.Address) *types.Receipt { + deactCalldata, err := c.Contract.parsedABI.Pack("deactivateAllowance", spender) + if err != nil { + require.NoError(c.t, err, "failed to pack deactivateAllowance calldata") + } + deactTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(deactCalldata)) + deactRes, err := deactTx.Included.Eval(c.t.Ctx()) + if err != nil { + require.NoError(c.t, err, "deactivateAllowance tx failed") + } + + if deactRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "deactivateAllowance transaction failed") + } + return deactRes +} + +func (c *TokenVault) GetBalanceSlot(user common.Address) common.Hash { + keyBytes := common.LeftPadBytes(user.Bytes(), 32) + slotBytes := common.LeftPadBytes(new(big.Int).SetUint64(BalanceSlotIndex).Bytes(), 32) + return crypto.Keccak256Hash(append(keyBytes, slotBytes...)) +} + +func (c *TokenVault) GetAllowanceSlot(owner, spender common.Address) common.Hash { + ownerBytes := common.LeftPadBytes(owner.Bytes(), 32) + slotBytes := common.LeftPadBytes(new(big.Int).SetUint64(AllowanceSlotIndex).Bytes(), 32) + inner := crypto.Keccak256(ownerBytes, slotBytes) + spenderBytes := common.LeftPadBytes(spender.Bytes(), 32) + return crypto.Keccak256Hash(append(spenderBytes, inner...)) +} + +func (c *TokenVault) GetDepositorSlot(index uint64) common.Hash { + slotBytes := common.LeftPadBytes(new(big.Int).SetUint64(DepositorSlotIndex).Bytes(), 32) + base := crypto.Keccak256(slotBytes) + baseInt := new(big.Int).SetBytes(base) + elem := new(big.Int).Add(baseInt, new(big.Int).SetUint64(index)) + return common.BigToHash(elem) +} + +func DeployTokenVault(t devtest.T, user *dsl.EOA) (*TokenVault, *types.Receipt) { + parsedABI, bin := LoadArtifact(t, TokenVaultArtifact) + contractAddress, receipt := DeployContract(t, user, bin) + contract := NewContract(contractAddress, parsedABI) + return &TokenVault{contract, t}, receipt +} diff --git a/crates/optimism/tests/proofs/utils/utils.go b/crates/optimism/tests/proofs/utils/utils.go new file mode 100644 index 00000000000..e47fdc47e15 --- /dev/null +++ b/crates/optimism/tests/proofs/utils/utils.go @@ -0,0 +1,66 @@ +package utils + +import ( + "encoding/json" + "os" + "strings" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/accounts/abi" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +// minimal parts of artifact +type Artifact struct { + ABI json.RawMessage `json:"abi"` + Bytecode struct { + Object string `json:"object"` + } `json:"bytecode"` +} + +// LoadArtifact reads the forge artifact JSON at artifactPath and returns the parsed ABI +// and the creation bytecode (as bytes). It prefers bytecode.object (creation) and falls +// back to deployedBytecode.object if needed. +func LoadArtifact(t devtest.T, artifactPath string) (abi.ABI, []byte) { + data, err := os.ReadFile(artifactPath) + if err != nil { + require.NoError(t, err, "failed to read artifact file") + } + + var art Artifact + if err := json.Unmarshal(data, &art); err != nil { + require.NoError(t, err, "failed to unmarshal artifact JSON") + } + + parsedABI, err := abi.JSON(strings.NewReader(string(art.ABI))) + if err != nil { + require.NoError(t, err, "failed to parse contract ABI") + } + + binHex := strings.TrimSpace(art.Bytecode.Object) + if binHex == "" { + require.NoError(t, err, "artifact has no bytecode") + } + + return parsedABI, common.FromHex(binHex) +} + +// DeployContract deploys the contract creation bytecode from the given artifact. +// user must provide a Plan() method compatible with txplan.NewPlannedTx (kept generic). +func DeployContract(t devtest.T, user *dsl.EOA, bin []byte) (common.Address, *types.Receipt) { + tx := txplan.NewPlannedTx(user.Plan(), txplan.WithData(bin)) + res, err := tx.Included.Eval(t.Ctx()) + if err != nil { + require.NoError(t, err, "contract deployment tx failed") + } + + if res.Status != types.ReceiptStatusSuccessful { + require.NoError(t, err, "contract deployment transaction failed") + } + + return res.ContractAddress, res +} diff --git a/crates/optimism/tests/scripts/op-reth-entrypoint.sh b/crates/optimism/tests/scripts/op-reth-entrypoint.sh new file mode 100644 index 00000000000..cbc7af0eda5 --- /dev/null +++ b/crates/optimism/tests/scripts/op-reth-entrypoint.sh @@ -0,0 +1,74 @@ +#!/bin/sh +set -e + +# Variables to extract +DATADIR="" +PROOFS_PATH="" +CHAIN="" + +# Helper: require a value after flag +require_value() { + if [ -z "$2" ] || printf "%s" "$2" | grep -q "^--"; then + echo "ERROR: Missing value for $1" >&2 + exit 1 + fi +} + +# Parse arguments +i=1 +while [ "$i" -le "$#" ]; do + eval arg="\${$i}" + + case "$arg" in + --datadir=*) + DATADIR="${arg#*=}" + ;; + + --datadir) + eval next="\${$((i+1))}" + require_value "$arg" "$next" + DATADIR="$next" + i=$((i+1)) + ;; + + --proofs-history.storage-path=*) + PROOFS_PATH="${arg#*=}" + ;; + + --proofs-history.storage-path) + eval next="\${$((i+1))}" + require_value "$arg" "$next" + PROOFS_PATH="$next" + i=$((i+1)) + ;; + + --chain=*) + CHAIN="${arg#*=}" + ;; + + --chain) + eval next="\${$((i+1))}" + require_value "$arg" "$next" + CHAIN="$next" + i=$((i+1)) + ;; + + *) + # ignore unknown args—OR log them + ;; + esac + + i=$((i+1)) +done + +# Log extracted values +echo "extracted --datadir: ${DATADIR:-}" +echo "extracted --proofs-history.storage-path: ${PROOFS_PATH:-}" +echo "extracted --chain: ${CHAIN:-}" + +echo "Initializing op-reth" +op-reth init --datadir="$DATADIR" --chain="$CHAIN" +echo "Initializing op-reth proofs" +op-reth proofs init --datadir="$DATADIR" --chain="$CHAIN" --proofs-history.storage-path="$PROOFS_PATH" +echo "Starting op-reth with args: $*" +op-reth "$@" diff --git a/crates/optimism/trie/Cargo.toml b/crates/optimism/trie/Cargo.toml new file mode 100644 index 
00000000000..d5ee4d639a2 --- /dev/null +++ b/crates/optimism/trie/Cargo.toml @@ -0,0 +1,87 @@ +[package] +name = "reth-optimism-trie" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Trie node storage for serving proofs in FP window fast" + +[lints] +workspace = true + +[dependencies] +# reth +reth-db = { workspace = true, features = ["mdbx"] } +reth-evm.workspace = true +reth-execution-errors.workspace = true +reth-primitives-traits.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-trie = { workspace = true, features = ["serde"] } +reth-tasks.workspace = true + +# `metrics` feature +metrics = { workspace = true, optional = true } +reth-metrics = { workspace = true, features = ["common"], optional = true } + +# ethereum +alloy-primitives.workspace = true +alloy-eips.workspace = true + +# async +tokio = { workspace = true, features = ["sync"] } + +# codec +bytes.workspace = true +serde.workspace = true +bincode.workspace = true + +# misc +thiserror.workspace = true +auto_impl.workspace = true +eyre.workspace = true +strum.workspace = true +tracing.workspace = true +derive_more.workspace = true + +[dev-dependencies] +reth-codecs = { workspace = true, features = ["test-utils"] } +tempfile.workspace = true +tokio = { workspace = true, features = ["test-util", "rt-multi-thread", "macros"] } +test-case.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } +# workaround for failing doc test +reth-db-api = { workspace = true, features = ["test-utils"] } +reth-trie = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-node-api.workspace = true +alloy-consensus.workspace = true +alloy-genesis.workspace = true +reth-chainspec.workspace = true +reth-db-common.workspace = true +reth-ethereum-primitives.workspace = true +reth-evm-ethereum.workspace = true +reth-testing-utils.workspace = true +reth-storage-errors.workspace = true +secp256k1.workspace = true +mockall.workspace = true + +# misc +serial_test.workspace = true + +[features] +serde-bincode-compat = [ + "reth-primitives-traits/serde-bincode-compat", + "reth-trie/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "alloy-genesis/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat", +] +metrics = [ + "reth-trie/metrics", + "dep:reth-metrics", + "dep:metrics", +] diff --git a/crates/optimism/trie/src/api.rs b/crates/optimism/trie/src/api.rs new file mode 100644 index 00000000000..66c27a4efb2 --- /dev/null +++ b/crates/optimism/trie/src/api.rs @@ -0,0 +1,196 @@ +//! Storage API for external storage of intermediary trie nodes. + +use crate::OpProofsStorageResult; +use alloy_eips::eip1898::BlockWithParent; +use alloy_primitives::{map::HashMap, B256, U256}; +use auto_impl::auto_impl; +use derive_more::{AddAssign, Constructor}; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, + updates::TrieUpdatesSorted, + BranchNodeCompact, HashedPostStateSorted, Nibbles, +}; +use std::{fmt::Debug, time::Duration}; + +/// Diff of trie updates and post state for a block. 
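+///
+/// A minimal usage sketch (illustrative only; `diff_a` and `diff_b` are
+/// assumed to be per-block diffs built elsewhere):
+///
+/// ```ignore
+/// let mut squashed = BlockStateDiff::default();
+/// // Fold consecutive block diffs into one, e.g. before pruning.
+/// squashed.extend_ref(&diff_a);
+/// squashed.extend_ref(&diff_b);
+/// ```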
+#[derive(Debug, Clone, Default)] +pub struct BlockStateDiff { + /// Trie updates for branch nodes + pub sorted_trie_updates: TrieUpdatesSorted, + /// Post state for leaf nodes (accounts and storage) + pub sorted_post_state: HashedPostStateSorted, +} + +impl BlockStateDiff { + /// Extend the [` BlockStateDiff`] from other latest [`BlockStateDiff`] + pub fn extend_ref(&mut self, other: &Self) { + self.sorted_trie_updates.extend_ref(&other.sorted_trie_updates); + self.sorted_post_state.extend_ref(&other.sorted_post_state); + } +} + +/// Counts of trie updates written to storage. +#[derive(Debug, Clone, Default, AddAssign, Constructor, Eq, PartialEq)] +pub struct WriteCounts { + /// Number of account trie updates written + pub account_trie_updates_written_total: u64, + /// Number of storage trie updates written + pub storage_trie_updates_written_total: u64, + /// Number of hashed accounts written + pub hashed_accounts_written_total: u64, + /// Number of hashed storages written + pub hashed_storages_written_total: u64, +} + +/// Duration metrics for block processing. +#[derive(Debug, Default, Clone)] +pub struct OperationDurations { + /// Total time to process a block (end-to-end) in seconds + pub total_duration_seconds: Duration, + /// Time spent executing the block (EVM) in seconds + pub execution_duration_seconds: Duration, + /// Time spent calculating state root in seconds + pub state_root_duration_seconds: Duration, + /// Time spent writing trie updates to storage in seconds + pub write_duration_seconds: Duration, +} + +/// Trait for reading trie nodes from the database. +/// +/// Only leaf nodes and some branch nodes are stored. The bottom layer of branch nodes +/// are not stored to reduce write amplification. This matches Reth's non-historical trie storage. +#[auto_impl(Arc)] +pub trait OpProofsStore: Send + Sync + Debug { + /// Cursor for iterating over trie branches. + type StorageTrieCursor<'tx>: TrieStorageCursor + 'tx + where + Self: 'tx; + + /// Cursor for iterating over account trie branches. + type AccountTrieCursor<'tx>: TrieCursor + 'tx + where + Self: 'tx; + + /// Cursor for iterating over storage leaves. + type StorageCursor<'tx>: HashedStorageCursor + Send + Sync + 'tx + where + Self: 'tx; + + /// Cursor for iterating over account leaves. + type AccountHashedCursor<'tx>: HashedCursor + Send + Sync + 'tx + where + Self: 'tx; + + /// Store a batch of account trie branches. Used for saving existing state. For live state + /// capture, use [store_trie_updates](OpProofsStore::store_trie_updates). + fn store_account_branches( + &self, + account_nodes: Vec<(Nibbles, Option)>, + ) -> impl Future> + Send; + + /// Store a batch of storage trie branches. Used for saving existing state. + fn store_storage_branches( + &self, + hashed_address: B256, + storage_nodes: Vec<(Nibbles, Option)>, + ) -> impl Future> + Send; + + /// Store a batch of account trie leaf nodes. Used for saving existing state. + fn store_hashed_accounts( + &self, + accounts: Vec<(B256, Option)>, + ) -> impl Future> + Send; + + /// Store a batch of storage trie leaf nodes. Used for saving existing state. + fn store_hashed_storages( + &self, + hashed_address: B256, + storages: Vec<(B256, U256)>, + ) -> impl Future> + Send; + + /// Get the earliest block number and hash that has been stored + /// + /// This is used to determine the block number of trie nodes with block number 0. + /// All earliest block numbers are stored in 0 to reduce updates required to prune trie nodes. 
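+    ///
+    /// In other words, the nodes keyed under block number 0 belong to the
+    /// earliest retained state, and this mapping records which real block
+    /// (number and hash) that is. Illustrative use (assumes a `store`
+    /// implementing this trait):
+    ///
+    /// ```ignore
+    /// if let Some((number, hash)) = store.get_earliest_block_number().await? {
+    ///     // proofs can be served for blocks from `number` onwards
+    /// }
+    /// ```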
+ fn get_earliest_block_number( + &self, + ) -> impl Future>> + Send; + + /// Get the latest block number and hash that has been stored + fn get_latest_block_number( + &self, + ) -> impl Future>> + Send; + + /// Get a trie cursor for the storage backend + fn storage_trie_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get a trie cursor for the account backend + fn account_trie_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get a storage cursor for the storage backend + fn storage_hashed_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get an account hashed cursor for the storage backend + fn account_hashed_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Store a batch of trie updates. + /// + /// If wiped is true, the entire storage trie is wiped, but this is unsupported going forward, + /// so should only happen for legacy reasons. + fn store_trie_updates( + &self, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> impl Future> + Send; + + /// Fetch all updates for a given block number. + fn fetch_trie_updates( + &self, + block_number: u64, + ) -> impl Future> + Send; + + /// Applies [`BlockStateDiff`] to the earliest state (updating/deleting nodes) and updates the + /// earliest block number. + fn prune_earliest_state( + &self, + new_earliest_block_ref: BlockWithParent, + diff: BlockStateDiff, + ) -> impl Future> + Send; + + /// Remove account, storage and trie updates from historical storage for all blocks till + /// the specified block (inclusive). + fn unwind_history( + &self, + to: BlockWithParent, + ) -> impl Future> + Send; + + /// Deletes all updates > `latest_common_block_number` and replaces them with the new updates. + fn replace_updates( + &self, + latest_common_block_number: u64, + blocks_to_add: HashMap, + ) -> impl Future> + Send; + + /// Set the earliest block number and hash that has been stored + fn set_earliest_block_number( + &self, + block_number: u64, + hash: B256, + ) -> impl Future> + Send; +} diff --git a/crates/optimism/trie/src/backfill.rs b/crates/optimism/trie/src/backfill.rs new file mode 100644 index 00000000000..65a9afd3c01 --- /dev/null +++ b/crates/optimism/trie/src/backfill.rs @@ -0,0 +1,708 @@ +//! Backfill job for proofs storage. Handles storing the existing state into the proofs storage. + +use crate::{OpProofsStorageError, OpProofsStore}; +use alloy_primitives::B256; +use reth_db::{ + cursor::{DbCursorRO, DbDupCursorRO}, + tables, + transaction::DbTx, + DatabaseError, +}; +use reth_primitives_traits::{Account, StorageEntry}; +use reth_trie::{BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles}; +use std::{collections::HashMap, time::Instant}; +use tracing::info; + +/// Batch size threshold for storing entries during backfill +const BACKFILL_STORAGE_THRESHOLD: usize = 100000; + +/// Threshold for logging progress during backfill +const BACKFILL_LOG_THRESHOLD: usize = 100000; + +/// Backfill job for external storage. +#[derive(Debug)] +pub struct BackfillJob<'a, Tx: DbTx, S: OpProofsStore + Send> { + storage: S, + tx: &'a Tx, +} + +/// Macro to generate simple cursor iterators for tables +macro_rules! 
define_simple_cursor_iter { + ($iter_name:ident, $table:ty, $key_type:ty, $value_type:ty) => { + struct $iter_name(C); + + impl $iter_name { + const fn new(cursor: C) -> Self { + Self(cursor) + } + } + + impl> Iterator for $iter_name { + type Item = Result<($key_type, $value_type), DatabaseError>; + + fn next(&mut self) -> Option { + self.0.next().transpose() + } + } + }; +} + +/// Macro to generate duplicate cursor iterators for tables with custom logic +macro_rules! define_dup_cursor_iter { + ($iter_name:ident, $table:ty, $key_type:ty, $value_type:ty) => { + struct $iter_name(C); + + impl $iter_name { + const fn new(cursor: C) -> Self { + Self(cursor) + } + } + + impl + DbCursorRO<$table>> Iterator for $iter_name { + type Item = Result<($key_type, $value_type), DatabaseError>; + + fn next(&mut self) -> Option { + // First try to get the next duplicate value + if let Some(res) = self.0.next_dup().transpose() { + return Some(res); + } + + // If no more duplicates, find the next key with values + let Some(Ok((next_key, _))) = self.0.next_no_dup().transpose() else { + // If no more entries, return None + return None; + }; + + // If found, seek to the first duplicate for this key + return self.0.seek(next_key).transpose(); + } + } + }; +} + +// Generate iterators for all 4 table types +define_simple_cursor_iter!(HashedAccountsIter, tables::HashedAccounts, B256, Account); +define_dup_cursor_iter!(HashedStoragesIter, tables::HashedStorages, B256, StorageEntry); +define_simple_cursor_iter!( + AccountsTrieIter, + tables::AccountsTrie, + StoredNibbles, + BranchNodeCompact +); +define_dup_cursor_iter!(StoragesTrieIter, tables::StoragesTrie, B256, StorageTrieEntry); + +/// Trait to estimate the progress of a backfill job based on the key. +trait CompletionEstimatable { + // Returns a progress estimate as a percentage (0.0 to 1.0) + fn estimate_progress(&self) -> f64; +} + +impl CompletionEstimatable for B256 { + fn estimate_progress(&self) -> f64 { + // use the first 3 bytes as a progress estimate + let progress = self.0[..3].to_vec(); + let mut val: u64 = 0; + for nibble in &progress { + val = (val << 8) | *nibble as u64; + } + val as f64 / (256u64.pow(3)) as f64 + } +} + +impl CompletionEstimatable for StoredNibbles { + fn estimate_progress(&self) -> f64 { + // use the first 6 nibbles as a progress estimate + let progress_nibbles = + if self.0.is_empty() { Nibbles::new() } else { self.0.slice(0..(self.0.len().min(6))) }; + let mut val: u64 = 0; + for nibble in progress_nibbles.iter() { + val = (val << 4) | nibble as u64; + } + val as f64 / (16u64.pow(progress_nibbles.len() as u32)) as f64 + } +} + +/// Backfill a table from a source iterator to a storage function. Handles batching and logging. 
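+///
+/// Progress (and the logged ETA) is estimated from the leading bytes or
+/// nibbles of the current key via `CompletionEstimatable`, which assumes keys
+/// are roughly uniformly distributed; that holds here since the keys are
+/// keccak256 hashes or paths in a trie of hashed keys.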
+async fn backfill< + S: Iterator>, + F: Future> + Send, + Key: CompletionEstimatable + Clone + 'static, + Value: Clone + 'static, +>( + name: &str, + source: S, + storage_threshold: usize, + log_threshold: usize, + save_fn: impl Fn(Vec<(Key, Value)>) -> F, +) -> Result { + let mut entries = Vec::new(); + + let mut total_entries: u64 = 0; + + info!("Starting {} backfill", name); + let start_time = Instant::now(); + + let mut source = source.peekable(); + let initial_progress = source + .peek() + .map(|entry| entry.clone().map(|entry| entry.0.estimate_progress())) + .transpose()?; + + for entry in source { + let Some(initial_progress) = initial_progress else { + // If there are any items, there must be an initial progress + unreachable!(); + }; + let entry = entry?; + + entries.push(entry.clone()); + total_entries += 1; + + if total_entries.is_multiple_of(log_threshold as u64) { + let progress = entry.0.estimate_progress(); + let elapsed = start_time.elapsed(); + let elapsed_secs = elapsed.as_secs_f64(); + + let progress_per_second = if elapsed_secs.is_normal() { + (progress - initial_progress) / elapsed_secs + } else { + 0.0 + }; + let estimated_total_time = if progress_per_second.is_normal() { + (1.0 - progress) / progress_per_second + } else { + 0.0 + }; + let progress_pct = progress * 100.0; + info!( + "Processed {} {}, progress: {progress_pct:.2}%, ETA: {}s", + name, total_entries, estimated_total_time, + ); + } + + if entries.len() >= storage_threshold { + info!("Storing {} entries, total entries: {}", name, total_entries); + save_fn(entries).await?; + entries = Vec::new(); + } + } + + if !entries.is_empty() { + info!("Storing final {} entries", name); + save_fn(entries).await?; + } + + info!("{} backfill complete: {} entries", name, total_entries); + Ok(total_entries) +} + +impl<'a, Tx: DbTx + Sync, S: OpProofsStore + Send> BackfillJob<'a, Tx, S> { + /// Create a new backfill job. + pub const fn new(storage: S, tx: &'a Tx) -> Self { + Self { storage, tx } + } + + /// Save mapping of hashed addresses to accounts to storage. + async fn save_hashed_accounts( + &self, + entries: Vec<(B256, Account)>, + ) -> Result<(), OpProofsStorageError> { + self.storage + .store_hashed_accounts( + entries.into_iter().map(|(address, account)| (address, Some(account))).collect(), + ) + .await?; + + Ok(()) + } + + /// Save mapping of account trie paths to branch nodes to storage. + async fn save_account_branches( + &self, + entries: Vec<(StoredNibbles, BranchNodeCompact)>, + ) -> Result<(), OpProofsStorageError> { + self.storage + .store_account_branches( + entries.into_iter().map(|(path, branch)| (path.0, Some(branch))).collect(), + ) + .await?; + + Ok(()) + } + + /// Save mapping of hashed addresses to storage entries to storage. + async fn save_hashed_storages( + &self, + entries: Vec<(B256, StorageEntry)>, + ) -> Result<(), OpProofsStorageError> { + // Group entries by hashed address + let mut by_address: HashMap> = HashMap::default(); + for (address, entry) in entries { + by_address.entry(address).or_default().push((entry.key, entry.value)); + } + + // Store each address's storage entries + for (address, storages) in by_address { + self.storage.store_hashed_storages(address, storages).await?; + } + + Ok(()) + } + + /// Save mapping of hashed addresses to storage trie entries to storage. 
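+    ///
+    /// Entries arrive interleaved across accounts, so they are grouped by
+    /// hashed address first and then written per account, matching the
+    /// per-address layout of `store_storage_branches`.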
+ async fn save_storage_branches( + &self, + entries: Vec<(B256, StorageTrieEntry)>, + ) -> Result<(), OpProofsStorageError> { + // Group entries by hashed address + let mut by_address: HashMap)>> = + HashMap::default(); + for (hashed_address, storage_entry) in entries { + by_address + .entry(hashed_address) + .or_default() + .push((storage_entry.nibbles.0, Some(storage_entry.node))); + } + + // Store each address's storage trie branches + for (address, branches) in by_address { + self.storage.store_storage_branches(address, branches).await?; + } + + Ok(()) + } + + /// Backfill hashed accounts data + async fn backfill_hashed_accounts(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_read::()?; + + let source = HashedAccountsIter::new(start_cursor); + backfill( + "hashed accounts", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + |entries| self.save_hashed_accounts(entries), + ) + .await?; + + Ok(()) + } + + /// Backfill hashed storage data + async fn backfill_hashed_storages(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_dup_read::()?; + + let source = HashedStoragesIter::new(start_cursor); + backfill( + "hashed storage", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + |entries| self.save_hashed_storages(entries), + ) + .await?; + + Ok(()) + } + + /// Backfill accounts trie data + async fn backfill_accounts_trie(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_read::()?; + + let source = AccountsTrieIter::new(start_cursor); + backfill( + "accounts trie", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + |entries| self.save_account_branches(entries), + ) + .await?; + + Ok(()) + } + + /// Backfill storage trie data + async fn backfill_storages_trie(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_dup_read::()?; + + let source = StoragesTrieIter::new(start_cursor); + backfill( + "storage trie", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + |entries| self.save_storage_branches(entries), + ) + .await?; + + Ok(()) + } + + /// Run complete backfill of all preimage data + async fn backfill_trie(&self) -> Result<(), OpProofsStorageError> { + self.backfill_hashed_accounts().await?; + self.backfill_hashed_storages().await?; + self.backfill_storages_trie().await?; + self.backfill_accounts_trie().await?; + Ok(()) + } + + /// Run the backfill job. 
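+    ///
+    /// The job is idempotent: it is skipped entirely once an earliest block
+    /// number has been recorded. Illustrative call:
+    ///
+    /// ```ignore
+    /// let tx = db.tx()?;
+    /// BackfillJob::new(storage.clone(), &tx).run(best_number, best_hash).await?;
+    /// ```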
+ pub async fn run(&self, best_number: u64, best_hash: B256) -> Result<(), OpProofsStorageError> { + if self.storage.get_earliest_block_number().await?.is_none() { + self.backfill_trie().await?; + + self.storage.set_earliest_block_number(best_number, best_hash).await?; + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::InMemoryProofsStorage; + use alloy_primitives::{keccak256, Address, U256}; + use reth_db::{ + cursor::DbCursorRW, test_utils::create_test_rw_db, transaction::DbTxMut, Database, + }; + use reth_primitives_traits::Account; + use reth_trie::{ + hashed_cursor::HashedCursor, trie_cursor::TrieCursor, BranchNodeCompact, StorageTrieEntry, + StoredNibbles, StoredNibblesSubKey, + }; + use std::sync::Arc; + + /// Helper function to create a test branch node + fn create_test_branch_node() -> BranchNodeCompact { + let mut state_mask = reth_trie::TrieMask::default(); + state_mask.set_bit(0); + state_mask.set_bit(1); + + BranchNodeCompact { + state_mask, + tree_mask: reth_trie::TrieMask::default(), + hash_mask: reth_trie::TrieMask::default(), + hashes: Arc::new(vec![]), + root_hash: None, + } + } + + #[tokio::test] + async fn test_backfill_hashed_accounts() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test accounts into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_write::().unwrap(); + + let mut accounts = vec![ + ( + keccak256(Address::repeat_byte(0x01)), + Account { nonce: 1, balance: U256::from(100), bytecode_hash: None }, + ), + ( + keccak256(Address::repeat_byte(0x02)), + Account { nonce: 2, balance: U256::from(200), bytecode_hash: None }, + ), + ( + keccak256(Address::repeat_byte(0x03)), + Account { nonce: 3, balance: U256::from(300), bytecode_hash: None }, + ), + ]; + + // Sort accounts by address for cursor.append (which requires sorted order) + accounts.sort_by_key(|(addr, _)| *addr); + + for (addr, account) in &accounts { + cursor.append(*addr, account).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + job.backfill_hashed_accounts().await.unwrap(); + + // Verify data was stored (will be in sorted order) + let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); + let mut count = 0; + while let Some((key, account)) = account_cursor.next().unwrap() { + // Find matching account in our test data + let expected = accounts.iter().find(|(addr, _)| *addr == key).unwrap(); + assert_eq!((key, account), *expected); + count += 1; + } + assert_eq!(count, 3); + } + + #[tokio::test] + async fn test_backfill_hashed_storage() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test storage into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_dup_write::().unwrap(); + + let addr1 = keccak256(Address::repeat_byte(0x01)); + let addr2 = keccak256(Address::repeat_byte(0x02)); + + let storage_entries = vec![ + ( + addr1, + StorageEntry { key: keccak256(B256::repeat_byte(0x10)), value: U256::from(100) }, + ), + ( + addr1, + StorageEntry { key: keccak256(B256::repeat_byte(0x20)), value: U256::from(200) }, + ), + ( + addr2, + StorageEntry { key: keccak256(B256::repeat_byte(0x30)), value: U256::from(300) }, + ), + ]; + + for (addr, entry) in &storage_entries { + cursor.upsert(*addr, entry).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = 
BackfillJob::new(storage.clone(), &tx); + job.backfill_hashed_storages().await.unwrap(); + + // Verify data was stored for addr1 + let mut storage_cursor = storage.storage_hashed_cursor(addr1, 100).unwrap(); + let mut found = vec![]; + while let Some((key, value)) = storage_cursor.next().unwrap() { + found.push((key, value)); + } + assert_eq!(found.len(), 2); + assert_eq!(found[0], (storage_entries[0].1.key, storage_entries[0].1.value)); + assert_eq!(found[1], (storage_entries[1].1.key, storage_entries[1].1.value)); + + // Verify data was stored for addr2 + let mut storage_cursor = storage.storage_hashed_cursor(addr2, 100).unwrap(); + let mut found = vec![]; + while let Some((key, value)) = storage_cursor.next().unwrap() { + found.push((key, value)); + } + assert_eq!(found.len(), 1); + assert_eq!(found[0], (storage_entries[2].1.key, storage_entries[2].1.value)); + } + + #[tokio::test] + async fn test_backfill_accounts_trie() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test trie nodes into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_write::().unwrap(); + + let branch = create_test_branch_node(); + let nodes = vec![ + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![1])), branch.clone()), + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![2])), branch.clone()), + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![3])), branch.clone()), + ]; + + for (path, node) in &nodes { + cursor.append(path.clone(), node).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + job.backfill_accounts_trie().await.unwrap(); + + // Verify data was stored + let mut trie_cursor = storage.account_trie_cursor(100).unwrap(); + let mut count = 0; + while let Some((path, _node)) = trie_cursor.next().unwrap() { + assert_eq!(path, nodes[count].0 .0); + count += 1; + } + assert_eq!(count, 3); + } + + #[tokio::test] + async fn test_backfill_storages_trie() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test storage trie nodes into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_dup_write::().unwrap(); + + let branch = create_test_branch_node(); + let addr1 = keccak256(Address::repeat_byte(0x01)); + let addr2 = keccak256(Address::repeat_byte(0x02)); + + let nodes = vec![ + ( + addr1, + StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![1])), + node: branch.clone(), + }, + ), + ( + addr1, + StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![2])), + node: branch.clone(), + }, + ), + ( + addr2, + StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![3])), + node: branch.clone(), + }, + ), + ]; + + for (addr, entry) in &nodes { + cursor.upsert(*addr, entry).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + job.backfill_storages_trie().await.unwrap(); + + // Verify data was stored for addr1 + let mut trie_cursor = storage.storage_trie_cursor(addr1, 100).unwrap(); + let mut found = vec![]; + while let Some((path, _node)) = trie_cursor.next().unwrap() { + found.push(path); + } + assert_eq!(found.len(), 2); + assert_eq!(found[0], nodes[0].1.nibbles.0); + assert_eq!(found[1], nodes[1].1.nibbles.0); + + // Verify data was stored for addr2 + let mut trie_cursor = 
storage.storage_trie_cursor(addr2, 100).unwrap(); + let mut found = vec![]; + while let Some((path, _node)) = trie_cursor.next().unwrap() { + found.push(path); + } + assert_eq!(found.len(), 1); + assert_eq!(found[0], nodes[2].1.nibbles.0); + } + + #[tokio::test] + async fn test_full_backfill_run() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert some test data + let tx = db.tx_mut().unwrap(); + + // Add accounts + let mut cursor = tx.cursor_write::().unwrap(); + let addr = keccak256(Address::repeat_byte(0x01)); + cursor + .append(addr, &Account { nonce: 1, balance: U256::from(100), bytecode_hash: None }) + .unwrap(); + drop(cursor); + + // Add storage + let mut cursor = tx.cursor_dup_write::().unwrap(); + cursor + .upsert( + addr, + &StorageEntry { key: keccak256(B256::repeat_byte(0x10)), value: U256::from(100) }, + ) + .unwrap(); + drop(cursor); + + // Add account trie + let mut cursor = tx.cursor_write::().unwrap(); + cursor + .append( + StoredNibbles(Nibbles::from_nibbles_unchecked(vec![1])), + &create_test_branch_node(), + ) + .unwrap(); + drop(cursor); + + // Add storage trie + let mut cursor = tx.cursor_dup_write::().unwrap(); + cursor + .upsert( + addr, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![1])), + node: create_test_branch_node(), + }, + ) + .unwrap(); + drop(cursor); + + tx.commit().unwrap(); + + // Run full backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + let best_number = 100; + let best_hash = B256::repeat_byte(0x42); + + // Should be None initially + assert_eq!(storage.get_earliest_block_number().await.unwrap(), None); + + job.run(best_number, best_hash).await.unwrap(); + + // Should be set after backfill + assert_eq!( + storage.get_earliest_block_number().await.unwrap(), + Some((best_number, best_hash)) + ); + + // Verify data was backfilled + let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); + assert!(account_cursor.next().unwrap().is_some()); + + let mut storage_cursor = storage.storage_hashed_cursor(addr, 100).unwrap(); + assert!(storage_cursor.next().unwrap().is_some()); + + let mut trie_cursor = storage.account_trie_cursor(100).unwrap(); + assert!(trie_cursor.next().unwrap().is_some()); + + let mut storage_trie_cursor = storage.storage_trie_cursor(addr, 100).unwrap(); + assert!(storage_trie_cursor.next().unwrap().is_some()); + } + + #[tokio::test] + async fn test_backfill_run_skips_if_already_done() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Set earliest block to simulate already backfilled + storage.set_earliest_block_number(50, B256::repeat_byte(0x01)).await.unwrap(); + + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + + // Run backfill - should skip + job.run(100, B256::repeat_byte(0x42)).await.unwrap(); + + // Should still have old earliest block + assert_eq!( + storage.get_earliest_block_number().await.unwrap(), + Some((50, B256::repeat_byte(0x01))) + ); + } +} diff --git a/crates/optimism/trie/src/cursor.rs b/crates/optimism/trie/src/cursor.rs new file mode 100644 index 00000000000..615c554f557 --- /dev/null +++ b/crates/optimism/trie/src/cursor.rs @@ -0,0 +1,129 @@ +//! Implementation of [`HashedCursor`] and [`TrieCursor`] for +//! [`OpProofsStorage`](crate::OpProofsStorage). 
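+//!
+//! These are thin newtype wrappers: every method is an `#[inline]` delegation
+//! to the wrapped cursor, so the types only adapt proofs-storage cursors to
+//! the generic trie cursor traits.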
+ +use alloy_primitives::{B256, U256}; +use derive_more::Constructor; +use reth_db::DatabaseError; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, + BranchNodeCompact, Nibbles, +}; + +/// Manages reading storage or account trie nodes from [`TrieCursor`]. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsTrieCursor(pub C); + +impl TrieCursor for OpProofsTrieCursor +where + C: TrieCursor, +{ + #[inline] + fn seek_exact( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + self.0.seek_exact(key) + } + + #[inline] + fn seek( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn current(&mut self) -> Result, DatabaseError> { + self.0.current() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +impl TrieStorageCursor for OpProofsTrieCursor +where + C: TrieStorageCursor, +{ + #[inline] + fn set_hashed_address(&mut self, hashed_address: B256) { + self.0.set_hashed_address(hashed_address) + } +} + +/// Manages reading hashed account nodes from external storage. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedAccountCursor(pub C); + +impl HashedCursor for OpProofsHashedAccountCursor +where + C: HashedCursor + Send + Sync, +{ + type Value = Account; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +/// Manages reading hashed storage nodes from external storage. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedStorageCursor(pub C); + +impl HashedCursor for OpProofsHashedStorageCursor +where + C: HashedCursor + Send + Sync, +{ + type Value = U256; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +impl HashedStorageCursor for OpProofsHashedStorageCursor +where + C: HashedStorageCursor + Send + Sync, +{ + #[inline] + fn is_storage_empty(&mut self) -> Result { + self.0.is_storage_empty() + } + + #[inline] + fn set_hashed_address(&mut self, hashed_address: B256) { + self.0.set_hashed_address(hashed_address) + } +} diff --git a/crates/optimism/trie/src/cursor_factory.rs b/crates/optimism/trie/src/cursor_factory.rs new file mode 100644 index 00000000000..ff47c9137ca --- /dev/null +++ b/crates/optimism/trie/src/cursor_factory.rs @@ -0,0 +1,100 @@ +//! Implements [`TrieCursorFactory`] and [`HashedCursorFactory`] for [`OpProofsStore`] types. + +use crate::{ + OpProofsHashedAccountCursor, OpProofsHashedStorageCursor, OpProofsStorage, OpProofsStore, + OpProofsTrieCursor, +}; +use alloy_primitives::B256; +use reth_db::DatabaseError; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use std::marker::PhantomData; + +/// Factory for creating trie cursors for [`OpProofsStore`]. 
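+///
+/// The factory is pinned to a block number: every cursor it creates resolves
+/// a key to its latest version at or below that block, giving a consistent
+/// point-in-time view of the trie. Illustrative construction:
+///
+/// ```ignore
+/// let factory = OpProofsTrieCursorFactory::new(&storage, block_number);
+/// let mut accounts = factory.account_trie_cursor()?;
+/// ```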
+#[derive(Debug, Clone)] +pub struct OpProofsTrieCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsTrieCursorFactory<'tx, S> { + /// Initializes new `OpProofsTrieCursorFactory` + pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> TrieCursorFactory for OpProofsTrieCursorFactory<'tx, S> +where + for<'a> S: OpProofsStore + 'tx, +{ + type AccountTrieCursor<'a> + = OpProofsTrieCursor> + where + Self: 'a; + type StorageTrieCursor<'a> + = OpProofsTrieCursor> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { + Ok(OpProofsTrieCursor::new( + self.storage + .account_trie_cursor(self.block_number) + .map_err(Into::::into)?, + )) + } + + fn storage_trie_cursor( + &self, + hashed_address: B256, + ) -> Result, DatabaseError> { + Ok(OpProofsTrieCursor::new( + self.storage + .storage_trie_cursor(hashed_address, self.block_number) + .map_err(Into::::into)?, + )) + } +} + +/// Factory for creating hashed account cursors for [`OpProofsStore`]. +#[derive(Debug, Clone)] +pub struct OpProofsHashedAccountCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsHashedAccountCursorFactory<'tx, S> { + /// Creates a new `OpProofsHashedAccountCursorFactory` instance. + pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> HashedCursorFactory for OpProofsHashedAccountCursorFactory<'tx, S> +where + S: OpProofsStore + 'tx, +{ + type AccountCursor<'a> + = OpProofsHashedAccountCursor> + where + Self: 'a; + type StorageCursor<'a> + = OpProofsHashedStorageCursor> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { + Ok(OpProofsHashedAccountCursor::new(self.storage.account_hashed_cursor(self.block_number)?)) + } + + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result, DatabaseError> { + Ok(OpProofsHashedStorageCursor::new( + self.storage.storage_hashed_cursor(hashed_address, self.block_number)?, + )) + } +} diff --git a/crates/optimism/trie/src/db/cursor.rs b/crates/optimism/trie/src/db/cursor.rs new file mode 100644 index 00000000000..e5c0329d22a --- /dev/null +++ b/crates/optimism/trie/src/db/cursor.rs @@ -0,0 +1,1351 @@ +use std::marker::PhantomData; + +use crate::{ + db::{ + AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, + MaybeDeleted, StorageTrieHistory, StorageTrieKey, VersionedValue, + }, + OpProofsStorageResult, +}; +use alloy_primitives::{B256, U256}; +use reth_db::{ + cursor::{DbCursorRO, DbDupCursorRO}, + table::{DupSort, Table}, + transaction::DbTx, + Database, DatabaseEnv, DatabaseError, +}; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, + BranchNodeCompact, Nibbles, StoredNibbles, +}; + +/// Generic alias for dup cursor for T +pub(crate) type Dup<'tx, T> = <::TX as DbTx>::DupCursor; + +/// Iterates versioned dup-sorted rows and returns the latest value (<= `max_block_number`), +/// skipping tombstones. 
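+///
+/// Each table key maps to dup-sorted `VersionedValue` entries, one per block
+/// that touched it; a `MaybeDeleted(None)` payload is a tombstone marking the
+/// key as deleted as of that block, so the cursor reports no value for it.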
+#[derive(Debug, Clone)]
+pub struct BlockNumberVersionedCursor<V, T, Cursor> {
+    _table: PhantomData<(V, T)>,
+    cursor: Cursor,
+    max_block_number: u64,
+}
+
+impl<V, T, Cursor> BlockNumberVersionedCursor<V, T, Cursor>
+where
+    T: Table<Value = VersionedValue<V>> + DupSort<SubKey = u64>,
+    Cursor: DbCursorRO<T> + DbDupCursorRO<T>,
+{
+    /// Initializes new [`BlockNumberVersionedCursor`].
+    pub const fn new(cursor: Cursor, max_block_number: u64) -> Self {
+        Self { _table: PhantomData, cursor, max_block_number }
+    }
+
+    /// Resolve the latest version for `key` with `block_number` <= `max_block_number`.
+    /// Strategy:
+    /// - `seek_by_key_subkey(key, max)` gives the first dup >= max:
+    ///   - if exactly == max → it's our latest
+    ///   - if > max → `prev_dup()` is the latest < max (or None)
+    /// - if no dup >= max:
+    ///   - if the key exists → `last_dup()` is the latest < max
+    ///   - else → None
+    fn latest_version_for_key(
+        &mut self,
+        key: T::Key,
+    ) -> OpProofsStorageResult<Option<(T::Key, VersionedValue<V>)>> {
+        // First dup with subkey >= max_block_number
+        let seek_res = self.cursor.seek_by_key_subkey(key.clone(), self.max_block_number)?;
+
+        if let Some(vv) = seek_res {
+            if vv.block_number > self.max_block_number {
+                // step back to the last dup < max
+                return Ok(self.cursor.prev_dup()?);
+            }
+            // already at the dup = max
+            return Ok(Some((key, vv)))
+        }
+
+        // No dup >= max ⇒ either key absent or all dups < max. Check if key exists:
+        if self.cursor.seek_exact(key.clone())?.is_none() {
+            return Ok(None);
+        }
+
+        // Key exists ⇒ take last dup (< max).
+        if let Some(vv) = self.cursor.last_dup()? {
+            return Ok(Some((key, vv)));
+        }
+        Ok(None)
+    }
+
+    /// Returns a non-deleted latest version for exactly `key`, if any.
+    fn seek_exact(&mut self, key: T::Key) -> OpProofsStorageResult<Option<(T::Key, V)>> {
+        if let Some((latest_key, latest_value)) = self.latest_version_for_key(key)? &&
+            let MaybeDeleted(Some(v)) = latest_value.value
+        {
+            return Ok(Some((latest_key, v)));
+        }
+        Ok(None)
+    }
+
+    /// Walk forward from `first_key` (inclusive) until we find a *live* latest-≤-max value.
+    /// `first_key` must already be a *real key* in the table.
+    fn next_live_from(
+        &mut self,
+        mut first_key: T::Key,
+    ) -> OpProofsStorageResult<Option<(T::Key, V)>> {
+        loop {
+            // Compute latest version ≤ max for this key
+            if let Some((k, v)) = self.seek_exact(first_key.clone())? {
+                return Ok(Some((k, v)));
+            }
+
+            // Move to next distinct key, or EOF
+            let Some((next_key, _)) = self.cursor.next_no_dup()? else {
+                return Ok(None);
+            };
+
+            first_key = next_key;
+        }
+    }
+
+    /// Seek to the first non-deleted latest version at or after `start_key`.
+    /// Logic:
+    /// - Try the exact key first (above). If alive, return it.
+    /// - Otherwise hop to the next distinct key and repeat until we find a live version or hit
+    ///   EOF.
+    fn seek(&mut self, start_key: T::Key) -> OpProofsStorageResult<Option<(T::Key, V)>> {
+        // Position MDBX at first key >= start_key
+        if let Some((first_key, _)) = self.cursor.seek(start_key)? {
+            return self.next_live_from(first_key);
+        }
+        Ok(None)
+    }
+
+    /// Advance to the next distinct key from the current MDBX position and return its
+    /// non-deleted latest version, if any. If the cursor is not positioned yet, the walk
+    /// starts from `T::Key::default()`.
+    fn next(&mut self) -> OpProofsStorageResult<Option<(T::Key, V)>>
+    where
+        T::Key: Default,
+    {
+        // If not positioned, start from the beginning (default key).
+        if self.cursor.current()?.is_none() {
+            let Some((first_key, _)) = self.cursor.seek(T::Key::default())? else {
+                return Ok(None);
+            };
+            return self.next_live_from(first_key);
+        }
+
+        // Otherwise advance to next distinct key and resume the walk.
+        let Some((next_key, _)) = self.cursor.next_no_dup()? else {
+            return Ok(None);
+        };
+        self.next_live_from(next_key)
+    }
+}
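A concrete walkthrough of the resolution strategy, assuming hypothetical dup entries for one key at blocks 10, 30 and 70:

// Worked example (hypothetical data): one key with dups at blocks {10, 30, 70}.
//
//   max = 70: seek_by_key_subkey(key, 70) lands on 70 (== max)   -> return 70
//   max = 50: seek_by_key_subkey(key, 50) lands on 70 (>  max)   -> prev_dup() -> 30
//   max = 90: no dup >= 90, but the key exists                   -> last_dup() -> 70
//   max =  5: seek lands on 10 (> max); no earlier dup remains   -> None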
+
+/// MDBX implementation of [`TrieCursor`].
+#[derive(Debug)]
+pub struct MdbxTrieCursor<V, T, Cursor> {
+    inner: BlockNumberVersionedCursor<V, T, Cursor>,
+    hashed_address: Option<B256>,
+}
+
+impl<V, T, Cursor> MdbxTrieCursor<V, T, Cursor>
+where
+    T: Table<Value = VersionedValue<V>> + DupSort<SubKey = u64>,
+    Cursor: DbCursorRO<T> + DbDupCursorRO<T>,
+{
+    /// Initializes new [`MdbxTrieCursor`].
+    pub const fn new(cursor: Cursor, max_block_number: u64, hashed_address: Option<B256>) -> Self {
+        Self { inner: BlockNumberVersionedCursor::new(cursor, max_block_number), hashed_address }
+    }
+}
+
+impl<Cursor> TrieCursor for MdbxTrieCursor<BranchNodeCompact, AccountTrieHistory, Cursor>
+where
+    Cursor: DbCursorRO<AccountTrieHistory> + DbDupCursorRO<AccountTrieHistory> + Send + Sync,
+{
+    fn seek_exact(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        Ok(self
+            .inner
+            .seek_exact(StoredNibbles(path))
+            .map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?)
+    }
+
+    fn seek(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        Ok(self
+            .inner
+            .seek(StoredNibbles(path))
+            .map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?)
+    }
+
+    fn next(&mut self) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        Ok(self.inner.next().map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?)
+    }
+
+    fn current(&mut self) -> Result<Option<Nibbles>, DatabaseError> {
+        self.inner.cursor.current().map(|opt| opt.map(|(StoredNibbles(n), _)| n))
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+impl<Cursor> TrieCursor for MdbxTrieCursor<BranchNodeCompact, StorageTrieHistory, Cursor>
+where
+    Cursor: DbCursorRO<StorageTrieHistory> + DbDupCursorRO<StorageTrieHistory> + Send + Sync,
+{
+    fn seek_exact(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            let key = StorageTrieKey::new(address, StoredNibbles(path));
+            return Ok(self.inner.seek_exact(key).map(|opt| {
+                opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node)))
+            })?)
+        }
+        Ok(None)
+    }
+
+    fn seek(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            let key = StorageTrieKey::new(address, StoredNibbles(path));
+            return Ok(self.inner.seek(key).map(|opt| {
+                opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node)))
+            })?)
+        }
+        Ok(None)
+    }
+
+    fn next(&mut self) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            return Ok(self.inner.next().map(|opt| {
+                opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node)))
+            })?)
+        }
+        Ok(None)
+    }
+
+    fn current(&mut self) -> Result<Option<Nibbles>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            return self.inner.cursor.current().map(|opt| {
+                opt.and_then(|(k, _)| (k.hashed_address == address).then_some(k.path.0))
+            });
+        }
+        Ok(None)
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+impl<Cursor> TrieStorageCursor for MdbxTrieCursor<BranchNodeCompact, StorageTrieHistory, Cursor>
+where
+    Cursor: DbCursorRO<StorageTrieHistory> + DbDupCursorRO<StorageTrieHistory> + Send + Sync,
+{
+    fn set_hashed_address(&mut self, hashed_address: B256) {
+        self.hashed_address = Some(hashed_address);
+    }
+}
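Illustrative construction sketch, not part of the patch and mirroring the test helpers further down: the same wrapper type serves both trie tables, with the storage variant bound to an address. The helper name `open_trie_cursors` is hypothetical.

use reth_db::{transaction::DbTx, Database, DatabaseEnv};

// Hypothetical setup: open one cursor per trie table at the same height.
fn open_trie_cursors(
    tx: &<DatabaseEnv as Database>::TX,
    max_block: u64,
    hashed_address: B256,
) -> Result<
    (
        MdbxTrieCursor<BranchNodeCompact, AccountTrieHistory, Dup<'_, AccountTrieHistory>>,
        MdbxTrieCursor<BranchNodeCompact, StorageTrieHistory, Dup<'_, StorageTrieHistory>>,
    ),
    DatabaseError,
> {
    // Account trie: no address filter.
    let account =
        MdbxTrieCursor::new(tx.cursor_dup_read::<AccountTrieHistory>()?, max_block, None);
    // Storage trie: entries are filtered to the bound address.
    let storage = MdbxTrieCursor::new(
        tx.cursor_dup_read::<StorageTrieHistory>()?,
        max_block,
        Some(hashed_address),
    );
    Ok((account, storage))
}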
+
+/// MDBX implementation of [`HashedCursor`] for storage state.
+#[derive(Debug)]
+pub struct MdbxStorageCursor<Cursor> {
+    inner: BlockNumberVersionedCursor<StorageValue, HashedStorageHistory, Cursor>,
+    hashed_address: B256,
+}
+
+impl<Cursor> MdbxStorageCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedStorageHistory> + DbDupCursorRO<HashedStorageHistory> + Send + Sync,
+{
+    /// Initializes new [`MdbxStorageCursor`]
+    pub const fn new(cursor: Cursor, block_number: u64, hashed_address: B256) -> Self {
+        Self { inner: BlockNumberVersionedCursor::new(cursor, block_number), hashed_address }
+    }
+}
+
+impl<Cursor> HashedCursor for MdbxStorageCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedStorageHistory> + DbDupCursorRO<HashedStorageHistory> + Send + Sync,
+{
+    type Value = U256;
+
+    fn seek(&mut self, key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        let storage_key = HashedStorageKey::new(self.hashed_address, key);
+
+        // hashed storage values can be zero, which means the storage slot is deleted, so we should
+        // skip those
+        let result = self.inner.seek(storage_key).map(|opt| {
+            opt.and_then(|(k, v)| {
+                // Only return entries that belong to the bound address
+                (k.hashed_address == self.hashed_address).then_some((k.hashed_storage_key, v.0))
+            })
+        })?;
+
+        if let Some((_, v)) = result &&
+            v.is_zero()
+        {
+            return self.next();
+        }
+
+        Ok(result)
+    }
+
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        loop {
+            let result = self.inner.next().map(|opt| {
+                opt.and_then(|(k, v)| {
+                    // Only return entries that belong to the bound address
+                    (k.hashed_address == self.hashed_address).then_some((k.hashed_storage_key, v.0))
+                })
+            })?;
+
+            // hashed storage values can be zero, which means the storage slot is deleted, so we
+            // should skip those
+            if let Some((_, v)) = result &&
+                v.is_zero()
+            {
+                continue;
+            }
+
+            return Ok(result);
+        }
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+impl<'tx> HashedStorageCursor for MdbxStorageCursor<Dup<'tx, HashedStorageHistory>> {
+    fn is_storage_empty(&mut self) -> Result<bool, DatabaseError> {
+        Ok(self.seek(B256::ZERO)?.is_none())
+    }
+
+    fn set_hashed_address(&mut self, hashed_address: B256) {
+        self.hashed_address = hashed_address
+    }
+}
+
+/// MDBX implementation of [`HashedCursor`] for account state.
+#[derive(Debug)]
+pub struct MdbxAccountCursor<Cursor> {
+    inner: BlockNumberVersionedCursor<Account, HashedAccountHistory, Cursor>,
+}
+
+impl<Cursor> MdbxAccountCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedAccountHistory> + DbDupCursorRO<HashedAccountHistory> + Send + Sync,
+{
+    /// Initializes new `MdbxAccountCursor`
+    pub const fn new(cursor: Cursor, block_number: u64) -> Self {
+        Self { inner: BlockNumberVersionedCursor::new(cursor, block_number) }
+    }
+}
+
+impl<Cursor> HashedCursor for MdbxAccountCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedAccountHistory> + DbDupCursorRO<HashedAccountHistory> + Send + Sync,
+{
+    type Value = Account;
+
+    fn seek(&mut self, key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        Ok(self.inner.seek(key)?)
+    }
+
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        Ok(self.inner.next()?)
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
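One asymmetry worth noting between the two hashed cursors: the account cursor returns whatever the versioned lookup yields, while the storage cursor additionally treats a zero `U256` as a cleared slot. A hypothetical trace:

// Hypothetical data: slot S holds 7 at block 10, then 0 at block 20.
//
//   MdbxStorageCursor with max_block = 15: seek(S) -> Some((S, 7))
//   MdbxStorageCursor with max_block = 25: seek(S) -> skips S (zero value) and
//       continues with the next live slot, or None at the address boundary.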
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::db::{models, StorageValue};
+    use reth_db::{
+        mdbx::{init_db_for, DatabaseArguments},
+        DatabaseEnv,
+    };
+    use reth_db_api::{
+        cursor::DbDupCursorRW,
+        transaction::{DbTx, DbTxMut},
+        Database,
+    };
+    use reth_trie::{BranchNodeCompact, Nibbles, StoredNibbles};
+    use tempfile::TempDir;
+
+    fn setup_db() -> DatabaseEnv {
+        let tmp = TempDir::new().expect("create tmpdir");
+        init_db_for::<_, models::Tables>(tmp, DatabaseArguments::default()).expect("init db")
+    }
+
+    fn stored(path: Nibbles) -> StoredNibbles {
+        StoredNibbles(path)
+    }
+
+    fn node() -> BranchNodeCompact {
+        BranchNodeCompact::default()
+    }
+
+    fn append_account_trie(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        key: StoredNibbles,
+        block: u64,
+        val: Option<BranchNodeCompact>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<AccountTrieHistory>().expect("dup write cursor");
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    fn append_storage_trie(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        address: B256,
+        path: Nibbles,
+        block: u64,
+        val: Option<BranchNodeCompact>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<StorageTrieHistory>().expect("dup write cursor");
+        let key = StorageTrieKey::new(address, StoredNibbles(path));
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    fn append_hashed_storage(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        addr: B256,
+        slot: B256,
+        block: u64,
+        val: Option<U256>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<HashedStorageHistory>().expect("dup write");
+        let key = HashedStorageKey::new(addr, slot);
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val.map(StorageValue)) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    fn append_hashed_account(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        key: B256,
+        block: u64,
+        val: Option<Account>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<HashedAccountHistory>().expect("dup write");
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    // Open a dup-RO cursor and wrap it in a BlockNumberVersionedCursor with a given bound.
+    fn version_cursor(
+        tx: &<DatabaseEnv as Database>::TX,
+        max_block: u64,
+    ) -> BlockNumberVersionedCursor<BranchNodeCompact, AccountTrieHistory, Dup<'_, AccountTrieHistory>>
+    {
+        let cur = tx.cursor_dup_read::<AccountTrieHistory>().expect("dup ro cursor");
+        BlockNumberVersionedCursor::new(cur, max_block)
+    }
+
+    fn account_trie_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+    ) -> MdbxTrieCursor<BranchNodeCompact, AccountTrieHistory, Dup<'_, AccountTrieHistory>> {
+        let c = tx.cursor_dup_read::<AccountTrieHistory>().expect("dup ro cursor");
+        // For account trie the address is not used; pass None.
+        MdbxTrieCursor::new(c, max_block, None)
+    }
+
+    // Helper: build a Storage trie cursor bound to an address
+    fn storage_trie_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+        address: B256,
+    ) -> MdbxTrieCursor<BranchNodeCompact, StorageTrieHistory, Dup<'_, StorageTrieHistory>> {
+        let c = tx.cursor_dup_read::<StorageTrieHistory>().expect("dup ro cursor");
+        MdbxTrieCursor::new(c, max_block, Some(address))
+    }
+
+    fn storage_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+        address: B256,
+    ) -> MdbxStorageCursor<Dup<'_, HashedStorageHistory>> {
+        let c = tx.cursor_dup_read::<HashedStorageHistory>().expect("dup ro cursor");
+        MdbxStorageCursor::new(c, max_block, address)
+    }
+
+    fn account_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+    ) -> MdbxAccountCursor<Dup<'_, HashedAccountHistory>> {
+        let c = tx.cursor_dup_read::<HashedAccountHistory>().expect("dup ro cursor");
+        MdbxAccountCursor::new(c, max_block)
+    }
+
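An illustrative test, not part of the patch, tying the helpers above together: the raw lookup surfaces a tombstone, while `seek_exact` filters it out.

    #[test]
    fn tombstone_roundtrip_illustration() {
        let db = setup_db();
        let k = stored(Nibbles::from_nibbles([0x0F]));
        {
            let wtx = db.tx_mut().expect("rw tx");
            append_account_trie(&wtx, k.clone(), 10, Some(node()));
            append_account_trie(&wtx, k.clone(), 20, None); // tombstone
            wtx.commit().expect("commit");
        }
        let tx = db.tx().expect("ro tx");
        let mut cur = version_cursor(&tx, 50);

        // The raw lookup surfaces the tombstone...
        let (_, vv) = cur.latest_version_for_key(k.clone()).expect("ok").expect("some");
        assert_eq!(vv.block_number, 20);
        assert!(matches!(vv.value, MaybeDeleted(None)));

        // ...while `seek_exact` filters it out.
        assert!(cur.seek_exact(k).expect("ok").is_none());
    }

+    // Assert helper: ensure the chosen VersionedValue has the expected block and deletion flag.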
+ fn assert_block( + got: Option<(StoredNibbles, VersionedValue)>, + expected_block: u64, + expect_deleted: bool, + ) { + let (_, vv) = got.expect("expected Some(..)"); + assert_eq!(vv.block_number, expected_block, "wrong block chosen"); + let is_deleted = matches!(vv.value, MaybeDeleted(None)); + assert_eq!(is_deleted, expect_deleted, "tombstone mismatch"); + } + + /// No entry for key → None. + #[test] + fn latest_version_for_key_none_when_key_absent() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cursor = version_cursor(&tx, 100); + + let out = cursor + .latest_version_for_key(stored(Nibbles::default())) + .expect("should not return error"); + assert!(out.is_none(), "absent key must return None"); + } + + /// Exact match at max (live) → pick it. + #[test] + fn latest_version_for_key_picks_value_at_max_if_present() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 50, Some(node())); // == max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 50, false); + } + + /// When `seek_by_key_subkey` points to the subkey > max - fallback to the prev. + #[test] + fn latest_version_for_key_picks_latest_below_max_when_next_is_above() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 30, Some(node())); // expected + append_account_trie(&wtx, k.clone(), 70, Some(node())); // > max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 30, false); + } + + /// No ≥ max but key exists → use last < max. + #[test] + fn latest_version_for_key_picks_last_below_max_when_none_at_or_above() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 40, Some(node())); // expected (max=100) + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 40, false); + } + + /// All entries are > max → None. + #[test] + fn latest_version_for_key_none_when_everything_is_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 60, Some(node())); + append_account_trie(&wtx, k1.clone(), 70, Some(node())); + append_account_trie(&wtx, k2, 40, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k1).expect("ok"); + assert!(out.is_none(), "no dup ≤ max ⇒ None"); + } + + /// Single dup < max → pick it. 
+ #[test] + fn latest_version_for_key_picks_single_below_max() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 25, Some(node())); // < max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 25, false); + } + + /// Single dup == max → pick it. + #[test] + fn latest_version_for_key_picks_single_at_max() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 50, Some(node())); // == max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 50, false); + } + + /// Latest ≤ max is a tombstone → return it (this API doesn't filter). + #[test] + fn latest_version_for_key_returns_tombstone_if_latest_is_deleted() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 90, None); // latest ≤ max, but deleted + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 90, true); + } + + /// Should skip tombstones and return None when the latest ≤ max is deleted. + #[test] + fn seek_exact_skips_tombstone_returns_none() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 90, None); // latest ≤ max is tombstoned + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.seek_exact(k).expect("ok"); + assert!(out.is_none(), "seek_exact must filter out deleted latest value"); + } + + /// Empty table → None. + #[test] + fn seek_empty_returns_none() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.seek(stored(Nibbles::from_nibbles([0x0A]))).expect("ok"); + assert!(out.is_none()); + } + + /// Start at an existing key whose latest ≤ max is live → returns that key. + #[test] + fn seek_at_live_key_returns_it() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 20, Some(node())); // latest ≤ max + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k.clone()).expect("ok").expect("some"); + assert_eq!(out.0, k); + } + + /// Start at an existing key whose latest ≤ max is tombstoned → skip to next key with live + /// value. 
+    #[test]
+    fn seek_skips_tombstoned_key_to_next_live_key() {
+        let db = setup_db();
+        let k1 = stored(Nibbles::from_nibbles([0x0A]));
+        let k2 = stored(Nibbles::from_nibbles([0x0B]));
+
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            // k1: latest ≤ max is deleted
+            append_account_trie(&wtx, k1.clone(), 10, Some(node()));
+            append_account_trie(&wtx, k1.clone(), 20, None); // tombstone at latest ≤ max
+            // The next key has a live value
+            append_account_trie(&wtx, k2.clone(), 5, Some(node()));
+            wtx.commit().expect("commit");
+        }
+        let tx = db.tx().expect("ro tx");
+        let mut cur = version_cursor(&tx, 50);
+
+        let out = cur.seek(k1).expect("ok").expect("some");
+        assert_eq!(out.0, k2);
+    }
+
+    /// Start between keys → returns the next key’s live latest ≤ max.
+    #[test]
+    fn seek_between_keys_returns_next_key() {
+        let db = setup_db();
+        let k1 = stored(Nibbles::from_nibbles([0x0A]));
+        let k2 = stored(Nibbles::from_nibbles([0x0C]));
+        let k3 = stored(Nibbles::from_nibbles([0x0B]));
+
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k1, 10, Some(node()));
+            append_account_trie(&wtx, k2.clone(), 10, Some(node()));
+            wtx.commit().expect("commit");
+        }
+        let tx = db.tx().expect("ro tx");
+        let mut cur = version_cursor(&tx, 100);
+
+        // Start at k3 = 0x0B (between 0x0A and 0x0C)
+
+        let out = cur.seek(k3).expect("ok").expect("some");
+        assert_eq!(out.0, k2);
+    }
+
+    /// Start after the last key → None.
+    #[test]
+    fn seek_after_last_returns_none() {
+        let db = setup_db();
+        let k1 = stored(Nibbles::from_nibbles([0x0A]));
+        let k2 = stored(Nibbles::from_nibbles([0x0B]));
+        let k3 = stored(Nibbles::from_nibbles([0x0C]));
+
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k1, 10, Some(node()));
+            append_account_trie(&wtx, k2, 10, Some(node()));
+            wtx.commit().expect("commit");
+        }
+        let tx = db.tx().expect("ro tx");
+        let mut cur = version_cursor(&tx, 100);
+
+        let out = cur.seek(k3).expect("ok");
+        assert!(out.is_none());
+    }
+
+    /// If the first key at-or-after has only versions > max, it is effectively not visible → skip
+    /// to next.
+    #[test]
+    fn seek_skips_keys_with_only_versions_above_max() {
+        let db = setup_db();
+        let k1 = stored(Nibbles::from_nibbles([0x0A]));
+        let k2 = stored(Nibbles::from_nibbles([0x0B]));
+
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k1.clone(), 60, Some(node()));
+            append_account_trie(&wtx, k2.clone(), 40, Some(node()));
+            wtx.commit().expect("commit");
+        }
+        let tx = db.tx().expect("ro tx");
+        let mut cur = version_cursor(&tx, 50);
+
+        let out = cur.seek(k1).expect("ok").expect("some");
+        assert_eq!(out.0, k2);
+    }
+
+    /// Start at a key with mixed versions; latest ≤ max is tombstone → skip to next key with live.
+    #[test]
+    fn seek_mixed_versions_tombstone_latest_skips_to_next_key() {
+        let db = setup_db();
+        let k1 = stored(Nibbles::from_nibbles([0x0A]));
+        let k2 = stored(Nibbles::from_nibbles([0x0B]));
+
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k1.clone(), 10, Some(node()));
+            append_account_trie(&wtx, k1.clone(), 30, None);
+            append_account_trie(&wtx, k2.clone(), 5, Some(node()));
+            wtx.commit().expect("commit");
+        }
+        let tx = db.tx().expect("ro tx");
+        let mut cur = version_cursor(&tx, 30);
+
+        let out = cur.seek(k1).expect("ok").expect("some");
+        assert_eq!(out.0, k2);
+    }
+
+    /// When not positioned, `next()` should start from the default key and return the first live
+    /// key.
+ #[test] + fn next_unpositioned_starts_from_default_returns_first_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); // first live + append_account_trie(&wtx, k2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + // Unpositioned cursor + let mut cur = version_cursor(&tx, 100); + + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k1); + } + + /// After positioning on a live key via `seek()`, `next()` should advance to the next live key. + #[test] + fn next_advances_from_current_live_to_next_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); // live + append_account_trie(&wtx, k2.clone(), 10, Some(node())); // next live + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Position at k1 + let _ = cur.seek(k1).expect("ok").expect("some"); + // Next should yield k2 + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// If the next key's latest ≤ max is tombstone, `next()` should skip to the next live key. + #[test] + fn next_skips_tombstoned_key_to_next_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); // will be tombstoned at latest ≤ max + let k3 = stored(Nibbles::from_nibbles([0x0C])); // next live + + { + let wtx = db.tx_mut().expect("rw tx"); + // k1 live + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + // k2: latest ≤ max is tombstone + append_account_trie(&wtx, k2.clone(), 10, Some(node())); + append_account_trie(&wtx, k2, 20, None); + // k3 live + append_account_trie(&wtx, k3.clone(), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + // Position at k1 + let _ = cur.seek(k1).expect("ok").expect("some"); + // next should skip k2 (tombstoned latest) and return k3 + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k3); + } + + /// If positioned on the last live key, `next()` should return None (EOF). + #[test] + fn next_returns_none_at_eof() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); // last key + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2.clone(), 10, Some(node())); // last live + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Position at the last key k2 + let _ = cur.seek(k2).expect("ok").expect("some"); + // `next()` should hit EOF + let out = cur.next().expect("ok"); + assert!(out.is_none()); + } + + /// If the first key has only versions > max, `next()` should skip it and return the next live + /// key. 
+ #[test] + fn next_skips_keys_with_only_versions_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); // only > max + let k2 = stored(Nibbles::from_nibbles([0x0B])); // ≤ max live + + { + let wtx = db.tx_mut().expect("rw tx"); + // k1 only above max (max=50) + append_account_trie(&wtx, k1, 60, Some(node())); + // k2 within max + append_account_trie(&wtx, k2.clone(), 40, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + // Unpositioned; `next()` will start from default and walk + let mut cur = version_cursor(&tx, 50); + + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Empty table: `next()` should return None. + #[test] + fn next_on_empty_returns_none() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.next().expect("ok"); + assert!(out.is_none()); + } + + // ----------------- Account trie cursor thin-wrapper checks ----------------- + + #[test] + fn account_seek_exact_live_maps_key_and_value() { + let db = setup_db(); + let k = Nibbles::from_nibbles([0x0A]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + + // Build wrapper + let mut cur = account_trie_cursor(&tx, 100); + + // Wrapper should return (Nibbles, BranchNodeCompact) + let out = TrieCursor::seek_exact(&mut cur, k).expect("ok").expect("some"); + assert_eq!(out.0, k); + } + + #[test] + fn account_seek_exact_filters_tombstone() { + let db = setup_db(); + let k = Nibbles::from_nibbles([0x0B]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k), 5, Some(node())); + append_account_trie(&wtx, StoredNibbles(k), 9, None); // latest ≤ max tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = account_trie_cursor(&tx, 10); + + let out = TrieCursor::seek_exact(&mut cur, k).expect("ok"); + assert!(out.is_none(), "account seek_exact must filter tombstone"); + } + + #[test] + fn account_seek_and_next_and_current_roundtrip() { + let db = setup_db(); + let k1 = Nibbles::from_nibbles([0x01]); + let k2 = Nibbles::from_nibbles([0x02]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k1), 10, Some(node())); + append_account_trie(&wtx, StoredNibbles(k2), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = account_trie_cursor(&tx, 100); + + // seek at k1 + let out1 = TrieCursor::seek(&mut cur, k1).expect("ok").expect("some"); + assert_eq!(out1.0, k1); + + // current should be k1 + let cur_k = TrieCursor::current(&mut cur).expect("ok").expect("some"); + assert_eq!(cur_k, k1); + + // next should move to k2 + let out2 = TrieCursor::next(&mut cur).expect("ok").expect("some"); + assert_eq!(out2.0, k2); + } + + // ----------------- Storage trie cursor thin-wrapper checks ----------------- + + #[test] + fn storage_seek_exact_respects_address_filter() { + let db = setup_db(); + + let addr_a = B256::from([0xAA; 32]); + let addr_b = B256::from([0xBB; 32]); + + let path = Nibbles::from_nibbles([0x0D]); + + { + let wtx = db.tx_mut().expect("rw tx"); + // insert only under B + append_storage_trie(&wtx, addr_b, path, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + + // Cursor bound to A must not see B’s data + let 
mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + let out_a = TrieCursor::seek_exact(&mut cur_a, path).expect("ok"); + assert!(out_a.is_none(), "no data for addr A"); + + // Cursor bound to B should see it + let mut cur_b = storage_trie_cursor(&tx, 100, addr_b); + let out_b = TrieCursor::seek_exact(&mut cur_b, path).expect("ok").expect("some"); + assert_eq!(out_b.0, path); + } + + #[test] + fn storage_seek_returns_first_key_for_bound_address() { + let db = setup_db(); + + let addr_a = B256::from([0x11; 32]); + let addr_b = B256::from([0x22; 32]); + + let p1 = Nibbles::from_nibbles([0x01]); + let p2 = Nibbles::from_nibbles([0x02]); + let p3 = Nibbles::from_nibbles([0x03]); + + { + let wtx = db.tx_mut().expect("rw tx"); + // For A: only p2 + append_storage_trie(&wtx, addr_a, p2, 10, Some(node())); + // For B: p1 + append_storage_trie(&wtx, addr_b, p1, 10, Some(node())); + wtx.commit().expect("commit"); + } + + // test seek behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // seek at p1: for A there is no p1; the next key >= p1 under A is p2 + let out = TrieCursor::seek(&mut cur_a, p1).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek at p2: exact match + let out = TrieCursor::seek(&mut cur_a, p2).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek at p3: no p3 under A; no next key ≥ p3 under A → None + let out = TrieCursor::seek(&mut cur_a, p3).expect("ok"); + assert!(out.is_none(), "no key ≥ p3 under A"); + } + + // test next behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + let out = TrieCursor::next(&mut cur_a).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // next should yield None as there is no further key under A + let out = TrieCursor::next(&mut cur_a).expect("ok"); + assert!(out.is_none(), "no more keys under A"); + + // current should return None + let out = TrieCursor::current(&mut cur_a).expect("ok"); + assert!(out.is_none(), "no current key after EOF"); + } + + // test seek_exact behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // seek_exact at p1: no exact match + let out = TrieCursor::seek_exact(&mut cur_a, p1).expect("ok"); + assert!(out.is_none(), "no exact p1 under A"); + + // seek_exact at p2: exact match + let out = TrieCursor::seek_exact(&mut cur_a, p2).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek_exact at p3: no exact match + let out = TrieCursor::seek_exact(&mut cur_a, p3).expect("ok"); + assert!(out.is_none(), "no exact p3 under A"); + } + } + + #[test] + fn storage_next_stops_at_address_boundary() { + let db = setup_db(); + + let addr_a = B256::from([0x33; 32]); + let addr_b = B256::from([0x44; 32]); + + let p1 = Nibbles::from_nibbles([0x05]); // under A + let p2 = Nibbles::from_nibbles([0x06]); // under B (next key overall) + + { + let wtx = db.tx_mut().expect("rw tx"); + append_storage_trie(&wtx, addr_a, p1, 10, Some(node())); + append_storage_trie(&wtx, addr_b, p2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // position at p1 (A) + let _ = TrieCursor::seek_exact(&mut cur_a, p1).expect("ok").expect("some"); + + // next should reach boundary; impl filters different address and returns None + let out = TrieCursor::next(&mut cur_a).expect("ok"); + assert!(out.is_none(), "next() should stop when next key is a 
different address"); + } + + #[test] + fn storage_current_maps_key() { + let db = setup_db(); + + let addr = B256::from([0x55; 32]); + let p = Nibbles::from_nibbles([0x09]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_storage_trie(&wtx, addr, p, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = storage_trie_cursor(&tx, 100, addr); + + let _ = TrieCursor::seek_exact(&mut cur, p).expect("ok").expect("some"); + + let now = TrieCursor::current(&mut cur).expect("ok").expect("some"); + assert_eq!(now, p); + } + + #[test] + fn hashed_storage_seek_maps_slot_and_value() { + let db = setup_db(); + let addr = B256::from([0xAA; 32]); + let slot = B256::from([0x10; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, slot, 10, Some(U256::from(7))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr); + + let (got_slot, got_val) = cur.seek(slot).expect("ok").expect("some"); + assert_eq!(got_slot, slot); + assert_eq!(got_val, U256::from(7)); + } + + #[test] + fn hashed_storage_seek_filters_tombstone() { + let db = setup_db(); + let addr = B256::from([0xAB; 32]); + let slot = B256::from([0x11; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, slot, 5, Some(U256::from(1))); + append_hashed_storage(&wtx, addr, slot, 9, None); // latest ≤ max is tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 10, addr); + + let out = cur.seek(slot).expect("ok"); + assert!(out.is_none(), "wrapper must filter tombstoned latest"); + } + + #[test] + fn hashed_storage_seek_and_next_roundtrip() { + let db = setup_db(); + let addr = B256::from([0xAC; 32]); + let s1 = B256::from([0x01; 32]); + let s2 = B256::from([0x02; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, s1, 10, Some(U256::from(11))); + append_hashed_storage(&wtx, addr, s2, 10, Some(U256::from(22))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr); + + let (k1, v1) = cur.seek(s1).expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.next().expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + } + + #[test] + fn hashed_storage_address_boundary() { + let db = setup_db(); + let addr1 = B256::from([0xAC; 32]); + let addr2 = B256::from([0xAD; 32]); + let s1 = B256::from([0x01; 32]); + let s2 = B256::from([0x02; 32]); + let s3 = B256::from([0x03; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr1, s1, 10, Some(U256::from(11))); + append_hashed_storage(&wtx, addr1, s2, 10, Some(U256::from(22))); + wtx.commit().expect("commit"); + } + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr2, s1, 10, Some(U256::from(33))); + append_hashed_storage(&wtx, addr2, s2, 10, Some(U256::from(44))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr1); + + let (k1, v1) = cur.next().expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.next().expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + + let out = cur.next().expect("ok"); + assert!(out.is_none(), "should stop at address boundary"); + + let (k1, v1) = cur.seek(s1).expect("ok").expect("some"); + 
assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.seek(s2).expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + + let out = cur.seek(s3).expect("ok"); + assert!(out.is_none(), "should not see keys from other address"); + } + + #[test] + fn hashed_account_seek_maps_key_and_value() { + let db = setup_db(); + let key = B256::from([0x20; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, key, 10, Some(Account::default())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 100); + + let (got_key, _acc) = cur.seek(key).expect("ok").expect("some"); + assert_eq!(got_key, key); + } + + #[test] + fn hashed_account_seek_filters_tombstone() { + let db = setup_db(); + let key = B256::from([0x21; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, key, 5, Some(Account::default())); + append_hashed_account(&wtx, key, 9, None); // latest ≤ max is tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 10); + + let out = cur.seek(key).expect("ok"); + assert!(out.is_none(), "wrapper must filter tombstoned latest"); + } + + #[test] + fn hashed_account_seek_and_next_roundtrip() { + let db = setup_db(); + let k1 = B256::from([0x01; 32]); + let k2 = B256::from([0x02; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, k1, 10, Some(Account::default())); + append_hashed_account(&wtx, k2, 10, Some(Account::default())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 100); + + let (got1, _) = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(got1, k1); + + let (got2, _) = cur.next().expect("ok").expect("some"); + assert_eq!(got2, k2); + } +} diff --git a/crates/optimism/trie/src/db/mod.rs b/crates/optimism/trie/src/db/mod.rs new file mode 100644 index 00000000000..042b682074f --- /dev/null +++ b/crates/optimism/trie/src/db/mod.rs @@ -0,0 +1,17 @@ +//! MDBX implementation of [`OpProofsStore`](crate::OpProofsStore). +//! +//! This module provides a complete MDBX implementation of the +//! [`OpProofsStore`](crate::OpProofsStore) trait. It uses the [`reth_db`] +//! crate for database interactions and defines the necessary tables and models for storing trie +//! branches, accounts, and storage leaves. + +mod models; +pub use models::*; + +mod store; +pub use store::MdbxProofsStorage; + +mod cursor; +pub use cursor::{ + BlockNumberVersionedCursor, MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor, +}; diff --git a/crates/optimism/trie/src/db/models/block.rs b/crates/optimism/trie/src/db/models/block.rs new file mode 100644 index 00000000000..6c5bd9c5cfa --- /dev/null +++ b/crates/optimism/trie/src/db/models/block.rs @@ -0,0 +1,76 @@ +use alloy_eips::BlockNumHash; +use alloy_primitives::B256; +use bytes::BufMut; +use derive_more::{From, Into}; +use reth_db::{ + table::{Compress, Decompress}, + DatabaseError, +}; +use serde::{Deserialize, Serialize}; + +/// Wrapper for block number and block hash tuple to implement [`Compress`]/[`Decompress`]. +/// +/// Used for storing block metadata (number + hash). 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, From, Into)] +pub struct BlockNumberHash(BlockNumHash); + +impl Compress for BlockNumberHash { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + // Encode block number (8 bytes, big-endian) + hash (32 bytes) = 40 bytes total + buf.put_u64(self.0.number); + buf.put_slice(self.0.hash.as_slice()); + } +} + +impl Decompress for BlockNumberHash { + fn decompress(value: &[u8]) -> Result { + if value.len() != 40 { + return Err(DatabaseError::Decode); + } + + let number = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?); + let hash = B256::from_slice(&value[8..40]); + + Ok(Self(BlockNumHash { number, hash })) + } +} + +impl BlockNumberHash { + /// Create new instance. + pub const fn new(number: u64, hash: B256) -> Self { + Self(BlockNumHash { number, hash }) + } + + /// Get the block number. + pub const fn number(&self) -> u64 { + self.0.number + } + + /// Get the block hash. + pub const fn hash(&self) -> &B256 { + &self.0.hash + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + + #[test] + fn test_block_number_hash_roundtrip() { + let test_cases = vec![ + BlockNumberHash::new(0, B256::ZERO), + BlockNumberHash::new(42, B256::repeat_byte(0xaa)), + BlockNumberHash::new(u64::MAX, B256::repeat_byte(0xff)), + ]; + + for original in test_cases { + let compressed = original.compress(); + let decompressed = BlockNumberHash::decompress(&compressed).unwrap(); + assert_eq!(original, decompressed); + } + } +} diff --git a/crates/optimism/trie/src/db/models/change_set.rs b/crates/optimism/trie/src/db/models/change_set.rs new file mode 100644 index 00000000000..f2328590045 --- /dev/null +++ b/crates/optimism/trie/src/db/models/change_set.rs @@ -0,0 +1,125 @@ +use crate::db::{HashedStorageKey, StorageTrieKey}; +use alloy_primitives::B256; +use reth_db::{ + table::{self, Decode, Encode}, + DatabaseError, +}; +use reth_trie::StoredNibbles; +use serde::{Deserialize, Serialize}; + +/// The keys of the entries in the history tables. +#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct ChangeSet { + /// Keys changed in [`AccountTrieHistory`](super::AccountTrieHistory) table. + pub account_trie_keys: Vec, + /// Keys changed in [`StorageTrieHistory`](super::StorageTrieHistory) table. + pub storage_trie_keys: Vec, + /// Keys changed in [`HashedAccountHistory`](super::HashedAccountHistory) table. + pub hashed_account_keys: Vec, + /// Keys changed in [`HashedStorageHistory`](super::HashedStorageHistory) table. 
+ pub hashed_storage_keys: Vec, +} + +impl table::Encode for ChangeSet { + type Encoded = Vec; + + fn encode(self) -> Self::Encoded { + bincode::serialize(&self).expect("ChangeSet serialization should not fail") + } +} + +impl table::Decode for ChangeSet { + fn decode(value: &[u8]) -> Result { + bincode::deserialize(value).map_err(|_| DatabaseError::Decode) + } +} + +impl table::Compress for ChangeSet { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + let encoded = self.clone().encode(); + buf.put_slice(&encoded); + } +} + +impl table::Decompress for ChangeSet { + fn decompress(value: &[u8]) -> Result { + Self::decode(value) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_db::table::{Compress, Decompress}; + + #[test] + fn test_encode_decode_empty_change_set() { + let change_set = ChangeSet { + account_trie_keys: vec![], + storage_trie_keys: vec![], + hashed_account_keys: vec![], + hashed_storage_keys: vec![], + }; + + let encoded = change_set.clone().encode(); + let decoded = ChangeSet::decode(&encoded).expect("Failed to decode"); + assert_eq!(change_set, decoded); + } + + #[test] + fn test_encode_decode_populated_change_set() { + let account_key = StoredNibbles::from(vec![1, 2, 3, 4]); + let storage_key = StorageTrieKey { + hashed_address: B256::repeat_byte(0x11), + path: StoredNibbles::from(vec![5, 6, 7, 8]), + }; + let hashed_storage_key = HashedStorageKey { + hashed_address: B256::repeat_byte(0x22), + hashed_storage_key: B256::repeat_byte(0x33), + }; + + let change_set = ChangeSet { + account_trie_keys: vec![account_key], + storage_trie_keys: vec![storage_key], + hashed_account_keys: vec![B256::repeat_byte(0x44)], + hashed_storage_keys: vec![hashed_storage_key], + }; + + let encoded = change_set.clone().encode(); + let decoded = ChangeSet::decode(&encoded).expect("Failed to decode"); + assert_eq!(change_set, decoded); + } + + #[test] + fn test_decode_invalid_data() { + let invalid_data = vec![0xFF; 32]; + let result = ChangeSet::decode(&invalid_data); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), DatabaseError::Decode)); + } + + #[test] + fn test_compress_decompress() { + let change_set = ChangeSet { + account_trie_keys: vec![StoredNibbles::from(vec![1, 2, 3])], + storage_trie_keys: vec![StorageTrieKey { + hashed_address: B256::ZERO, + path: StoredNibbles::from(vec![4, 5, 6]), + }], + hashed_account_keys: vec![B256::ZERO], + hashed_storage_keys: vec![HashedStorageKey { + hashed_address: B256::ZERO, + hashed_storage_key: B256::repeat_byte(0x42), + }], + }; + + let mut buf = Vec::new(); + change_set.compress_to_buf(&mut buf); + + let decompressed = ChangeSet::decompress(&buf).expect("Failed to decompress"); + assert_eq!(change_set, decompressed); + } +} diff --git a/crates/optimism/trie/src/db/models/kv.rs b/crates/optimism/trie/src/db/models/kv.rs new file mode 100644 index 00000000000..30a2b042076 --- /dev/null +++ b/crates/optimism/trie/src/db/models/kv.rs @@ -0,0 +1,66 @@ +use crate::db::{ + AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, MaybeDeleted, + StorageTrieHistory, StorageTrieKey, StorageValue, VersionedValue, +}; +use alloy_primitives::B256; +use reth_db::table::{DupSort, Table}; +use reth_primitives_traits::Account; +use reth_trie::{BranchNodeCompact, Nibbles, StoredNibbles}; + +/// Helper to convert inputs into a table key or kv pair. +pub trait IntoKV { + /// Convert `self` into the table key. 
+ fn into_key(self) -> Tab::Key; + + /// Convert `self` into kv for the given `block_number`. + fn into_kv(self, block_number: u64) -> (Tab::Key, Tab::Value); +} + +impl IntoKV for (Nibbles, Option) { + fn into_key(self) -> StoredNibbles { + StoredNibbles::from(self.0) + } + + fn into_kv(self, block_number: u64) -> (StoredNibbles, VersionedValue) { + let (path, node) = self; + (StoredNibbles::from(path), VersionedValue { block_number, value: MaybeDeleted(node) }) + } +} + +impl IntoKV for (B256, Nibbles, Option) { + fn into_key(self) -> StorageTrieKey { + let (hashed_address, path, _) = self; + StorageTrieKey::new(hashed_address, StoredNibbles::from(path)) + } + fn into_kv(self, block_number: u64) -> (StorageTrieKey, VersionedValue) { + let (hashed_address, path, node) = self; + ( + StorageTrieKey::new(hashed_address, StoredNibbles::from(path)), + VersionedValue { block_number, value: MaybeDeleted(node) }, + ) + } +} + +impl IntoKV for (B256, Option) { + fn into_key(self) -> B256 { + self.0 + } + fn into_kv(self, block_number: u64) -> (B256, VersionedValue) { + let (hashed_address, account) = self; + (hashed_address, VersionedValue { block_number, value: MaybeDeleted(account) }) + } +} + +impl IntoKV for (B256, B256, Option) { + fn into_key(self) -> HashedStorageKey { + let (hashed_address, hashed_storage_key, _) = self; + HashedStorageKey::new(hashed_address, hashed_storage_key) + } + fn into_kv(self, block_number: u64) -> (HashedStorageKey, VersionedValue) { + let (hashed_address, hashed_storage_key, value) = self; + ( + HashedStorageKey::new(hashed_address, hashed_storage_key), + VersionedValue { block_number, value: MaybeDeleted(value) }, + ) + } +} diff --git a/crates/optimism/trie/src/db/models/mod.rs b/crates/optimism/trie/src/db/models/mod.rs new file mode 100644 index 00000000000..408bf7ed18e --- /dev/null +++ b/crates/optimism/trie/src/db/models/mod.rs @@ -0,0 +1,84 @@ +//! MDBX implementation of [`OpProofsStore`](crate::OpProofsStore). +//! +//! This module provides a complete MDBX implementation of the +//! [`OpProofsStore`](crate::OpProofsStore) trait. It uses the [`reth_db`] crate for +//! database interactions and defines the necessary tables and models for storing trie branches, +//! accounts, and storage leaves. + +mod block; +pub use block::*; +mod version; +pub use version::*; +mod storage; +pub use storage::*; +mod change_set; +pub(crate) mod kv; +pub use change_set::*; +pub use kv::*; + +use alloy_primitives::B256; +use reth_db::{ + table::{DupSort, TableInfo}, + tables, TableSet, TableType, TableViewer, +}; +use reth_primitives_traits::Account; +use reth_trie::{BranchNodeCompact, StoredNibbles}; +use std::fmt; + +tables! { + /// Stores historical branch nodes for the account state trie. + /// + /// Each entry maps a compact-encoded trie path (`StoredNibbles`) to its versioned branch node. + /// Multiple versions of the same node are stored using the block number as a subkey. + table AccountTrieHistory { + type Key = StoredNibbles; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Stores historical branch nodes for the storage trie of each account. + /// + /// Each entry is identified by a composite key combining the account’s hashed address and the + /// compact-encoded trie path. Versions are tracked using block numbers as subkeys. + table StorageTrieHistory { + type Key = StorageTrieKey; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Stores versioned account state across block history. 
+ /// + /// Each entry maps a hashed account address to its serialized account data (balance, nonce, + /// code hash, storage root). + table HashedAccountHistory { + type Key = B256; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Stores versioned storage state across block history. + /// + /// Each entry maps a composite key of (hashed address, storage key) to its stored value. + /// Used for reconstructing contract storage at any historical block height. + table HashedStorageHistory { + type Key = HashedStorageKey; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Tracks the active proof window in the external historical storage. + /// + /// Stores the earliest and latest block numbers (and corresponding hashes) + /// for which historical trie data is retained. + table ProofWindow { + type Key = ProofWindowKey; + type Value = BlockNumberHash; + } + + /// A reverse mapping of block numbers to a keys of the tables. + /// This is used for efficiently locating data by block number. + table BlockChangeSet { + type Key = u64; // Block number + type Value = ChangeSet; + } +} diff --git a/crates/optimism/trie/src/db/models/storage.rs b/crates/optimism/trie/src/db/models/storage.rs new file mode 100644 index 00000000000..f8caf0e0e98 --- /dev/null +++ b/crates/optimism/trie/src/db/models/storage.rs @@ -0,0 +1,253 @@ +use alloy_primitives::{B256, U256}; +use derive_more::{Constructor, From, Into}; +use reth_db::{ + table::{Compress, Decode, Decompress, Encode}, + DatabaseError, +}; +use reth_trie::StoredNibbles; +use serde::{Deserialize, Serialize}; + +/// Composite key: `(hashed-address, path)` for storage trie branches +/// +/// Used to efficiently index storage branches by both account address and trie path. +/// The encoding ensures lexicographic ordering: first by address, then by path. +#[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct StorageTrieKey { + /// Hashed account address + pub hashed_address: B256, + /// Trie path as nibbles + pub path: StoredNibbles, +} + +impl StorageTrieKey { + /// Create a new storage branch key + pub const fn new(hashed_address: B256, path: StoredNibbles) -> Self { + Self { hashed_address, path } + } +} + +impl Encode for StorageTrieKey { + type Encoded = Vec; + + fn encode(self) -> Self::Encoded { + let mut buf = Vec::with_capacity(32 + self.path.0.len()); + // First encode the address (32 bytes) + buf.extend_from_slice(self.hashed_address.as_slice()); + // Then encode the path + buf.extend_from_slice(&self.path.encode()); + buf + } +} + +impl Decode for StorageTrieKey { + fn decode(value: &[u8]) -> Result { + if value.len() < 32 { + return Err(DatabaseError::Decode); + } + + // First 32 bytes are the address + let hashed_address = B256::from_slice(&value[..32]); + + // Remaining bytes are the path + let path = StoredNibbles::decode(&value[32..])?; + + Ok(Self { hashed_address, path }) + } +} + +/// Composite key: (`hashed_address`, `hashed_storage_key`) for hashed storage values +/// +/// Used to efficiently index storage values by both account address and storage key. +/// The encoding ensures lexicographic ordering: first by address, then by storage key. 
+#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct HashedStorageKey { + /// Hashed account address + pub hashed_address: B256, + /// Hashed storage key + pub hashed_storage_key: B256, +} + +impl HashedStorageKey { + /// Create a new hashed storage key + pub const fn new(hashed_address: B256, hashed_storage_key: B256) -> Self { + Self { hashed_address, hashed_storage_key } + } +} + +impl Encode for HashedStorageKey { + type Encoded = [u8; 64]; + + fn encode(self) -> Self::Encoded { + let mut buf = [0u8; 64]; + // First 32 bytes: address + buf[..32].copy_from_slice(self.hashed_address.as_slice()); + // Next 32 bytes: storage key + buf[32..].copy_from_slice(self.hashed_storage_key.as_slice()); + buf + } +} + +impl Decode for HashedStorageKey { + fn decode(value: &[u8]) -> Result { + if value.len() != 64 { + return Err(DatabaseError::Decode); + } + + let hashed_address = B256::from_slice(&value[..32]); + let hashed_storage_key = B256::from_slice(&value[32..64]); + + Ok(Self { hashed_address, hashed_storage_key }) + } +} + +/// Storage value wrapper for U256 values +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, From, Into, Constructor)] +pub struct StorageValue(pub U256); + +impl Compress for StorageValue { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + let be: [u8; 32] = self.0.to_be_bytes::<32>(); + buf.put_slice(&be); + } +} + +impl Decompress for StorageValue { + fn decompress(value: &[u8]) -> Result { + if value.len() != 32 { + return Err(DatabaseError::Decode); + } + let bytes: [u8; 32] = value.try_into().map_err(|_| DatabaseError::Decode)?; + Ok(Self(U256::from_be_bytes(bytes))) + } +} + +/// Proof Window key for tracking active proof window bounds +/// +/// Used to store earliest and latest block numbers in the external storage. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(u8)] +pub enum ProofWindowKey { + /// Earliest block number stored in external storage + EarliestBlock = 0, + /// Latest block number stored in external storage + LatestBlock = 1, +} + +impl Encode for ProofWindowKey { + type Encoded = [u8; 1]; + + fn encode(self) -> Self::Encoded { + [self as u8] + } +} + +impl Decode for ProofWindowKey { + fn decode(value: &[u8]) -> Result { + match value.first() { + Some(&0) => Ok(Self::EarliestBlock), + Some(&1) => Ok(Self::LatestBlock), + _ => Err(DatabaseError::Decode), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_trie::Nibbles; + + #[test] + fn test_storage_branch_subkey_encode_decode() { + let addr = B256::from([1u8; 32]); + let path = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 2, 3, 4])); + let key = StorageTrieKey::new(addr, path.clone()); + + let encoded = key.clone().encode(); + let decoded = StorageTrieKey::decode(&encoded).unwrap(); + + assert_eq!(key, decoded); + assert_eq!(decoded.hashed_address, addr); + assert_eq!(decoded.path, path); + } + + #[test] + fn test_storage_branch_subkey_ordering() { + let addr1 = B256::from([1u8; 32]); + let addr2 = B256::from([2u8; 32]); + let path1 = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 2])); + let path2 = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 3])); + + let key1 = StorageTrieKey::new(addr1, path1.clone()); + let key2 = StorageTrieKey::new(addr1, path2); + let key3 = StorageTrieKey::new(addr2, path1); + + // Encoded bytes should be sortable: first by address, then by path + let enc1 = key1.encode(); + let enc2 = key2.encode(); + let enc3 = key3.encode(); + + assert!(enc1 < enc2, "Same address, path1 < path2"); + assert!(enc1 < enc3, "addr1 < addr2"); + assert!(enc2 < enc3, "addr1 < addr2 (even with larger path)"); + } + + #[test] + fn test_hashed_storage_subkey_encode_decode() { + let addr = B256::from([1u8; 32]); + let storage_key = B256::from([2u8; 32]); + let key = HashedStorageKey::new(addr, storage_key); + + let encoded = key.clone().encode(); + let decoded = HashedStorageKey::decode(&encoded).unwrap(); + + assert_eq!(key, decoded); + assert_eq!(decoded.hashed_address, addr); + assert_eq!(decoded.hashed_storage_key, storage_key); + } + + #[test] + fn test_hashed_storage_subkey_ordering() { + let addr1 = B256::from([1u8; 32]); + let addr2 = B256::from([2u8; 32]); + let storage1 = B256::from([10u8; 32]); + let storage2 = B256::from([20u8; 32]); + + let key1 = HashedStorageKey::new(addr1, storage1); + let key2 = HashedStorageKey::new(addr1, storage2); + let key3 = HashedStorageKey::new(addr2, storage1); + + // Encoded bytes should be sortable: first by address, then by storage key + let enc1 = key1.encode(); + let enc2 = key2.encode(); + let enc3 = key3.encode(); + + assert!(enc1 < enc2, "Same address, storage1 < storage2"); + assert!(enc1 < enc3, "addr1 < addr2"); + assert!(enc2 < enc3, "addr1 < addr2 (even with larger storage key)"); + } + + #[test] + fn test_hashed_storage_subkey_size() { + let addr = B256::from([1u8; 32]); + let storage_key = B256::from([2u8; 32]); + let key = HashedStorageKey::new(addr, storage_key); + + let encoded = key.encode(); + assert_eq!(encoded.len(), 64, "Encoded size should be exactly 64 bytes"); + } + + #[test] + fn test_metadata_key_encode_decode() { + let key = ProofWindowKey::EarliestBlock; + let encoded = key.encode(); + let decoded = ProofWindowKey::decode(&encoded).unwrap(); + assert_eq!(key, decoded); + + let key = 
+        let key = ProofWindowKey::LatestBlock;
+        let encoded = key.encode();
+        let decoded = ProofWindowKey::decode(&encoded).unwrap();
+        assert_eq!(key, decoded);
+    }
+}
diff --git a/crates/optimism/trie/src/db/models/version.rs b/crates/optimism/trie/src/db/models/version.rs
new file mode 100644
index 00000000000..49917099582
--- /dev/null
+++ b/crates/optimism/trie/src/db/models/version.rs
@@ -0,0 +1,191 @@
+use bytes::{Buf, BufMut};
+use reth_db::{
+    table::{Compress, Decompress},
+    DatabaseError,
+};
+use reth_primitives_traits::ValueWithSubKey;
+use serde::{Deserialize, Serialize};
+
+/// Wrapper type for `Option<T>` that implements [`Compress`] and [`Decompress`]
+///
+/// Encoding:
+/// - `None` => empty byte array (length 0)
+/// - `Some(value)` => compressed bytes of value (length > 0)
+///
+/// This assumes the inner type `T` always compresses to non-empty bytes when it exists.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct MaybeDeleted<T>(pub Option<T>);
+
+impl<T> From<Option<T>> for MaybeDeleted<T> {
+    fn from(opt: Option<T>) -> Self {
+        Self(opt)
+    }
+}
+
+impl<T> From<MaybeDeleted<T>> for Option<T> {
+    fn from(maybe: MaybeDeleted<T>) -> Self {
+        maybe.0
+    }
+}
+
+impl<T: Compress> Compress for MaybeDeleted<T> {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        match &self.0 {
+            None => {
+                // Empty = deleted, write nothing
+            }
+            Some(value) => {
+                // Compress the inner value to the buffer
+                value.compress_to_buf(buf);
+            }
+        }
+    }
+}
+
+impl<T: Decompress> Decompress for MaybeDeleted<T> {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.is_empty() {
+            // Empty = deleted
+            Ok(Self(None))
+        } else {
+            // Non-empty = present
+            let inner = T::decompress(value)?;
+            Ok(Self(Some(inner)))
+        }
+    }
+}
+
+/// Versioned value wrapper for [`DupSort`] tables
+///
+/// For [`DupSort`] tables in MDBX, the Value type must contain the [`DupSort::SubKey`] as a field.
+/// This wrapper combines a [`block_number`] (the [`DupSort::SubKey`]) with
+/// the actual value.
+///
+/// [`DupSort`]: reth_db::table::DupSort
+/// [`DupSort::SubKey`]: reth_db::table::DupSort::SubKey
+/// [`block_number`]: Self::block_number
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct VersionedValue<T> {
+    /// Block number ([`DupSort::SubKey`] for [`DupSort`])
+    ///
+    /// [`DupSort`]: reth_db::table::DupSort
+    /// [`DupSort::SubKey`]: reth_db::table::DupSort::SubKey
+    pub block_number: u64,
+    /// The actual value (may be deleted)
+    pub value: MaybeDeleted<T>,
+}
+
+impl<T> VersionedValue<T> {
+    /// Create a new versioned value
+    pub const fn new(block_number: u64, value: MaybeDeleted<T>) -> Self {
+        Self { block_number, value }
+    }
+}
+
+impl<T: Compress> Compress for VersionedValue<T> {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        // Encode block number first (8 bytes, big-endian)
+        buf.put_u64(self.block_number);
+        // Then encode the value
+        self.value.compress_to_buf(buf);
+    }
+}
+
+impl<T: Decompress> Decompress for VersionedValue<T> {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.len() < 8 {
+            return Err(DatabaseError::Decode);
+        }
+
+        let mut buf: &[u8] = value;
+        let block_number = buf.get_u64();
+        let value = MaybeDeleted::<T>::decompress(&value[8..])?;
+
+        Ok(Self { block_number, value })
+    }
+}
+
+impl<T> ValueWithSubKey for VersionedValue<T> {
+    type SubKey = u64;
+
+    fn get_subkey(&self) -> Self::SubKey {
+        self.block_number
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reth_primitives_traits::Account;
+    use reth_trie::BranchNodeCompact;
+
+    #[test]
+    fn test_maybe_deleted_none() {
+        let none: MaybeDeleted<Account> = MaybeDeleted(None);
+        let compressed = none.compress();
+        assert!(compressed.is_empty(), "None should compress to empty bytes");
+
+        let decompressed = MaybeDeleted::<Account>::decompress(&compressed).unwrap();
+        assert_eq!(decompressed.0, None);
+    }
+
+    #[test]
+    fn test_maybe_deleted_some_account() {
+        let account = Account {
+            nonce: 42,
+            balance: alloy_primitives::U256::from(1000u64),
+            bytecode_hash: None,
+        };
+        let some = MaybeDeleted(Some(account));
+        let compressed = some.compress();
+        assert!(!compressed.is_empty(), "Some should compress to non-empty bytes");
+
+        let decompressed = MaybeDeleted::<Account>::decompress(&compressed).unwrap();
+        assert_eq!(decompressed.0, Some(account));
+    }
+
+    #[test]
+    fn test_maybe_deleted_some_branch() {
+        // Create a simple valid BranchNodeCompact (empty is valid)
+        let branch = BranchNodeCompact::new(
+            0,      // state_mask
+            0,      // tree_mask
+            0,      // hash_mask
+            vec![], // hashes
+            None,   // root_hash
+        );
+        let some = MaybeDeleted(Some(branch.clone()));
+        let compressed = some.compress();
+        assert!(!compressed.is_empty(), "Some should compress to non-empty bytes");
+
+        let decompressed = MaybeDeleted::<BranchNodeCompact>::decompress(&compressed).unwrap();
+        assert_eq!(decompressed.0, Some(branch));
+    }
+
+    #[test]
+    fn test_maybe_deleted_roundtrip() {
+        let test_cases = vec![
+            MaybeDeleted(None),
+            MaybeDeleted(Some(Account {
+                nonce: 0,
+                balance: alloy_primitives::U256::ZERO,
+                bytecode_hash: None,
+            })),
+            MaybeDeleted(Some(Account {
+                nonce: 999,
+                balance: alloy_primitives::U256::MAX,
+                bytecode_hash: Some([0xff; 32].into()),
+            })),
+        ];
+
+        for original in test_cases {
+            let compressed = original.clone().compress();
+            let decompressed = MaybeDeleted::<Account>::decompress(&compressed).unwrap();
+            assert_eq!(original, decompressed);
+        }
+    }
+}
diff --git a/crates/optimism/trie/src/db/schema.md b/crates/optimism/trie/src/db/schema.md
new file mode 100644
index 00000000000..4c2b04fd696
--- /dev/null
+++ b/crates/optimism/trie/src/db/schema.md
@@ -0,0 +1,366 @@
+# Proof History Database Schema
+
+> Location: `crates/optimism/trie/src/db`
+> Backend: **MDBX** (via `reth-db`)
+> Purpose: Serve **historical `eth_getProof`** by storing versioned trie data in a bounded window.
+
+---
+
+## Design Overview
+
+This database is a **versioned, append-only history store** for Ethereum state tries.
+
+Each logical key is stored with **multiple historical versions**, each tagged by a **block number**. Reads select the latest version whose block number is **≤ the requested block** (see the sketch after the principles below).
+
+### Core principles
+
+* History tables are **DupSort** tables
+* Each entry is versioned by `block_number`
+* Deletions are encoded as **tombstones**
+* A reverse index (`BlockChangeSet`) enables **range pruning**
+* Proof window bounds are tracked explicitly
+
+---
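+
+The selection rule can be sketched as follows. This is an illustrative sketch only: the real implementation walks MDBX dup-cursors rather than an in-memory slice, and `select_version` is a hypothetical helper, not part of the crate.
+
+```rust
+/// Pick the newest version whose block number is <= `target_block`.
+/// `versions` must be sorted ascending by block number, mirroring the
+/// dup-sorted (subkey) order inside MDBX.
+fn select_version<T>(versions: &[(u64, Option<T>)], target_block: u64) -> Option<&T> {
+    versions
+        .iter()
+        .take_while(|(block, _)| *block <= target_block)
+        .last()
+        // A `None` payload is a tombstone: the key was deleted at that block
+        .and_then(|(_, value)| value.as_ref())
+}
+```
+
+---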
+
+## Version Encoding
+
+All historical values are wrapped in `VersionedValue<T>`.
+
+### `VersionedValue<T>`
+
+| Field          | Type              | Encoding             |
+| -------------- | ----------------- | -------------------- |
+| `block_number` | `u64`             | big-endian (8 bytes) |
+| `value`        | `MaybeDeleted<T>` | see below            |
+
+```
+VersionedValue := block_number || maybe_deleted_value
+```
+
+---
+
+### `MaybeDeleted<T>`
+
+Encodes value presence or deletion:
+
+| Logical value | Encoding                     |
+| ------------- | ---------------------------- |
+| `Some(T)`     | `T::compress()`              |
+| `None`        | empty byte slice (`len = 0`) |
+
+An empty value represents **deletion at that block**.
+
+---
+
+## Tables
+
+---
+
+## 1. `AccountTrieHistory` (DupSort)
+
+Historical **branch nodes** of the **account trie**.
+
+### Purpose
+
+Reconstruct account trie structure at any historical block.
+
+### Schema
+
+| Component | Type                                |
+| --------- | ----------------------------------- |
+| Key       | `StoredNibbles`                     |
+| SubKey    | `u64` (block number)                |
+| Value     | `VersionedValue<BranchNodeCompact>` |
+
+### Key encoding
+
+* `StoredNibbles` = compact-encoded trie path
+
+### Semantics
+
+For a given trie path:
+
+* Multiple versions may exist
+* Reader selects highest `block_number ≤ target_block`
+
+---
+
+## 2. `StorageTrieHistory` (DupSort)
+
+Historical **branch nodes** of **per-account storage tries**.
+
+### Schema
+
+| Component | Type                                |
+| --------- | ----------------------------------- |
+| Key       | `StorageTrieKey`                    |
+| SubKey    | `u64`                               |
+| Value     | `VersionedValue<BranchNodeCompact>` |
+
+### `StorageTrieKey` encoding
+
+```
+StorageTrieKey :=
+    hashed_address (32 bytes)
+    || StoredNibbles::encode(path)
+```
+
+Ordering:
+
+1. `hashed_address`
+2. trie path bytes
+
+---
+
+## 3. `HashedAccountHistory` (DupSort)
+
+Historical **account leaf values**.
+
+### Schema
+
+| Component | Type                      |
+| --------- | ------------------------- |
+| Key       | `B256` (hashed address)   |
+| SubKey    | `u64`                     |
+| Value     | `VersionedValue<Account>` |
+
+### Semantics
+
+Stores nonce, balance, code hash, and storage root per account per block.
+
+---
+
+## 4. `HashedStorageHistory` (DupSort)
+
+Historical **storage slot values**.
+
+### Schema
+
+| Component | Type                           |
+| --------- | ------------------------------ |
+| Key       | `HashedStorageKey`             |
+| SubKey    | `u64`                          |
+| Value     | `VersionedValue<StorageValue>` |
+
+### `HashedStorageKey` encoding
+
+Fixed 64 bytes:
+
+```
+hashed_address (32 bytes) || hashed_storage_key (32 bytes)
+```
+
+### `StorageValue` encoding
+
+* Wraps `U256`
+* Encoded as **32-byte big-endian**
+
+---
+
+## 5. `BlockChangeSet`
+
+Reverse index of **which keys were modified in a block**.
+
+### Purpose
+
+Efficient pruning by block range (see the sketch at the end of this section).
+
+### Schema
+
+| Component | Type                 |
+| --------- | -------------------- |
+| Key       | `u64` (block number) |
+| Value     | `ChangeSet`          |
+
+### `ChangeSet` structure
+
+```rust
+pub struct ChangeSet {
+    pub account_trie_keys: Vec<StoredNibbles>,
+    pub storage_trie_keys: Vec<StorageTrieKey>,
+    pub hashed_account_keys: Vec<B256>,
+    pub hashed_storage_keys: Vec<HashedStorageKey>,
+}
+```
+
+### Encoding
+
+* Serialized using **bincode**
+
+---
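+
+To make range pruning via the reverse index concrete, here is a minimal sketch. It is illustrative only: `ChangeSet` is collapsed to a single key list, `delete_version` is a hypothetical stand-in for the dup-cursor deletes, and the real store operates on MDBX tables rather than a `BTreeMap`.
+
+```rust
+use std::collections::BTreeMap;
+
+/// Simplified stand-ins for the real table types (illustrative only).
+type Key = Vec<u8>;
+
+#[derive(Default, Clone)]
+struct ChangeSet {
+    keys: Vec<Key>,
+}
+
+/// Prune all versions recorded for blocks in `range` by consulting the
+/// reverse index, then dropping the index entries themselves.
+fn prune_range(
+    change_sets: &mut BTreeMap<u64, ChangeSet>,
+    range: std::ops::RangeInclusive<u64>,
+    mut delete_version: impl FnMut(&Key, u64),
+) {
+    let blocks: Vec<u64> = change_sets.range(range).map(|(b, _)| *b).collect();
+    for block in blocks {
+        if let Some(cs) = change_sets.remove(&block) {
+            for key in &cs.keys {
+                // Delete exactly the (key, block) version from the history table
+                delete_version(key, block);
+            }
+        }
+    }
+}
+```
+
+---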
+
+## 6. `ProofWindow`
+
+Tracks active proof window bounds.
+
+### Schema
+
+| Component | Type              |
+| --------- | ----------------- |
+| Key       | `ProofWindowKey`  |
+| Value     | `BlockNumberHash` |
+
+### `ProofWindowKey`
+
+| Variant         | Encoding |
+| --------------- | -------- |
+| `EarliestBlock` | `0u8`    |
+| `LatestBlock`   | `1u8`    |
+
+### `BlockNumberHash` encoding
+
+```
+block_number (u64 BE, 8 bytes)
+|| block_hash (B256, 32 bytes)
+```
+
+Total size: **40 bytes**
+
+---
+
+## Reads: Hashed & Trie Cursors
+
+Historical reads are performed using **hashed cursors** and **trie cursors**, both operating on versioned history tables.
+
+All reads follow the same rule:
+
+> Select the newest entry whose block number is **≤ the requested block**.
+
+---
+
+### Hashed Cursors
+
+Hashed cursors read **leaf values** from:
+
+* `HashedAccountHistory`
+* `HashedStorageHistory`
+
+They answer:
+
+> *What was the value of this account or storage slot at block B?*
+
+For a given key, the cursor scans historical versions and returns the latest valid value. Tombstones indicate deletion and are treated as non-existence.
+
+---
+
+### Trie Cursors
+
+Trie cursors read **trie branch nodes** from:
+
+* `AccountTrieHistory`
+* `StorageTrieHistory`
+
+They answer:
+
+> *Which trie nodes existed at this path at block B?*
+
+These cursors enable reconstruction of Merkle paths required for proof generation.
+
+---
+
+### Combined Usage
+
+When serving `eth_getProof`:
+
+* Trie cursors reconstruct the Merkle path
+* Hashed cursors supply the leaf values
+
+Both are evaluated at the same target block to produce deterministic historical proofs.
+
+---
+
+## Writes: `store_trie_updates` (Append-Only)
+
+`store_trie_updates` persists all state changes introduced by a block using a strictly **append-only** write model.
+
+### Purpose
+
+The function records **historical trie updates** so that state and proofs can be reconstructed at any later block.
+
+---
+
+### What is written
+
+For a processed block `B`, the following data is appended:
+
+* Account trie branch nodes → `AccountTrieHistory`
+* Storage trie branch nodes → `StorageTrieHistory`
+* Account leaf updates → `HashedAccountHistory`
+* Storage slot updates → `HashedStorageHistory`
+* Modified keys → `BlockChangeSet[B]`
+
+All entries are tagged with the same `block_number`.
+
+---
+
+### How writes work
+
+For each updated item:
+
+1. A `VersionedValue` is created with:
+
+   * `block_number = B`
+   * the encoded node or value
+
+2. The entry is appended to the corresponding history table.
+
+No existing entries are modified or replaced (a minimal sketch of this flow follows below).
+
+---
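+
+A minimal sketch of this append-only flow, assuming simplified types: `Versioned` mirrors `VersionedValue<T>`, and `append_dup` is a hypothetical stand-in for the dup-sorted table append.
+
+```rust
+/// Simplified versioned record, mirroring `VersionedValue<T>` (illustrative).
+struct Versioned<T> {
+    block_number: u64,
+    /// `None` encodes a tombstone (deletion at this block).
+    value: Option<T>,
+}
+
+/// Append one block's updates without touching prior versions.
+/// `append_dup` stands in for the dup-sorted table append; keys are sorted first.
+fn append_block_updates<K: Ord + Clone, T>(
+    block_number: u64,
+    mut updates: Vec<(K, Option<T>)>,
+    mut append_dup: impl FnMut(K, Versioned<T>),
+) -> Vec<K> {
+    updates.sort_by(|(a, _), (b, _)| a.cmp(b));
+    let mut touched_keys = Vec::with_capacity(updates.len());
+    for (key, value) in updates {
+        touched_keys.push(key.clone());
+        // Tombstones (`None`) are appended too, so deletions stay visible in history
+        append_dup(key, Versioned { block_number, value });
+    }
+    // The caller records `touched_keys` under `BlockChangeSet[block_number]`
+    touched_keys
+}
+```
+
+---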
+
+## Initial State Backfill
+
+### Source database (Reth)
+
+The initial state is sourced from **Reth’s main execution database**, which only contains data for the **current canonical state**.
+
+The backfill reads from the following Reth tables:
+
+* `HashedAccounts` – current account leaf values
+* `HashedStorages` – current storage slot values
+* `AccountsTrie` – current account trie branch nodes
+* `StoragesTrie` – current storage trie branch nodes
+
+These tables do **not** contain historical versions; they represent a single finalized state snapshot.
+
+---
+
+### Destination database (Proofs storage)
+
+The data is copied into the **proofs history database** (`OpProofsStore`), which is a **versioned, append-only** store designed for historical proof generation.
+
+---
+
+### How the initial state is created
+
+During backfill:
+
+1. The current state is fully scanned from Reth tables using read-only cursors.
+2. All entries are written into the proofs storage as versioned records.
+3. This creates a complete **baseline state** inside the proofs DB.
+
+The backfill runs only once and is skipped if the proofs DB already has an `earliest_block` set.
+
+---
+
+### Why block `0` is used
+
+Since Reth tables only represent the **current state**, the copied data must be assigned a synthetic version.
+
+Block **`0`** is used as the baseline version because:
+
+* It is ≤ any real block number
+* It establishes a stable initial version for all keys
+* Later block updates naturally override it using higher block numbers
+
+This makes block `0` the canonical **initial state anchor** for versioned reads.
+
+---
diff --git a/crates/optimism/trie/src/db/store.rs b/crates/optimism/trie/src/db/store.rs
new file mode 100644
index 00000000000..1de1dcf6adc
--- /dev/null
+++ b/crates/optimism/trie/src/db/store.rs
@@ -0,0 +1,3426 @@
+use super::{BlockNumberHash, ProofWindow, ProofWindowKey, Tables};
+use crate::{
+    api::WriteCounts,
+    db::{
+        cursor::Dup,
+        models::{
+            kv::IntoKV, AccountTrieHistory, BlockChangeSet, ChangeSet, HashedAccountHistory,
+            HashedStorageHistory, HashedStorageKey, MaybeDeleted, StorageTrieHistory,
+            StorageTrieKey, StorageValue, VersionedValue,
+        },
+        MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor,
+    },
+    BlockStateDiff, OpProofsStorageError, OpProofsStorageResult, OpProofsStore,
+};
+use alloy_eips::{eip1898::BlockWithParent, NumHash};
+use alloy_primitives::{map::HashMap, B256, U256};
+#[cfg(feature = "metrics")]
+use metrics::{gauge, Label};
+use reth_db::{
+    cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
+    mdbx::{init_db_for, DatabaseArguments},
+    table::{DupSort, Table},
+    transaction::{DbTx, DbTxMut},
+    Database, DatabaseEnv, DatabaseError,
+};
+use reth_primitives_traits::Account;
+use reth_trie::{
+    hashed_cursor::HashedCursor,
+    trie_cursor::TrieCursor,
+    updates::{StorageTrieUpdates, TrieUpdates},
+    BranchNodeCompact, HashedPostState, Nibbles,
+};
+use std::{cmp::max, ops::RangeBounds, path::Path};
+
+/// Preprocessed delete work for a prune range
+#[derive(Debug, Default, Clone)]
+struct HistoryDeleteBatch {
+    account_trie: Vec<(<AccountTrieHistory as Table>::Key, u64)>,
+    storage_trie: Vec<(<StorageTrieHistory as Table>::Key, u64)>,
+    hashed_account: Vec<(<HashedAccountHistory as Table>::Key, u64)>,
+    hashed_storage: Vec<(<HashedStorageHistory as Table>::Key, u64)>,
+}
+
+/// MDBX implementation of [`OpProofsStore`].
+#[derive(Debug)]
+pub struct MdbxProofsStorage {
+    env: DatabaseEnv,
+}
+
+struct ProofWindowValue {
+    earliest: NumHash,
+    latest: NumHash,
+}
+
+impl MdbxProofsStorage {
+    /// Creates a new [`MdbxProofsStorage`] instance with the given path.
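+    ///
+    /// Returns a [`DatabaseError`] if the MDBX environment cannot be opened at `path`.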
+    pub fn new(path: &Path) -> Result<Self, DatabaseError> {
+        let env = init_db_for::<_, Tables>(path, DatabaseArguments::default())
+            .map_err(|e| DatabaseError::Other(format!("Failed to open database: {e}")))?;
+        Ok(Self { env })
+    }
+
+    fn inner_get_latest_block_number_hash(
+        &self,
+        tx: &impl DbTx,
+    ) -> OpProofsStorageResult<Option<(u64, B256)>> {
+        let block = self.inner_get_block_number_hash(tx, ProofWindowKey::LatestBlock)?;
+        if block.is_some() {
+            return Ok(block);
+        }
+
+        self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock)
+    }
+
+    fn inner_get_block_number_hash(
+        &self,
+        tx: &impl DbTx,
+        key: ProofWindowKey,
+    ) -> OpProofsStorageResult<Option<(u64, B256)>> {
+        let mut cursor = tx.cursor_read::<ProofWindow>()?;
+        let value = cursor.seek_exact(key)?;
+        Ok(value.map(|(_, val)| (val.number(), *val.hash())))
+    }
+
+    fn inner_get_proof_window(
+        &self,
+        tx: &impl DbTx,
+    ) -> OpProofsStorageResult<Option<ProofWindowValue>> {
+        let mut cursor = tx.cursor_read::<ProofWindow>()?;
+
+        let earliest = match cursor.seek_exact(ProofWindowKey::EarliestBlock)? {
+            Some((_, val)) => NumHash::new(val.number(), *val.hash()),
+            None => return Ok(None),
+        };
+
+        let latest = match cursor.seek_exact(ProofWindowKey::LatestBlock)? {
+            Some((_, val)) => NumHash::new(val.number(), *val.hash()),
+            None => earliest,
+        };
+
+        Ok(Some(ProofWindowValue { earliest, latest }))
+    }
+
+    async fn set_earliest_block_number_hash(
+        &self,
+        block_number: u64,
+        hash: B256,
+    ) -> OpProofsStorageResult<()> {
+        let _ = self.env.update(|tx| {
+            Self::inner_set_earliest_block_number(tx, block_number, hash)?;
+            Ok::<(), DatabaseError>(())
+        })?;
+        Ok(())
+    }
+
+    /// Internal helper to set earliest block number hash within an existing transaction
+    fn inner_set_earliest_block_number(
+        tx: &(impl DbTxMut + DbTx),
+        block_number: u64,
+        hash: B256,
+    ) -> OpProofsStorageResult<()> {
+        let mut cursor = tx.cursor_write::<ProofWindow>()?;
+        cursor.upsert(ProofWindowKey::EarliestBlock, &BlockNumberHash::new(block_number, hash))?;
+        Ok(())
+    }
+
+    /// Persist a batch of versioned history entries to a dup-sorted table.
+    ///
+    /// # Parameters
+    /// - `block_number`: Target block number for versioning entries
+    /// - `items`: **Must be sorted** - iterator of entries to persist
+    /// - `append_mode`: Mode selector for write strategy:
+    ///   - `true` (Append): Appends all entries including tombstones for forward progress
+    ///   - `false` (Prune): Removes tombstones, writes non-tombstones to block 0
+    ///
+    /// The cost of pruning is the cost of (append + deleting tombstones + deleting old block 0).
+    /// The tombstones deletion is expensive as it requires a seek for each (key + subkey).
+    ///
+    /// Uses [`reth_db::mdbx::cursor::Cursor::upsert`] for upsert operation.
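+    ///
+    /// Returns the keys of all input entries (including tombstones), in input order.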
+ fn persist_history_batch( + &self, + tx: &(impl DbTxMut + DbTx), + block_number: T::SubKey, + items: I, + append_mode: bool, + ) -> OpProofsStorageResult> + where + T: Table> + DupSort, + T::Key: Clone, + I: IntoIterator, + I::Item: IntoKV, + { + let mut cur = tx.cursor_dup_write::()?; + let mut keys = Vec::::new(); + + // Materialize iterator to enable partitioning and collect keys + let mut pairs: Vec<(T::Key, T::Value)> = Vec::new(); + for it in items { + let (k, vv) = it.into_kv(block_number); + pairs.push((k.clone(), vv)); + keys.push(k) + } + + if append_mode { + // Append all entries (including tombstones) to preserve full history + for (k, vv) in pairs { + cur.append_dup(k.clone(), vv)?; + } + return Ok(keys); + } + + // Drop current cursor to start clean for Phase 1 + drop(cur); + + // Phase 1: Batch Delete (Sequential) + // Remove all existing state at Block 0 for these keys. + { + let mut del_cur = tx.cursor_dup_write::()?; + for (k, _) in &pairs { + // Seek to (Key, Block 0) + if let Some(vv) = del_cur.seek_by_key_subkey(k.clone(), 0)? && + vv.block_number == 0 + { + del_cur.delete_current()?; + } + } + } + + // Phase 2: Batch Write (Sequential) + // Write new values (skipping tombstones). + { + let mut write_cur = tx.cursor_dup_write::()?; + for (k, vv) in pairs { + if vv.value.0.is_some() { + write_cur.upsert(k, &vv)?; + } + } + } + + Ok(keys) + } + + /// Delete entries for `items` at exactly `block_number` in a dup-sorted table. + /// Seeks (key, block) and deletes current if the subkey matches. + fn delete_dup_sorted( + &self, + tx: &(impl DbTxMut + DbTx), + items: I, + ) -> OpProofsStorageResult<()> + where + T: Table> + DupSort, + T::Key: Clone, + T::SubKey: PartialEq + Clone, + I: IntoIterator, + { + let mut cur = tx.cursor_dup_write::()?; + for (key, subkey) in items { + if let Some(vv) = cur.seek_by_key_subkey(key, subkey)? { + // ensure we didn't land on a >subkey + if vv.block_number == subkey { + cur.delete_current()?; + } + } + } + Ok(()) + } + + /// Append deletion tombstones for all existing storage items of `hashed_address` at + /// `block_number`. Iterates via `next()` from a RO cursor and writes MaybeDeleted(None) + /// rows. + fn wipe_storage( + &self, + tx: &(impl DbTxMut + DbTx), + block_number: u64, + hashed_address: B256, + mut next: Next, + ) -> OpProofsStorageResult> + where + T: Table> + DupSort, + Next: FnMut() -> OpProofsStorageResult>, + (B256, K, Option): IntoKV, + T::Key: Clone, + { + let mut cur = tx.cursor_dup_write::()?; + let mut keys: Vec = Vec::new(); + + while let Some((k, _vv)) = next()? { + let key: T::Key = (hashed_address, k, Option::::None).into_key(); + let del: T::Value = VersionedValue { block_number, value: MaybeDeleted(None) }; + cur.append_dup(key.clone(), del)?; + keys.push(key); + } + + Ok(keys) + } + + /// Collect versioned history over `block_range` using `BlockChangeSet`. 
+ fn collect_history_ranged( + &self, + tx: &impl DbTx, + block_range: impl RangeBounds, + ) -> OpProofsStorageResult { + let mut history = HistoryDeleteBatch::default(); + let mut change_set_cursor = tx.cursor_read::()?; + let mut walker = change_set_cursor.walk_range(block_range)?; + + while let Some(Ok((block_number, change_set))) = walker.next() { + // Push (key, subkey=block_number) pairs + history + .account_trie + .extend(change_set.account_trie_keys.into_iter().map(|k| (k, block_number))); + history + .storage_trie + .extend(change_set.storage_trie_keys.into_iter().map(|k| (k, block_number))); + history + .hashed_account + .extend(change_set.hashed_account_keys.into_iter().map(|k| (k, block_number))); + history + .hashed_storage + .extend(change_set.hashed_storage_keys.into_iter().map(|k| (k, block_number))); + } + + // Sorting by tuple sorts by key first, then by block_number. + history.account_trie.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + history.storage_trie.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + history.hashed_account.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + history.hashed_storage.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + + Ok(history) + } + + /// Delete versioned history over `block_range` using history batch. + fn delete_history_ranged( + &self, + tx: &(impl DbTxMut + DbTx), + block_range: impl RangeBounds, + history: HistoryDeleteBatch, + ) -> OpProofsStorageResult { + let mut change_set_cursor = tx.cursor_write::()?; + let mut walker = change_set_cursor.walk_range(block_range)?; + + while let Some(Ok((_, _))) = walker.next() { + walker.delete_current()?; + } + + // Delete using the simplified API: iterator of (key, subkey) + self.delete_dup_sorted::(tx, history.clone().account_trie)?; + self.delete_dup_sorted::(tx, history.clone().storage_trie)?; + self.delete_dup_sorted::(tx, history.clone().hashed_account)?; + self.delete_dup_sorted::(tx, history.clone().hashed_storage)?; + + Ok(WriteCounts { + account_trie_updates_written_total: history.account_trie.len() as u64, + storage_trie_updates_written_total: history.storage_trie.len() as u64, + hashed_accounts_written_total: history.hashed_account.len() as u64, + hashed_storages_written_total: history.hashed_storage.len() as u64, + }) + } + + /// Write trie/state history for `block_number` from `block_state_diff`. 
+ fn store_trie_updates_for_block( + &self, + tx: &::TXMut, + block_number: u64, + block_state_diff: BlockStateDiff, + append_mode: bool, + ) -> OpProofsStorageResult { + let BlockStateDiff { sorted_trie_updates, sorted_post_state } = block_state_diff; + + let storage_trie_len = sorted_trie_updates.storage_tries.len(); + let hashed_storage_len = sorted_post_state.storages.len(); + + let account_trie_keys = self.persist_history_batch( + tx, + block_number, + sorted_trie_updates.account_nodes.into_iter(), + append_mode, + )?; + let hashed_account_keys = self.persist_history_batch( + tx, + block_number, + sorted_post_state.accounts.iter().copied(), + append_mode, + )?; + + let mut storage_trie_keys = Vec::::with_capacity(storage_trie_len); + for (hashed_address, nodes) in sorted_trie_updates.storage_tries { + // Handle wiped - mark all storage trie as deleted at the current block number + if nodes.is_deleted && append_mode { + // Yet to have any update for the current block number - So just using up to + // previous block number + let mut ro = self.storage_trie_cursor(hashed_address, block_number - 1)?; + let keys = + self.wipe_storage(tx, block_number, hashed_address, || Ok(ro.next()?))?; + + storage_trie_keys.extend(keys); + + // Skip any further processing for this hashed_address + continue; + } + + let keys = self.persist_history_batch( + tx, + block_number, + nodes.storage_nodes.into_iter().map(|(path, node)| (hashed_address, path, node)), + append_mode, + )?; + storage_trie_keys.extend(keys); + } + + let mut hashed_storage_keys = Vec::::with_capacity(hashed_storage_len); + for (hashed_address, storage) in sorted_post_state.storages { + // Handle wiped - mark all storage slots as deleted at the current block number + if append_mode && storage.is_wiped() { + // Yet to have any update for the current block number - So just using up to + // previous block number + let mut ro = self.storage_hashed_cursor(hashed_address, block_number - 1)?; + let keys = + self.wipe_storage(tx, block_number, hashed_address, || Ok(ro.next()?))?; + hashed_storage_keys.extend(keys); + // Skip any further processing for this hashed_address + continue; + } + let keys = self.persist_history_batch( + tx, + block_number, + storage + .storage_slots_ref() + .iter() + .map(|(key, val)| (hashed_address, *key, Some(StorageValue(*val)))), + append_mode, + )?; + hashed_storage_keys.extend(keys); + } + + Ok(ChangeSet { + account_trie_keys, + storage_trie_keys, + hashed_account_keys, + hashed_storage_keys, + }) + } + + /// Append-only writer for a block: validates parent, persists diff (soft-delete=true), + /// records a `BlockChangeSet`, and advances `ProofWindow::LatestBlock`. + fn store_trie_updates_append_only( + &self, + tx: &::TXMut, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> OpProofsStorageResult { + let block_number = block_ref.block.number; + + // Check the latest stored block is the parent of the incoming block + let latest_block_hash = match self.inner_get_latest_block_number_hash(tx)? 
{ + Some((_num, hash)) => hash, + None => B256::ZERO, + }; + + if latest_block_hash != block_ref.parent { + return Err(OpProofsStorageError::OutOfOrder { + block_number, + parent_block_hash: block_ref.parent, + latest_block_hash, + }); + } + + let change_set = + &self.store_trie_updates_for_block(tx, block_number, block_state_diff, true)?; + + // Cursor for recording all changes made in this block for all history tables + let mut change_set_cursor = tx.new_cursor::()?; + change_set_cursor.append(block_number, change_set)?; + + // Update proof window's latest block + let mut proof_window_cursor = tx.new_cursor::()?; + proof_window_cursor.append( + ProofWindowKey::LatestBlock, + &BlockNumberHash::new(block_number, block_ref.block.hash), + )?; + + Ok(WriteCounts { + account_trie_updates_written_total: change_set.account_trie_keys.len() as u64, + storage_trie_updates_written_total: change_set.storage_trie_keys.len() as u64, + hashed_accounts_written_total: change_set.hashed_account_keys.len() as u64, + hashed_storages_written_total: change_set.hashed_storage_keys.len() as u64, + }) + } +} + +impl OpProofsStore for MdbxProofsStorage { + type StorageTrieCursor<'tx> + = MdbxTrieCursor> + where + Self: 'tx; + type AccountTrieCursor<'tx> + = MdbxTrieCursor> + where + Self: 'tx; + type StorageCursor<'tx> + = MdbxStorageCursor> + where + Self: 'tx; + type AccountHashedCursor<'tx> + = MdbxAccountCursor> + where + Self: 'tx; + + async fn store_account_branches( + &self, + account_nodes: Vec<(Nibbles, Option)>, + ) -> OpProofsStorageResult<()> { + let mut account_nodes = account_nodes; + if account_nodes.is_empty() { + return Ok(()); + } + + account_nodes.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch(tx, 0, account_nodes.into_iter(), true)?; + Ok(()) + })? + } + + async fn store_storage_branches( + &self, + hashed_address: B256, + storage_nodes: Vec<(Nibbles, Option)>, + ) -> OpProofsStorageResult<()> { + let mut storage_nodes = storage_nodes; + if storage_nodes.is_empty() { + return Ok(()); + } + + storage_nodes.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch( + tx, + 0, + storage_nodes.into_iter().map(|(path, node)| (hashed_address, path, node)), + true, + )?; + Ok(()) + })? + } + + async fn store_hashed_accounts( + &self, + accounts: Vec<(B256, Option)>, + ) -> OpProofsStorageResult<()> { + let mut accounts = accounts; + if accounts.is_empty() { + return Ok(()); + } + + // sort the accounts by key to ensure insertion is efficient + accounts.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch(tx, 0, accounts.into_iter(), true)?; + Ok(()) + })? + } + + async fn store_hashed_storages( + &self, + hashed_address: B256, + storages: Vec<(B256, U256)>, + ) -> OpProofsStorageResult<()> { + let mut storages = storages; + if storages.is_empty() { + return Ok(()); + } + + // sort the storages by key to ensure insertion is efficient + storages.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch( + tx, + 0, + storages + .into_iter() + .map(|(key, val)| (hashed_address, key, Some(StorageValue(val)))), + true, + )?; + Ok(()) + })? + } + + async fn get_earliest_block_number(&self) -> OpProofsStorageResult> { + self.env.view(|tx| self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock))? + } + + async fn get_latest_block_number(&self) -> OpProofsStorageResult> { + self.env.view(|tx| self.inner_get_latest_block_number_hash(tx))? 
+ } + + fn storage_trie_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxTrieCursor::new(cursor, max_block_number, Some(hashed_address))) + } + + fn account_trie_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxTrieCursor::new(cursor, max_block_number, None)) + } + + fn storage_hashed_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxStorageCursor::new(cursor, max_block_number, hashed_address)) + } + + fn account_hashed_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxAccountCursor::new(cursor, max_block_number)) + } + + async fn store_trie_updates( + &self, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> OpProofsStorageResult { + self.env + .update(|tx| self.store_trie_updates_append_only(tx, block_ref, block_state_diff))? + } + + async fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { + self.env.view(|tx| { + let mut change_set_cursor = tx.cursor_read::()?; + let (_, change_set) = change_set_cursor + .seek_exact(block_number)? + .ok_or(OpProofsStorageError::NoChangeSetForBlock(block_number))?; + + let mut account_trie_cursor = tx.new_cursor::()?; + let mut storage_trie_cursor = tx.new_cursor::()?; + let mut hashed_account_cursor = tx.new_cursor::()?; + let mut hashed_storage_cursor = tx.new_cursor::()?; + + let mut trie_updates = TrieUpdates::default(); + for key in change_set.account_trie_keys { + let entry = + match account_trie_cursor.seek_by_key_subkey(key.clone(), block_number)? { + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingAccountTrieHistory( + key.0, + block_number, + )) + } + }; + + if let Some(value) = entry { + trie_updates.account_nodes.insert(key.0, value); + } else { + trie_updates.removed_nodes.insert(key.0); + } + } + + for key in change_set.storage_trie_keys { + let entry = + match storage_trie_cursor.seek_by_key_subkey(key.clone(), block_number)? { + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingStorageTrieHistory( + key.hashed_address, + key.path.0, + block_number, + )) + } + }; + + let stu = trie_updates + .storage_tries + .entry(key.hashed_address) + .or_insert_with(StorageTrieUpdates::default); + + // handle is_deleted scenario + // Issue: https://github.com/op-rs/op-reth/issues/323 + if let Some(value) = entry { + stu.storage_nodes.insert(key.path.0, value); + } else { + stu.removed_nodes.insert(key.path.0); + } + } + + let mut post_state = + HashedPostState::with_capacity(change_set.hashed_account_keys.len()); + for key in change_set.hashed_account_keys { + let entry = match hashed_account_cursor.seek_by_key_subkey(key, block_number)? { + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingHashedAccountHistory( + key, + block_number, + )) + } + }; + + post_state.accounts.insert(key, entry); + } + + for key in change_set.hashed_storage_keys { + let entry = + match hashed_storage_cursor.seek_by_key_subkey(key.clone(), block_number)? 
{ + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingHashedStorageHistory { + hashed_address: key.hashed_address, + hashed_storage_key: key.hashed_storage_key, + block_number, + }) + } + }; + + let hs = post_state.storages.entry(key.hashed_address).or_default(); + + // handle wiped storage scenario + // Issue: https://github.com/op-rs/op-reth/issues/323 + if let Some(value) = entry { + hs.storage.insert(key.hashed_storage_key, value.0); + } else { + hs.storage.insert(key.hashed_storage_key, U256::ZERO); + } + } + + Ok(BlockStateDiff { + sorted_trie_updates: trie_updates.into_sorted(), + sorted_post_state: post_state.into_sorted(), + }) + })? + } + + /// Update the initial state with the provided diff. + /// Prune all historical trie data till `new_earliest_block_number` (inclusive) using + /// the [`BlockChangeSet`] index. + /// + /// Arguments: + /// - `new_earliest_block_ref`: The new earliest block reference (with parent hash). + /// - `diff`: The state diff to apply to the initial state (block 0). This diff represents all + /// the changes from the old earliest block to the new earliest block (inclusive). + async fn prune_earliest_state( + &self, + new_earliest_block_ref: BlockWithParent, + diff: BlockStateDiff, + ) -> OpProofsStorageResult { + let mut write_counts = WriteCounts::default(); + + let new_earliest_block_number = new_earliest_block_ref.block.number; + let Some((old_earliest_block_number, _)) = self.get_earliest_block_number().await? else { + return Ok(write_counts); // Nothing to prune + }; + + if old_earliest_block_number >= new_earliest_block_number { + return Ok(write_counts); // Nothing to prune + } + + // collect history for deletion + let history_range = max(old_earliest_block_number, 1)..=new_earliest_block_number; + let history_to_delete = + self.env.view(|tx| self.collect_history_ranged(tx, history_range.clone()))??; + + self.env.update(|tx| { + // Update the initial state (block zero) + let change_set = self.store_trie_updates_for_block(tx, 0, diff, false)?; + write_counts += WriteCounts::new( + change_set.account_trie_keys.len() as u64, + change_set.storage_trie_keys.len() as u64, + change_set.hashed_account_keys.len() as u64, + change_set.hashed_storage_keys.len() as u64, + ); + + // Delete the old entries for the block range excluding block 0 + let delete_counts = self.delete_history_ranged(tx, history_range, history_to_delete)?; + write_counts += delete_counts; + + // Set the earliest block number to the new value + Self::inner_set_earliest_block_number( + tx, + new_earliest_block_number, + new_earliest_block_ref.block.hash, + )?; + Ok(write_counts) + })? + } + + /// Unwind the historical state to `unwind_upto_block` (inclusive), deleting all history + /// starting from provided block. Also updates the `ProofWindow::LatestBlock` to parent of + /// `unwind_upto_block`. + async fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> { + let history_to_delete = + self.env.view(|tx| self.collect_history_ranged(tx, to.block.number..))??; + + self.env.update(|tx| { + let proof_window = match self.inner_get_proof_window(tx)? 
{ + Some(pw) => pw, + None => return Ok(()), // Nothing to unwind + }; + + if to.block.number > proof_window.latest.number { + return Ok(()); // Nothing to unwind + } + + if to.block.number <= proof_window.earliest.number { + return Err(OpProofsStorageError::UnwindBeyondEarliest { + unwind_block_number: to.block.number, + earliest_block_number: proof_window.earliest.number, + }); + } + + self.delete_history_ranged(tx, to.block.number.., history_to_delete)?; + + let new_latest_block = + BlockNumberHash::new(to.block.number.saturating_sub(1), to.parent); + let mut proof_window_cursor = tx.new_cursor::()?; + proof_window_cursor.append(ProofWindowKey::LatestBlock, &new_latest_block)?; + + Ok(()) + })? + } + + async fn replace_updates( + &self, + latest_common_block_number: u64, + blocks_to_add: HashMap, + ) -> OpProofsStorageResult<()> { + let history_to_delete = self + .env + .view(|tx| self.collect_history_ranged(tx, latest_common_block_number + 1..))??; + + self.env.update(|tx| { + self.delete_history_ranged(tx, latest_common_block_number + 1.., history_to_delete)?; + + // Sort by block number: Hashmap does not guarantee order + // todo: use a sorted vec instead + let mut blocks_to_add_vec: Vec<(BlockWithParent, BlockStateDiff)> = + blocks_to_add.into_iter().collect(); + + blocks_to_add_vec.sort_unstable_by_key(|(bwp, _)| bwp.block.number); + + // update the proof window + // todo: refactor to use block hash from the block to add. We need to pass the + // BlockNumHash type for the latest_common_block_number + let mut proof_window_cursor = tx.new_cursor::()?; + proof_window_cursor.append( + ProofWindowKey::LatestBlock, + &BlockNumberHash::new( + latest_common_block_number, + blocks_to_add_vec.first().unwrap().0.parent, + ), + )?; + + for (block_with_parent, diff) in blocks_to_add_vec { + self.store_trie_updates_append_only(tx, block_with_parent, diff)?; + } + Ok(()) + })? + } + + async fn set_earliest_block_number( + &self, + block_number: u64, + hash: B256, + ) -> OpProofsStorageResult<()> { + self.set_earliest_block_number_hash(block_number, hash).await + } +} + +/// This implementation is copied from the +/// [`DatabaseMetrics`](reth_db::database_metrics::DatabaseMetrics) implementation for +/// [`DatabaseEnv`]. As the implementation hard-coded the table name, we need to reimplement it. +#[cfg(feature = "metrics")] +impl reth_db::database_metrics::DatabaseMetrics for MdbxProofsStorage { + fn report_metrics(&self) { + for (name, value, labels) in self.gauge_metrics() { + gauge!(name, labels).set(value); + } + } + + fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec

, + /// Reader to fetch block hash by block number + block_hash_reader: H, + /// Keep at least these many recent blocks + min_block_interval: u64, + /// Maximum number of blocks to prune in one database transaction + prune_batch_size: u64, + // TODO: add timeout - Maximum time for one pruner run. If `None`, no timeout. + #[doc(hidden)] + #[cfg(feature = "metrics")] + metrics: Metrics, +} + +impl OpProofStoragePruner { + /// Create a new pruner. + pub fn new( + provider: OpProofsStorage

, + block_hash_reader: H, + min_block_interval: u64, + prune_batch_size: u64, + ) -> Self { + Self { + provider, + block_hash_reader, + min_block_interval, + prune_batch_size, + #[cfg(feature = "metrics")] + metrics: Metrics::default(), + } + } +} + +impl OpProofStoragePruner +where + P: OpProofsStore, + H: BlockHashReader, +{ + async fn run_inner(&self) -> OpProofStoragePrunerResult { + let latest_block_opt = self.provider.get_latest_block_number().await?; + if latest_block_opt.is_none() { + trace!(target: "trie::pruner", "No latest blocks in the proof storage"); + return Ok(PrunerOutput::default()) + } + + let earliest_block_opt = self.provider.get_earliest_block_number().await?; + if earliest_block_opt.is_none() { + trace!(target: "trie::pruner", "No earliest blocks in the proof storage"); + return Ok(PrunerOutput::default()) + } + + let latest_block = latest_block_opt.unwrap().0; + let earliest_block = earliest_block_opt.unwrap().0; + + let interval = latest_block.saturating_sub(earliest_block); + if interval <= self.min_block_interval { + trace!(target: "trie::pruner", "Nothing to prune"); + return Ok(PrunerOutput::default()) + } + + // at this point `latest_block` is always greater than `min_block_interval` + let target_earliest_block = latest_block - self.min_block_interval; + + info!( + target: "trie::pruner", + from_block = earliest_block, + to_block = target_earliest_block, + "Starting pruning proof storage", + ); + + let mut current_earliest_block = earliest_block; + let mut prune_output = PrunerOutput { + start_block: earliest_block, + end_block: target_earliest_block, + ..Default::default() + }; + + // Prune in batches + while current_earliest_block < target_earliest_block { + // Calculate the end of this batch + let batch_end_block = + cmp::min(current_earliest_block + self.prune_batch_size, target_earliest_block); + + let batch_output = self.prune_batch(current_earliest_block, batch_end_block).await?; + + prune_output.extend_ref(batch_output); + + // Update loop state + current_earliest_block = batch_end_block; + } + + Ok(prune_output) + } + + /// Prunes a single batch of blocks. + async fn prune_batch( + &self, + start_block: u64, + end_block: u64, + ) -> Result { + let batch_start_time = Instant::now(); + let mut batch_diff = BlockStateDiff::default(); + + // Fetch all diffs from (start_block + 1) to end_block (inclusive) + for i in (start_block + 1)..=end_block { + let diff = self.provider.fetch_trie_updates(i).await.inspect_err(|err| { + error!( + target: "trie::pruner", + block = i, + ?err, + "Failed to fetch trie updates for block during pruning" + ) + })?; + batch_diff.extend_ref(&diff); + } + let fetch_duration = batch_start_time.elapsed(); + + // Fetch block hashes for the new earliest block of this batch + let new_earliest_block_hash = self + .block_hash_reader + .block_hash(end_block) + .inspect_err(|err| { + error!( + target: "trie::pruner", + block = end_block, + ?err, + "Failed to fetch block hash for new earliest block during pruning" + ) + })? + .ok_or(PrunerError::BlockNotFound(end_block))?; + + let parent_block_num = end_block - 1; + let parent_block_hash = self + .block_hash_reader + .block_hash(parent_block_num) + .inspect_err(|err| { + error!( + target: "trie::pruner", + block = parent_block_num, + ?err, + "Failed to fetch block hash for parent block during pruning" + ) + })? 
+ .ok_or(PrunerError::BlockNotFound(parent_block_num))?; + + let block_with_parent = BlockWithParent { + parent: parent_block_hash, + block: BlockNumHash { number: end_block, hash: new_earliest_block_hash }, + }; + + // Commit this batch + let write_counts = + self.provider.prune_earliest_state(block_with_parent, batch_diff).await?; + + let duration = batch_start_time.elapsed(); + let batch_output = PrunerOutput { + duration, + fetch_duration, + prune_duration: duration.saturating_sub(fetch_duration), + start_block, + end_block, + write_counts, + }; + + // Record metrics for this batch + #[cfg(feature = "metrics")] + self.metrics.record_prune_result(batch_output.clone()); + + info!( + target: "trie::pruner", + ?batch_output, + "Finished pruning batch of proof storage", + ); + Ok(batch_output) + } + + /// Run the pruner + pub async fn run(&self) { + let res = self.run_inner().await; + if let Err(e) = res { + error!(target: "trie::pruner", err=%e, "Pruner failed"); + return; + } + info!(target: "trie::pruner", result = %res.unwrap(), "Finished pruning proof storage"); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::MdbxProofsStorage; + use alloy_eips::{BlockHashOrNumber, NumHash}; + use alloy_primitives::{BlockNumber, B256, U256}; + use mockall::mock; + use reth_primitives_traits::Account; + use reth_storage_errors::provider::ProviderResult; + use reth_trie::{ + hashed_cursor::HashedCursor, + trie_cursor::TrieCursor, + updates::{StorageTrieUpdates, TrieUpdates, TrieUpdatesSorted}, + BranchNodeCompact, HashedPostState, HashedStorage, Nibbles, + }; + use std::sync::Arc; + use tempfile::TempDir; + + mock! ( + #[derive(Debug)] + pub BlockHashReader {} + + impl BlockHashReader for BlockHashReader { + fn block_hash(&self, number: BlockNumber) -> ProviderResult>; + + fn convert_block_hash( + &self, + _hash_or_number: BlockHashOrNumber, + ) -> ProviderResult>; + + fn canonical_hashes_range( + &self, + _start: BlockNumber, + _end: BlockNumber, + ) -> ProviderResult>; + } + ); + + fn b256(n: u64) -> B256 { + use alloy_primitives::keccak256; + keccak256(n.to_be_bytes()) + } + + /// Build a block-with-parent for number `n` with deterministic hash. 
+ fn block(n: u64, parent: B256) -> BlockWithParent { + BlockWithParent::new(parent, NumHash::new(n, b256(n))) + } + + #[tokio::test] + async fn run_inner_and_and_verify_updated_state() { + // --- env/store --- + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + store.set_earliest_block_number(0, B256::ZERO).await.expect("set earliest"); + + // --- entities --- + // accounts + let a1 = B256::from([0xA1; 32]); + let a2 = B256::from([0xA2; 32]); + let a3 = B256::from([0xA3; 32]); // introduced later + + // one storage address with 3 slots + let stor_addr = B256::from([0x10; 32]); + let s1 = B256::from([0xB1; 32]); + let s2 = B256::from([0xB2; 32]); + let s3 = B256::from([0xB3; 32]); + + // account-trie paths (p1 gets removed by block 3; p2 remains; p3 added later) + let p1 = Nibbles::from_nibbles_unchecked([0x01, 0x02]); + let p2 = Nibbles::from_nibbles_unchecked([0x03, 0x04]); + let p3 = Nibbles::from_nibbles_unchecked([0x05, 0x06]); + + let node_p1 = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::from([0x11; 32]))); + let node_p2 = BranchNodeCompact::new(0b10, 0, 0, vec![], Some(B256::from([0x22; 32]))); + let node_p3 = BranchNodeCompact::new(0b11, 0, 0, vec![], Some(B256::from([0x33; 32]))); + + // storage-trie paths (st1 removed by block 3; st2 remains; st3 added later) + let st1 = Nibbles::from_nibbles_unchecked([0x0A]); + let st2 = Nibbles::from_nibbles_unchecked([0x0B]); + let st3 = Nibbles::from_nibbles_unchecked([0x0C]); + + let node_st2 = BranchNodeCompact::new(0b101, 0, 0, vec![], Some(B256::from([0x44; 32]))); + let node_st3 = BranchNodeCompact::new(0b110, 0, 0, vec![], Some(B256::from([0x55; 32]))); + + // --- write 5 blocks manually --- + let mut parent = B256::ZERO; + + // Block 1: add a1,a2; s1=100, s2=200; add p1, st1 + { + let b1 = block(1, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a1, + Some(Account { nonce: 1, balance: U256::from(1_001), ..Default::default() }), + ); + d_post_state.accounts.insert( + a2, + Some(Account { nonce: 1, balance: U256::from(1_002), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s1, U256::from(100)); + hs.storage.insert(s2, U256::from(200)); + d_post_state.storages.insert(stor_addr, hs); + + d_trie_updates.account_nodes.insert(p1, node_p1.clone()); + let e = d_trie_updates.storage_tries.entry(stor_addr).or_default(); + e.storage_nodes.insert(st1, BranchNodeCompact::default()); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b1, d).await.expect("b1"); + parent = b256(1); + } + + // Block 2: update a2; add a3; s2=220, s3=300; add p2, st2 + { + let b2 = block(2, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a2, + Some(Account { nonce: 2, balance: U256::from(2_002), ..Default::default() }), + ); + d_post_state.accounts.insert( + a3, + Some(Account { nonce: 1, balance: U256::from(1_003), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s2, U256::from(220)); + hs.storage.insert(s3, U256::from(300)); + d_post_state.storages.insert(stor_addr, hs); + + d_trie_updates.account_nodes.insert(p2, node_p2.clone()); + let e = 
d_trie_updates.storage_tries.entry(stor_addr).or_default(); + e.storage_nodes.insert(st2, node_st2.clone()); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b2, d).await.expect("b2"); + parent = b256(2); + } + + // Block 3: delete a1; leave a2,a3; remove p1; remove st1 (storage-trie) + { + let b3 = block(3, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + // delete a1, keep a2 & a3 values unchanged for this block + d_post_state.accounts.insert(a1, None); + + // remove account trie node p1 + d_trie_updates.removed_nodes.insert(p1); + + // remove storage-trie node st1 + let mut st_upd = StorageTrieUpdates::default(); + st_upd.removed_nodes.insert(st1); + d_trie_updates.storage_tries.insert(stor_addr, st_upd); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b3, d).await.expect("b3"); + parent = b256(3); + } + + // Block 4 (kept): update a2; s1=140; add p3, st3 + { + let b4 = block(4, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a2, + Some(Account { nonce: 3, balance: U256::from(3_002), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s1, U256::from(140)); + d_post_state.storages.insert(stor_addr, hs); + d_trie_updates.account_nodes.insert(p3, node_p3.clone()); + let e = d_trie_updates.storage_tries.entry(stor_addr).or_default(); + e.storage_nodes.insert(st3, node_st3.clone()); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b4, d).await.expect("b4"); + parent = b256(4); + } + + // Block 5 (kept): update a3; s3=330 + { + let b5 = block(5, parent); + + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a3, + Some(Account { nonce: 2, balance: U256::from(2_003), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s3, U256::from(330)); + d_post_state.storages.insert(stor_addr, hs); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: TrieUpdatesSorted::default(), + }; + store.store_trie_updates(b5, d).await.expect("b5"); + } + + // sanity: earliest=0, latest=5 + { + let e = store.get_earliest_block_number().await.expect("earliest").expect("some"); + let l = store.get_latest_block_number().await.expect("latest").expect("some"); + assert_eq!(e.0, 0); + assert_eq!(l.0, 5); + } + + // --- prune: remove the first 3 blocks, keep 4 and 5 + // new_earliest = 5-1 = 4 + let mut block_hash_reader = MockBlockHashReader::new(); + block_hash_reader + .expect_block_hash() + .withf(move |block_num| *block_num == 4) + .returning(move |_| Ok(Some(b256(4)))); + + block_hash_reader + .expect_block_hash() + .withf(move |block_num| *block_num == 3) + .returning(move |_| Ok(Some(b256(3)))); + + let pruner = OpProofStoragePruner::new(store.clone(), block_hash_reader, 1, 1000); + let out = pruner.run_inner().await.expect("pruner ok"); + assert_eq!(out.start_block, 0); + assert_eq!(out.end_block, 4, "pruned up to 4 (inclusive); new earliest is 4"); + + // proof window moved: earliest=4, latest=5 + { + let e = 
store.get_earliest_block_number().await.expect("earliest").expect("some"); + let l = store.get_latest_block_number().await.expect("latest").expect("some"); + assert_eq!(e.0, 4); + assert_eq!(e.1, b256(4)); + assert_eq!(l.0, 5); + assert_eq!(l.1, b256(5)); + } + + // --- DB checks + let mut acc_cur = store.account_hashed_cursor(4).expect("acc cur"); + let mut stor_cur = store.storage_hashed_cursor(stor_addr, 4).expect("stor cur"); + let mut acc_trie_cur = store.account_trie_cursor(4).expect("acc trie cur"); + let mut stor_trie_cur = store.storage_trie_cursor(stor_addr, 4).expect("stor trie cur"); + + // Check these histories have been removed + let pruned_hashed_account = a1; + let pruned_trie_accounts = p1; + let pruned_trie_storage = st1; + + assert_ne!( + acc_cur.seek(pruned_hashed_account).expect("seek").unwrap().0, + pruned_hashed_account, + "deleted account must not exist in earliest snapshot" + ); + assert_ne!( + acc_trie_cur.seek(pruned_trie_accounts).expect("seek").unwrap().0, + pruned_trie_accounts, + "deleted account trie must not exist in earliest snapshot" + ); + assert_ne!( + stor_trie_cur.seek(pruned_trie_storage).expect("seek").unwrap().0, + pruned_trie_storage, + "deleted storage trie must not exist in earliest snapshot" + ); + + // Check these histories have been updated - till block 4 + let updated_hashed_accounts = vec![ + (a2, Account { nonce: 3, balance: U256::from(3_002), ..Default::default() }), /* block 4 */ + (a3, Account { nonce: 1, balance: U256::from(1_003), ..Default::default() }), /* block 2 */ + ]; + let updated_hashed_storage = vec![ + (s1, U256::from(140)), // block 4 + (s2, U256::from(220)), // block 2 + (s3, U256::from(300)), // block 2 + ]; + let updated_trie_accounts = vec![ + (p2, node_p2.clone()), // block 2 + (p3, node_p3.clone()), // block 4 + ]; + let updated_trie_storage = vec![ + (st2, node_st2.clone()), // block 2 + (st3, node_st3.clone()), // block 4 + ]; + + for (key, val) in updated_hashed_accounts { + let (k, vv) = acc_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + + for (key, val) in updated_hashed_storage { + let (k, vv) = stor_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + + for (key, val) in updated_trie_accounts { + let (k, vv) = acc_trie_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + for (key, val) in updated_trie_storage { + let (k, vv) = stor_trie_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + } + + // Both latest and earliest blocks are None -> early return default; DB untouched. 
+ #[tokio::test] + async fn run_inner_where_latest_block_is_none() { + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + let earliest = store.get_earliest_block_number().await.unwrap(); + let latest = store.get_latest_block_number().await.unwrap(); + println!("{:?} {:?}", earliest, latest); + assert!(earliest.is_none()); + assert!(latest.is_none()); + + let block_hash_reader = MockBlockHashReader::new(); + let pruner = OpProofStoragePruner::new(store, block_hash_reader, 10, 1000); + let out = pruner.run_inner().await.expect("ok"); + assert_eq!(out, PrunerOutput::default(), "should early-return default output"); + } + + // The earliest block is None, but the latest block exists -> early return default. + #[tokio::test] + async fn run_inner_earliest_none_real_db() { + use crate::BlockStateDiff; + + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + // Write a single block to set *latest* only. + store + .store_trie_updates(block(3, B256::ZERO), BlockStateDiff::default()) + .await + .expect("store b1"); + + let earliest = store.get_earliest_block_number().await.unwrap(); + let latest = store.get_latest_block_number().await.unwrap(); + assert!(earliest.is_none(), "earliest must remain None"); + assert_eq!(latest.unwrap().0, 3); + + let block_hash_reader = MockBlockHashReader::new(); + let pruner = OpProofStoragePruner::new(store, block_hash_reader, 1, 1000); + let out = pruner.run_inner().await.expect("ok"); + assert_eq!(out, PrunerOutput::default(), "should early-return default output"); + } + + // interval < min_block_interval -> "Nothing to prune" path; default output. + #[tokio::test] + async fn run_inner_interval_too_small_real_db() { + use crate::BlockStateDiff; + + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + // Set earliest=4 explicitly + let earliest_num = 4u64; + let h4 = b256(4); + store.set_earliest_block_number(earliest_num, h4).await.expect("set earliest"); + + // Set latest=5 by storing block 5 + let b5 = block(5, h4); + store.store_trie_updates(b5, BlockStateDiff::default()).await.expect("store b5"); + + // Sanity: earliest=4, latest=5 => interval=1 + let e = store.get_earliest_block_number().await.unwrap().unwrap(); + let l = store.get_latest_block_number().await.unwrap().unwrap(); + assert_eq!(e.0, 4); + assert_eq!(l.0, 5); + + // Require min_block_interval=2 (or greater) so interval < min + let block_hash_reader = MockBlockHashReader::new(); + let pruner = OpProofStoragePruner::new(store, block_hash_reader, 2, 1000); + let out = pruner.run_inner().await.expect("ok"); + assert_eq!(out, PrunerOutput::default(), "no pruning should occur"); + } +} diff --git a/crates/optimism/trie/src/prune/task.rs b/crates/optimism/trie/src/prune/task.rs new file mode 100644 index 00000000000..920dc9d8cf6 --- /dev/null +++ b/crates/optimism/trie/src/prune/task.rs @@ -0,0 +1,64 @@ +use crate::{prune::OpProofStoragePruner, OpProofsStorage, OpProofsStore}; +use reth_provider::BlockHashReader; +use reth_tasks::shutdown::GracefulShutdown; +use tokio::{ + time, + time::{Duration, MissedTickBehavior}, +}; +use tracing::info; + +const PRUNE_BATCH_SIZE: u64 = 200; + +/// Periodic pruner task: constructs the pruner and runs it every interval. 
+#[derive(Debug)]
+pub struct OpProofStoragePrunerTask<P, H> {
+    pruner: OpProofStoragePruner<P, H>,
+    min_block_interval: u64,
+    task_run_interval: Duration,
+}
+
+impl<P, H> OpProofStoragePrunerTask<P, H>
+where
+    P: OpProofsStore,
+    H: BlockHashReader,
+{
+    /// Initialize a new [`OpProofStoragePrunerTask`]
+    pub fn new(
+        provider: OpProofsStorage<P>,
+        hash_reader: H,
+        min_block_interval: u64,
+        task_run_interval: Duration,
+    ) -> Self {
+        let pruner =
+            OpProofStoragePruner::new(provider, hash_reader, min_block_interval, PRUNE_BATCH_SIZE);
+        Self { pruner, min_block_interval, task_run_interval }
+    }
+
+    /// Run forever (until `cancel`), executing one prune pass per `task_run_interval`.
+    pub async fn run(self, mut signal: GracefulShutdown) {
+        info!(
+            target: "trie::pruner_task",
+            min_block_interval = self.min_block_interval,
+            interval_secs = self.task_run_interval.as_secs(),
+            "Starting pruner task"
+        );
+
+        // Drive pruning with a periodic ticker
+        let mut interval = time::interval(self.task_run_interval);
+        interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+        loop {
+            tokio::select! {
+                _ = &mut signal => {
+                    info!(target: "trie::pruner_task", "Pruner task cancelled; exiting");
+                    break;
+                }
+                _ = interval.tick() => {
+                    self.pruner.run().await
+                }
+            }
+        }
+
+        info!(target: "trie::pruner_task", "Pruner task stopped");
+    }
+}
diff --git a/crates/optimism/trie/tests/lib.rs b/crates/optimism/trie/tests/lib.rs
new file mode 100644
index 00000000000..7bd13ccf582
--- /dev/null
+++ b/crates/optimism/trie/tests/lib.rs
@@ -0,0 +1,2005 @@
+//! Common test suite for [`OpProofsStore`] implementations.
+
+use alloy_eips::{eip1898::BlockWithParent, NumHash};
+use alloy_primitives::{map::HashMap, B256, U256};
+use reth_optimism_trie::{
+    db::MdbxProofsStorage, BlockStateDiff, InMemoryProofsStorage, OpProofsStorageError,
+    OpProofsStore,
+};
+use reth_primitives_traits::Account;
+use reth_trie::{
+    hashed_cursor::HashedCursor,
+    trie_cursor::TrieCursor,
+    updates::{TrieUpdates, TrieUpdatesSorted},
+    BranchNodeCompact, HashedPostState, HashedPostStateSorted, HashedStorage, Nibbles, TrieMask,
+};
+use serial_test::serial;
+use std::sync::Arc;
+use tempfile::TempDir;
+use test_case::test_case;
+
+/// Helper to create a simple test branch node
+fn create_test_branch() -> BranchNodeCompact {
+    let mut state_mask = TrieMask::default();
+    state_mask.set_bit(0);
+    state_mask.set_bit(1);
+
+    BranchNodeCompact {
+        state_mask,
+        tree_mask: TrieMask::default(),
+        hash_mask: TrieMask::default(),
+        hashes: Arc::new(vec![]),
+        root_hash: None,
+    }
+}
+
+/// Helper to create a variant test branch node for comparison tests
+fn create_test_branch_variant() -> BranchNodeCompact {
+    let mut state_mask = TrieMask::default();
+    state_mask.set_bit(5);
+    state_mask.set_bit(6);
+
+    BranchNodeCompact {
+        state_mask,
+        tree_mask: TrieMask::default(),
+        hash_mask: TrieMask::default(),
+        hashes: Arc::new(vec![]),
+        root_hash: None,
+    }
+}
+
+/// Helper to create nibbles from a vector of u8 values
+fn nibbles_from(vec: Vec<u8>) -> Nibbles {
+    Nibbles::from_nibbles_unchecked(vec)
+}
+
+/// Helper to create a test account
+fn create_test_account() -> Account {
+    Account {
+        nonce: 42,
+        balance: U256::from(1000000),
+        bytecode_hash: Some(B256::repeat_byte(0xBB)),
+    }
+}
+
+/// Helper to create a test account with custom values
+fn create_test_account_with_values(nonce: u64, balance: u64, code_hash_byte: u8) -> Account {
+    Account {
+        nonce,
+        balance: U256::from(balance),
+        bytecode_hash: Some(B256::repeat_byte(code_hash_byte)),
+    }
+}
+
+fn create_mdbx_proofs_storage() -> MdbxProofsStorage {
+    let path = TempDir::new().unwrap();
+    MdbxProofsStorage::new(path.path()).unwrap()
+}
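+
+// Each test below is generic over the backing store and is instantiated for both
+// the in-memory and MDBX implementations via `test_case`; `serial` presumably keeps
+// the MDBX cases from interleaving on shared process state.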
+
+/// Test basic storage and retrieval of earliest block number
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_earliest_block_operations<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    // Initially should be None
+    let earliest = storage.get_earliest_block_number().await?;
+    assert!(earliest.is_none());
+
+    // Set earliest block
+    let block_hash = B256::repeat_byte(0x42);
+    storage.set_earliest_block_number(100, block_hash).await?;
+
+    // Should retrieve the same values
+    let earliest = storage.get_earliest_block_number().await?;
+    assert_eq!(earliest, Some((100, block_hash)));
+
+    Ok(())
+}
+
+/// Test storing and retrieving trie updates
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_trie_updates_operations<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96)));
+    let sorted_trie_updates = TrieUpdatesSorted::default();
+    let sorted_post_state = HashedPostStateSorted::default();
+    let block_state_diff = BlockStateDiff {
+        sorted_trie_updates: sorted_trie_updates.clone(),
+        sorted_post_state: sorted_post_state.clone(),
+    };
+
+    // Store trie updates
+    storage.store_trie_updates(block_ref, block_state_diff).await?;
+
+    // Retrieve and verify
+    let retrieved_diff = storage.fetch_trie_updates(block_ref.block.number).await?;
+    assert_eq!(retrieved_diff.sorted_trie_updates, sorted_trie_updates);
+    assert_eq!(retrieved_diff.sorted_post_state, sorted_post_state);
+
+    Ok(())
+}
+
+// =============================================================================
+// 1. Basic Cursor Operations
+// =============================================================================
+
+/// Test cursor operations on empty trie
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_cursor_empty_trie<S: OpProofsStore>(storage: S) -> Result<(), OpProofsStorageError> {
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // All operations should return None on empty trie
+    assert!(cursor.seek_exact(Nibbles::default())?.is_none());
+    assert!(cursor.seek(Nibbles::default())?.is_none());
+    assert!(cursor.next()?.is_none());
+    assert!(cursor.current()?.is_none());
+
+    Ok(())
+}
+
+/// Test cursor operations with single entry
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_cursor_single_entry<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2, 3]);
+    let branch = create_test_branch();
+
+    // Store single entry
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // Test seek_exact
+    let result = cursor.seek_exact(path)?.unwrap();
+    assert_eq!(result.0, path);
+
+    // Test current position
+    assert_eq!(cursor.current()?.unwrap(), path);
+
+    // Test next from end should return None
+    assert!(cursor.next()?.is_none());
+
+    Ok(())
+}
+
+/// Test cursor operations with multiple entries
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_cursor_multiple_entries<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let paths = vec![
+        nibbles_from(vec![1]),
+        nibbles_from(vec![1, 2]),
+        nibbles_from(vec![2]),
+        nibbles_from(vec![2, 3]),
+    ];
+    let branch = create_test_branch();
+
+    // Store multiple entries
+    for path in &paths {
+        storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?;
+    }
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // Test that we can iterate through all entries
+    let mut found_paths = Vec::new();
+    while let Some((path, _)) = cursor.next()? {
+        found_paths.push(path);
+    }
+
+    assert_eq!(found_paths.len(), 4);
+    // Paths should be in lexicographic order
+    for i in 0..paths.len() {
+        assert_eq!(found_paths[i], paths[i]);
+    }
+
+    Ok(())
+}
+
+// =============================================================================
+// 2. Seek Operations
+// =============================================================================
+
+/// Test `seek_exact` with existing path
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_exact_existing_path<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2, 3]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    let result = cursor.seek_exact(path)?.unwrap();
+    assert_eq!(result.0, path);
+
+    Ok(())
+}
+
+/// Test `seek_exact` with non-existing path
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_exact_non_existing_path<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2, 3]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    let non_existing = nibbles_from(vec![4, 5, 6]);
+    assert!(cursor.seek_exact(non_existing)?.is_none());
+
+    Ok(())
+}
+
+/// Test `seek_exact` with empty path
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_exact_empty_path<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    let result = cursor.seek_exact(Nibbles::default())?.unwrap();
+    assert_eq!(result.0, Nibbles::default());
+
+    Ok(())
+}
+
+/// Test seek to existing path
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_to_existing_path<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2, 3]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    let result = cursor.seek(path)?.unwrap();
+    assert_eq!(result.0, path);
+
+    Ok(())
+}
+
+/// Test seek between existing nodes
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_between_existing_nodes<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path1 = nibbles_from(vec![1]);
+    let path2 = nibbles_from(vec![3]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path1, Some(branch.clone()))]).await?;
+    storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    // Seek to path between 1 and 3, should return path 3
+    let seek_path = nibbles_from(vec![2]);
+    let result = cursor.seek(seek_path)?.unwrap();
+    assert_eq!(result.0, path2);
+
+    Ok(())
+}
+
+/// Test seek after all nodes
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_after_all_nodes<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    // Seek to path after all nodes
+    let seek_path = nibbles_from(vec![9]);
+    assert!(cursor.seek(seek_path)?.is_none());
+
+    Ok(())
+}
+
+/// Test seek before all nodes
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_seek_before_all_nodes<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![5]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    // Seek to path before all nodes, should return first node
+    let seek_path = nibbles_from(vec![1]);
+    let result = cursor.seek(seek_path)?.unwrap();
+    assert_eq!(result.0, path);
+
+    Ok(())
+}
+
+// =============================================================================
+// 3. Navigation Tests
+// =============================================================================
+
+/// Test next without prior seek
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_next_without_prior_seek<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    // next() without prior seek should start from beginning
+    let result = cursor.next()?.unwrap();
+    assert_eq!(result.0, path);
+
+    Ok(())
+}
+
+/// Test next after seek
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_next_after_seek<S: OpProofsStore>(storage: S) -> Result<(), OpProofsStorageError> {
+    let path1 = nibbles_from(vec![1]);
+    let path2 = nibbles_from(vec![2]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path1, Some(branch.clone()))]).await?;
+    storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    cursor.seek(path1)?;
+
+    // next() should return second node
+    let result = cursor.next()?.unwrap();
+    assert_eq!(result.0, path2);
+
+    Ok(())
+}
+
+/// Test next at end of trie
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_next_at_end_of_trie<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+    let mut cursor = storage.account_trie_cursor(100)?;
+    cursor.seek(path)?;
+
+    // next() at end should return None
+    assert!(cursor.next()?.is_none());
+
+    Ok(())
+}
+
+/// Test multiple consecutive next calls
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_multiple_consecutive_next<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let paths = vec![nibbles_from(vec![1]), nibbles_from(vec![2]), nibbles_from(vec![3])];
+    let branch = create_test_branch();
+
+    for path in &paths {
+        storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?;
+    }
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // Iterate through all with consecutive next() calls
+    for expected_path in &paths {
+        let result = cursor.next()?.unwrap();
+        assert_eq!(result.0, *expected_path);
+    }
+
+    // Final next() should return None
+    assert!(cursor.next()?.is_none());
+
+    Ok(())
+}
+
+/// Test current after operations
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_current_after_operations<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path1 = nibbles_from(vec![1]);
+    let path2 = nibbles_from(vec![2]);
+    let branch = create_test_branch();
+
+    storage.store_account_branches(vec![(path1, Some(branch.clone()))]).await?;
+    storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?;
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // Current should be None initially
+    assert!(cursor.current()?.is_none());
+
+    // After seek, current should track position
+    cursor.seek(path1)?;
+    assert_eq!(cursor.current()?.unwrap(), path1);
+
+    // After next, current should update
+    cursor.next()?;
+    assert_eq!(cursor.current()?.unwrap(), path2);
+
+    Ok(())
+}
+
+/// Test current with no prior operations
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_current_no_prior_operations<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // Current should be None when no operations performed
+    assert!(cursor.current()?.is_none());
+
+    Ok(())
+}
+
+// =============================================================================
+// 4. Block Number Filtering
+// =============================================================================
+
+/// Test same path with different blocks
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_same_path_different_blocks<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2]);
+    let branch1 = create_test_branch();
+    let branch2 = create_test_branch_variant();
+
+    // Store same path at different blocks
+    storage.store_account_branches(vec![(path, Some(branch1.clone()))]).await?;
+    storage.store_account_branches(vec![(path, Some(branch2.clone()))]).await?;
+
+    // Cursor with max_block_number=75 should see only block 50 data
+    let mut cursor75 = storage.account_trie_cursor(75)?;
+    let result75 = cursor75.seek_exact(path)?.unwrap();
+    assert_eq!(result75.0, path);
+
+    // Cursor with max_block_number=150 should see block 100 data (latest)
+    let mut cursor150 = storage.account_trie_cursor(150)?;
+    let result150 = cursor150.seek_exact(path)?.unwrap();
+    assert_eq!(result150.0, path);
+
+    Ok(())
+}
+
+/// Test deleted branch nodes
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_deleted_branch_nodes<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2]);
+    let branch = create_test_branch();
+    let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96)));
+
+    // Store branch node, then delete it (store None)
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    // Cursor before deletion should see the node
+    let mut cursor75 = storage.account_trie_cursor(75)?;
+    assert!(cursor75.seek_exact(path)?.is_some());
+
+    let mut block_state_diff_trie_updates = TrieUpdates::default();
+    block_state_diff_trie_updates.removed_nodes.insert(path);
+    let block_state_diff = BlockStateDiff {
+        sorted_trie_updates: block_state_diff_trie_updates.into_sorted(),
+        sorted_post_state: HashedPostStateSorted::default(),
+    };
+    storage.store_trie_updates(block_ref, block_state_diff).await?;
+
+    // Cursor after deletion should not see the node
+    let mut cursor150 = storage.account_trie_cursor(150)?;
+    assert!(cursor150.seek_exact(path)?.is_none());
+
+    Ok(())
+}
+
+// =============================================================================
+// 5. Hashed Address Filtering
+// =============================================================================
+
+/// Test account-specific cursor
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_account_specific_cursor<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2]);
+    let addr1 = B256::repeat_byte(0x01);
+    let addr2 = B256::repeat_byte(0x02);
+    let branch = create_test_branch();
+
+    // Store same path for different accounts (using storage branches)
+    storage.store_storage_branches(addr1, vec![(path, Some(branch.clone()))]).await?;
+    storage.store_storage_branches(addr2, vec![(path, Some(branch.clone()))]).await?;
+
+    // Cursor for addr1 should only see addr1 data
+    let mut cursor1 = storage.storage_trie_cursor(addr1, 100)?;
+    let result1 = cursor1.seek_exact(path)?.unwrap();
+    assert_eq!(result1.0, path);
+
+    // Cursor for addr2 should only see addr2 data
+    let mut cursor2 = storage.storage_trie_cursor(addr2, 100)?;
+    let result2 = cursor2.seek_exact(path)?.unwrap();
+    assert_eq!(result2.0, path);
+
+    // Cursor for addr1 should not see addr2 data when iterating
+    let mut cursor1_iter = storage.storage_trie_cursor(addr1, 100)?;
+    let mut found_count = 0;
+    while cursor1_iter.next()?.is_some() {
+        found_count += 1;
+    }
+    assert_eq!(found_count, 1); // Should only see one entry (for addr1)
+
+    Ok(())
+}
+
+/// Test state trie cursor
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_state_trie_cursor<S: OpProofsStore>(storage: S) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2]);
+    let addr = B256::repeat_byte(0x01);
+    let branch = create_test_branch();
+
+    // Store data for account trie and state trie
+    storage.store_storage_branches(addr, vec![(path, Some(branch.clone()))]).await?;
+    storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?;
+
+    // State trie cursor (None address) should only see state trie data
+    let mut state_cursor = storage.account_trie_cursor(100)?;
+    let result = state_cursor.seek_exact(path)?.unwrap();
+    assert_eq!(result.0, path);
+
+    // Verify state cursor doesn't see account data when iterating
+    let mut state_cursor_iter = storage.account_trie_cursor(100)?;
+    let mut found_count = 0;
+    while state_cursor_iter.next()?.is_some() {
+        found_count += 1;
+    }
+
+    assert_eq!(found_count, 1); // Should only see state trie entry
+
+    Ok(())
+}
+
+/// Test mixed account and state data
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_mixed_account_state_data<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path1 = nibbles_from(vec![1]);
+    let path2 = nibbles_from(vec![2]);
+    let addr = B256::repeat_byte(0x01);
+    let branch = create_test_branch();
+
+    // Store mixed account and state trie data
+    storage.store_storage_branches(addr, vec![(path1, Some(branch.clone()))]).await?;
+    storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?;
+
+    // Account cursor should only see account data
+    let mut account_cursor = storage.storage_trie_cursor(addr, 100)?;
+    let mut account_paths = Vec::new();
+    while let Some((path, _)) = account_cursor.next()? {
+        account_paths.push(path);
+    }
+    assert_eq!(account_paths.len(), 1);
+    assert_eq!(account_paths[0], path1);
+
+    // State cursor should only see state data
+    let mut state_cursor = storage.account_trie_cursor(100)?;
+    let mut state_paths = Vec::new();
+    while let Some((path, _)) = state_cursor.next()? {
+        state_paths.push(path);
+    }
+    assert_eq!(state_paths.len(), 1);
+    assert_eq!(state_paths[0], path2);
+
+    Ok(())
+}
+
+// =============================================================================
+// 6. Path Ordering Tests
+// =============================================================================
+
+/// Test lexicographic ordering
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_lexicographic_ordering<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let paths = vec![
+        nibbles_from(vec![3, 1]),
+        nibbles_from(vec![1, 2]),
+        nibbles_from(vec![2]),
+        nibbles_from(vec![1]),
+    ];
+    let branch = create_test_branch();
+
+    // Store paths in random order
+    for path in &paths {
+        storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?;
+    }
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    let mut found_paths = Vec::new();
+    while let Some((path, _)) = cursor.next()? {
+        found_paths.push(path);
+    }
+
+    // Should be returned in lexicographic order: [1], [1,2], [2], [3,1]
+    let expected_order = vec![
+        nibbles_from(vec![1]),
+        nibbles_from(vec![1, 2]),
+        nibbles_from(vec![2]),
+        nibbles_from(vec![3, 1]),
+    ];
+
+    assert_eq!(found_paths, expected_order);
+
+    Ok(())
+}
+
+/// Test path prefix scenarios
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_path_prefix_scenarios<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let paths = vec![
+        nibbles_from(vec![1]),       // Prefix of next
+        nibbles_from(vec![1, 2]),    // Extends first
+        nibbles_from(vec![1, 2, 3]), // Extends second
+    ];
+    let branch = create_test_branch();
+
+    for path in &paths {
+        storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?;
+    }
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+
+    // Seek to prefix should find exact match
+    let result = cursor.seek_exact(paths[0])?.unwrap();
+    assert_eq!(result.0, paths[0]);
+
+    // Next should go to next path, not skip prefixed paths
+    let result = cursor.next()?.unwrap();
+    assert_eq!(result.0, paths[1]);
+
+    let result = cursor.next()?.unwrap();
+    assert_eq!(result.0, paths[2]);
+
+    Ok(())
+}
+
+/// Test complex nibble combinations
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_complex_nibble_combinations<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    // Test various nibble patterns including edge values
+    let paths = vec![
+        nibbles_from(vec![0]),
+        nibbles_from(vec![0, 15]),
+        nibbles_from(vec![15]),
+        nibbles_from(vec![15, 0]),
+        nibbles_from(vec![7, 8, 9]),
+    ];
+    let branch = create_test_branch();
+
+    for path in &paths {
+        storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?;
+    }
+
+    let mut cursor = storage.account_trie_cursor(100)?;
+    let mut found_paths = Vec::new();
+    while let Some((path, _)) = cursor.next()? {
+        found_paths.push(path);
+    }
+
+    // All paths should be found and in correct order
+    assert_eq!(found_paths.len(), 5);
+
+    // Verify specific ordering for edge cases
+    assert_eq!(found_paths[0], nibbles_from(vec![0]));
+    assert_eq!(found_paths[1], nibbles_from(vec![0, 15]));
+    assert_eq!(found_paths[4], nibbles_from(vec![15, 0]));
+
+    Ok(())
+}
+
+// =============================================================================
+// 7. Leaf Node Tests (Hashed Accounts and Storage)
+// =============================================================================
+
+/// Test store and retrieve single account
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_store_and_retrieve_single_account<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let account_key = B256::repeat_byte(0x01);
+    let account = create_test_account();
+
+    // Store account
+    storage.store_hashed_accounts(vec![(account_key, Some(account))]).await?;
+
+    // Retrieve via cursor
+    let mut cursor = storage.account_hashed_cursor(100)?;
+    let result = cursor.seek(account_key)?.unwrap();
+
+    assert_eq!(result.0, account_key);
+    assert_eq!(result.1.nonce, account.nonce);
+    assert_eq!(result.1.balance, account.balance);
+    assert_eq!(result.1.bytecode_hash, account.bytecode_hash);
+
+    Ok(())
+}
+
+/// Test account cursor navigation
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_account_cursor_navigation<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let accounts = [
+        (B256::repeat_byte(0x01), create_test_account()),
+        (B256::repeat_byte(0x03), create_test_account()),
+        (B256::repeat_byte(0x05), create_test_account()),
+    ];
+
+    // Store accounts
+    let accounts_to_store: Vec<_> = accounts.iter().map(|(k, v)| (*k, Some(*v))).collect();
+    storage.store_hashed_accounts(accounts_to_store).await?;
+
+    let mut cursor = storage.account_hashed_cursor(100)?;
+
+    // Test seeking to exact key
+    let result = cursor.seek(accounts[1].0)?.unwrap();
+    assert_eq!(result.0, accounts[1].0);
+
+    // Test seeking to key that doesn't exist (should return next greater)
+    let seek_key = B256::repeat_byte(0x02);
+    let result = cursor.seek(seek_key)?.unwrap();
+    assert_eq!(result.0, accounts[1].0); // Should find 0x03
+
+    // Test next() navigation
+    let result = cursor.next()?.unwrap();
+    assert_eq!(result.0, accounts[2].0); // Should find 0x05
+
+    // Test next() at end
+    assert!(cursor.next()?.is_none());
+
+    Ok(())
+}
+
+/// Test account block versioning
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_account_block_versioning<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let account_key = B256::repeat_byte(0x01);
+    let account_v1 = create_test_account_with_values(1, 100, 0xBB);
+    let account_v2 = create_test_account_with_values(2, 200, 0xDD);
+
+    // Store account at different blocks
+    storage.store_hashed_accounts(vec![(account_key, Some(account_v1))]).await?;
+
+    // Cursor with max_block_number=75 should see v1
+    let mut cursor75 = storage.account_hashed_cursor(75)?;
+    let result75 = cursor75.seek(account_key)?.unwrap();
+    assert_eq!(result75.1.nonce, account_v1.nonce);
+    assert_eq!(result75.1.balance, account_v1.balance);
+
+    storage.store_hashed_accounts(vec![(account_key, Some(account_v2))]).await?;
+    // After update, cursor with max_block_number=150 should see v2
+    let mut cursor150 = storage.account_hashed_cursor(150)?;
+    let result150 = cursor150.seek(account_key)?.unwrap();
+    assert_eq!(result150.1.nonce, account_v2.nonce);
+    assert_eq!(result150.1.balance, account_v2.balance);
+
+    Ok(())
+}
+
+/// Test store and retrieve storage
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+async fn test_store_and_retrieve_storage<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let hashed_address = B256::repeat_byte(0x01);
+    let storage_slots = vec![
+        (B256::repeat_byte(0x10), U256::from(100)),
+        (B256::repeat_byte(0x20), U256::from(200)),
+        (B256::repeat_byte(0x30), U256::from(300)),
+    ];
+
+    // Store storage slots
+    storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?;
+
+    // Retrieve via cursor
+    let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?;
+
+    // Test seeking to each slot
+    for (key, expected_value) in &storage_slots {
+        let result = cursor.seek(*key)?.unwrap();
+        assert_eq!(result.0, *key);
+        assert_eq!(result.1, *expected_value);
+    }
+
+    Ok(())
+}
+
+/// Test storage cursor navigation
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_storage_cursor_navigation<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let hashed_address = B256::repeat_byte(0x01);
+    let storage_slots = vec![
+        (B256::repeat_byte(0x10), U256::from(100)),
+        (B256::repeat_byte(0x30), U256::from(300)),
+        (B256::repeat_byte(0x50), U256::from(500)),
+    ];
+
+    storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?;
+
+    let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?;
+
+    // Start from beginning with next()
+    let mut found_slots = Vec::new();
+    while let Some((key, value)) = cursor.next()? {
+        found_slots.push((key, value));
+    }
+
+    assert_eq!(found_slots.len(), 3);
+    assert_eq!(found_slots[0], storage_slots[0]);
+    assert_eq!(found_slots[1], storage_slots[1]);
+    assert_eq!(found_slots[2], storage_slots[2]);
+
+    Ok(())
+}
+
+/// Test storage account isolation
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_storage_account_isolation<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let address1 = B256::repeat_byte(0x01);
+    let address2 = B256::repeat_byte(0x02);
+    let storage_key = B256::repeat_byte(0x10);
+
+    // Store same storage key for different accounts
+    storage.store_hashed_storages(address1, vec![(storage_key, U256::from(100))]).await?;
+    storage.store_hashed_storages(address2, vec![(storage_key, U256::from(200))]).await?;
+
+    // Verify each account sees only its own storage
+    let mut cursor1 = storage.storage_hashed_cursor(address1, 100)?;
+    let result1 = cursor1.seek(storage_key)?.unwrap();
+    assert_eq!(result1.1, U256::from(100));
+
+    let mut cursor2 = storage.storage_hashed_cursor(address2, 100)?;
+    let result2 = cursor2.seek(storage_key)?.unwrap();
+    assert_eq!(result2.1, U256::from(200));
+
+    // Verify cursor1 doesn't see address2's storage
+    let mut cursor1_iter = storage.storage_hashed_cursor(address1, 100)?;
+    let mut count = 0;
+    while cursor1_iter.next()?.is_some() {
+        count += 1;
+    }
+    assert_eq!(count, 1); // Should only see one entry
+
+    Ok(())
+}
+
+/// Test storage block versioning
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_storage_block_versioning<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let hashed_address = B256::repeat_byte(0x01);
+    let storage_key = B256::repeat_byte(0x10);
+
+    // Store storage at different blocks
+    storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))]).await?;
+
+    // Cursor with max_block_number=75 should see old value
+    let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?;
+    let result75 = cursor75.seek(storage_key)?.unwrap();
+    assert_eq!(result75.1, U256::from(100));
+
+    storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(200))]).await?;
+    // Cursor with max_block_number=150 should see new value
+    let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?;
+    let result150 = cursor150.seek(storage_key)?.unwrap();
+    assert_eq!(result150.1, U256::from(200));
+
+    Ok(())
+}
+
+/// Test storage zero value deletion
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_storage_zero_value_deletion<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let hashed_address = B256::repeat_byte(0x01);
+    let storage_key = B256::repeat_byte(0x10);
+
+    // Store non-zero value
+    storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))]).await?;
+
+    // Cursor before deletion should see the value
+    let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?;
+    let result75 = cursor75.seek(storage_key)?.unwrap();
+    assert_eq!(result75.1, U256::from(100));
+
+    // "Delete" by storing zero value at block 100
+    let mut block_state_diff_post_state = HashedPostState::default();
+    let mut hashed_storage = HashedStorage::default();
+    hashed_storage.storage.insert(storage_key, U256::ZERO);
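+    // A zero value acts as a tombstone here: once this diff is applied below,
+    // cursors bounded to later blocks are expected to treat the slot as deleted.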
+    block_state_diff_post_state.storages.insert(hashed_address, hashed_storage);
+
+    let block_ref: BlockWithParent =
+        BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96)));
+    let block_state_diff = BlockStateDiff {
+        sorted_trie_updates: TrieUpdatesSorted::default(),
+        sorted_post_state: block_state_diff_post_state.into_sorted(),
+    };
+    storage.store_trie_updates(block_ref, block_state_diff).await?;
+
+    // Cursor after deletion should NOT see the entry (zero values are skipped)
+    let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?;
+    let result150 = cursor150.seek(storage_key)?;
+    assert!(result150.is_none(), "Zero values should be skipped/deleted");
+
+    Ok(())
+}
+
+/// Test that zero values are skipped during iteration
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_storage_cursor_skips_zero_values<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let hashed_address = B256::repeat_byte(0x01);
+
+    // Create a mix of non-zero and zero value storage slots
+    let storage_slots = vec![
+        (B256::repeat_byte(0x10), U256::from(100)), // Non-zero
+        (B256::repeat_byte(0x20), U256::ZERO),      // Zero value - should be skipped
+        (B256::repeat_byte(0x30), U256::from(300)), // Non-zero
+        (B256::repeat_byte(0x40), U256::ZERO),      // Zero value - should be skipped
+        (B256::repeat_byte(0x50), U256::from(500)), // Non-zero
+    ];
+
+    // Store all slots
+    storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?;
+
+    // Create cursor and iterate through all entries
+    let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?;
+    let mut found_slots = Vec::new();
+    while let Some((key, value)) = cursor.next()? {
+        found_slots.push((key, value));
+    }
+
+    // Should only find 3 non-zero values
+    assert_eq!(found_slots.len(), 3, "Zero values should be skipped during iteration");
+
+    // Verify the non-zero values are the ones we stored
+    assert_eq!(found_slots[0], (B256::repeat_byte(0x10), U256::from(100)));
+    assert_eq!(found_slots[1], (B256::repeat_byte(0x30), U256::from(300)));
+    assert_eq!(found_slots[2], (B256::repeat_byte(0x50), U256::from(500)));
+
+    // Verify seeking to a zero-value slot returns None or skips to next non-zero
+    let mut seek_cursor = storage.storage_hashed_cursor(hashed_address, 100)?;
+    let seek_result = seek_cursor.seek(B256::repeat_byte(0x20))?;
+
+    // Should either return None or skip to the next non-zero value (0x30)
+    if let Some((key, value)) = seek_result {
+        assert_eq!(key, B256::repeat_byte(0x30), "Should skip zero value and find next non-zero");
+        assert_eq!(value, U256::from(300));
+    }
+
+    Ok(())
+}
+
+/// Test empty cursors
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_empty_cursors<S: OpProofsStore>(storage: S) -> Result<(), OpProofsStorageError> {
+    // Test empty account cursor
+    let mut account_cursor = storage.account_hashed_cursor(100)?;
+    assert!(account_cursor.seek(B256::repeat_byte(0x01))?.is_none());
+    assert!(account_cursor.next()?.is_none());
+
+    // Test empty storage cursor
+    let mut storage_cursor = storage.storage_hashed_cursor(B256::repeat_byte(0x01), 100)?;
+    assert!(storage_cursor.seek(B256::repeat_byte(0x10))?.is_none());
+    assert!(storage_cursor.next()?.is_none());
+
+    Ok(())
+}
+
+/// Test cursor boundary conditions
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_cursor_boundary_conditions<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let account_key = B256::repeat_byte(0x80); // Middle value
+    let account = create_test_account();
+
+    storage.store_hashed_accounts(vec![(account_key, Some(account))]).await?;
+
+    let mut cursor = storage.account_hashed_cursor(100)?;
+
+    // Seek to minimum key should find our account
+    let result = cursor.seek(B256::ZERO)?.unwrap();
+    assert_eq!(result.0, account_key);
+
+    // Seek to maximum key should find nothing
+    assert!(cursor.seek(B256::repeat_byte(0xFF))?.is_none());
+
+    // Seek to key just before our account should find our account
+    let just_before = B256::repeat_byte(0x7F);
+    let result = cursor.seek(just_before)?.unwrap();
+    assert_eq!(result.0, account_key);
+
+    Ok(())
+}
+
+/// Test large batch operations
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_large_batch_operations<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    // Create large batch of accounts
+    let mut accounts = Vec::new();
+    for i in 0..100 {
+        let key = B256::from([i as u8; 32]);
+        let account = create_test_account_with_values(i, i * 1000, (i + 1) as u8);
+        accounts.push((key, Some(account)));
+    }
+
+    // Store in batch
+    storage.store_hashed_accounts(accounts.clone()).await?;
+
+    // Verify all accounts can be retrieved
+    let mut cursor = storage.account_hashed_cursor(100)?;
+    let mut found_count = 0;
+    while cursor.next()?.is_some() {
+        found_count += 1;
+    }
+    assert_eq!(found_count, 100);
+
+    // Test specific account retrieval
+    let test_key = B256::from([42u8; 32]);
+    let result = cursor.seek(test_key)?.unwrap();
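+    // The cursor was just exhausted by the loop above; seek is expected to
+    // reposition it onto the requested key rather than stay at the end.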
+    assert_eq!(result.0, test_key);
+    assert_eq!(result.1.nonce, 42);
+
+    Ok(())
+}
+
+/// Test wiped storage in [`HashedPostState`]
+///
+/// When `store_trie_updates` receives a [`HashedPostState`] with wiped=true for a storage entry,
+/// it should iterate all existing values for that address and create deletion entries for them.
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_store_trie_updates_with_wiped_storage<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    use reth_trie::HashedStorage;
+
+    let hashed_address = B256::repeat_byte(0x01);
+    let block_ref: BlockWithParent =
+        BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96)));
+
+    // First, store some storage values at block 50
+    let storage_slots = vec![
+        (B256::repeat_byte(0x10), U256::from(100)),
+        (B256::repeat_byte(0x20), U256::from(200)),
+        (B256::repeat_byte(0x30), U256::from(300)),
+        (B256::repeat_byte(0x40), U256::from(400)),
+    ];
+
+    storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?;
+
+    // Verify all values are present at block 75
+    let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?;
+    let mut found_slots = Vec::new();
+    while let Some((key, value)) = cursor75.next()? {
+        found_slots.push((key, value));
+    }
+    assert_eq!(found_slots.len(), 4, "All storage slots should be present before wipe");
+    assert_eq!(found_slots[0], (B256::repeat_byte(0x10), U256::from(100)));
+    assert_eq!(found_slots[1], (B256::repeat_byte(0x20), U256::from(200)));
+    assert_eq!(found_slots[2], (B256::repeat_byte(0x30), U256::from(300)));
+    assert_eq!(found_slots[3], (B256::repeat_byte(0x40), U256::from(400)));
+
+    // Now create a HashedPostState with wiped=true for this address at block 100
+    let mut post_state = HashedPostState::default();
+    let wiped_storage = HashedStorage::new(true); // wiped=true, empty storage map
+    post_state.storages.insert(hashed_address, wiped_storage);
+
+    let block_state_diff = BlockStateDiff {
+        sorted_trie_updates: TrieUpdatesSorted::default(),
+        sorted_post_state: post_state.into_sorted(),
+    };
+
+    // Store the wiped state
+    storage.store_trie_updates(block_ref, block_state_diff).await?;
+
+    // After wiping, cursor at block 150 should see NO storage values
+    let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?;
+    let mut found_slots_after_wipe = Vec::new();
+    while let Some((key, value)) = cursor150.next()? {
+        found_slots_after_wipe.push((key, value));
+    }
+
+    assert_eq!(
+        found_slots_after_wipe.len(),
+        0,
+        "All storage slots should be deleted after wipe. Found: {:?}",
+        found_slots_after_wipe
+    );
+
+    // Verify individual seeks also return None
+    for (slot, _) in &storage_slots {
+        let mut seek_cursor = storage.storage_hashed_cursor(hashed_address, 150)?;
+        let result = seek_cursor.seek(*slot)?;
+        assert!(
+            result.is_none() || result.unwrap().0 != *slot,
+            "Storage slot {:?} should be deleted after wipe",
+            slot
+        );
+    }
+
+    // Verify cursor at block 75 (before wipe) still sees all values
+    let mut cursor75_after = storage.storage_hashed_cursor(hashed_address, 75)?;
+    let mut found_slots_before_wipe = Vec::new();
+    while let Some((key, value)) = cursor75_after.next()? {
+        found_slots_before_wipe.push((key, value));
+    }
+    assert_eq!(
+        found_slots_before_wipe.len(),
+        4,
+        "All storage slots should still be present when querying before wipe block"
+    );
+
+    Ok(())
+}
+
+/// Test that `store_trie_updates` properly stores branch nodes, leaf nodes, and removals
+///
+/// This test verifies that all data stored via `store_trie_updates` can be read back
+/// through the cursor APIs.
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_store_trie_updates_comprehensive<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    use reth_trie::{updates::StorageTrieUpdates, HashedStorage};
+
+    let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96)));
+
+    // Create comprehensive trie updates with branches, leaves, and removals
+    let mut trie_updates = TrieUpdates::default();
+
+    // Add account branch nodes
+    let account_path1 = nibbles_from(vec![1, 2, 3]);
+    let account_path2 = nibbles_from(vec![4, 5, 6]);
+    let account_branch1 = create_test_branch();
+    let account_branch2 = create_test_branch_variant();
+
+    trie_updates.account_nodes.insert(account_path1, account_branch1.clone());
+    trie_updates.account_nodes.insert(account_path2, account_branch2.clone());
+
+    // Add removed account nodes
+    let removed_account_path = nibbles_from(vec![7, 8, 9]);
+    trie_updates.removed_nodes.insert(removed_account_path);
+
+    // Add storage branch nodes for an address
+    let hashed_address = B256::repeat_byte(0x42);
+    let storage_path1 = nibbles_from(vec![1, 1]);
+    let storage_path2 = nibbles_from(vec![2, 2]);
+    let storage_branch = create_test_branch();
+
+    let mut storage_trie = StorageTrieUpdates::default();
+    storage_trie.storage_nodes.insert(storage_path1, storage_branch.clone());
+    storage_trie.storage_nodes.insert(storage_path2, storage_branch.clone());
+
+    // Add removed storage node
+    let removed_storage_path = nibbles_from(vec![3, 3]);
+    storage_trie.removed_nodes.insert(removed_storage_path);
+
+    trie_updates.insert_storage_updates(hashed_address, storage_trie);
+
+    // Create post state with accounts and storage
+    let mut post_state = HashedPostState::default();
+
+    // Add accounts
+    let account1_addr = B256::repeat_byte(0x10);
+    let account2_addr = B256::repeat_byte(0x20);
+    let account1 = create_test_account_with_values(1, 1000, 0xAA);
+    let account2 = create_test_account_with_values(2, 2000, 0xBB);
+
+    post_state.accounts.insert(account1_addr, Some(account1));
+    post_state.accounts.insert(account2_addr, Some(account2));
+
+    // Add deleted account
+    let deleted_account_addr = B256::repeat_byte(0x30);
+    post_state.accounts.insert(deleted_account_addr, None);
+
+    // Add storage for an address
+    let storage_addr = B256::repeat_byte(0x50);
+    let mut hashed_storage = HashedStorage::new(false);
+    hashed_storage.storage.insert(B256::repeat_byte(0x01), U256::from(111));
+    hashed_storage.storage.insert(B256::repeat_byte(0x02), U256::from(222));
+    hashed_storage.storage.insert(B256::repeat_byte(0x03), U256::ZERO); // Deleted storage
+    post_state.storages.insert(storage_addr, hashed_storage);
+
+    let block_state_diff = BlockStateDiff {
+        sorted_trie_updates: trie_updates.into_sorted(),
+        sorted_post_state: post_state.into_sorted(),
+    };
+
+    // Store the updates
+    storage.store_trie_updates(block_ref, block_state_diff).await?;
+
+    // ========== Verify Account Branch Nodes ==========
+    let mut account_trie_cursor = storage.account_trie_cursor(block_ref.block.number + 10)?;
+
+    // Should find the added branches
+    let result1 = account_trie_cursor.seek_exact(account_path1)?;
+    assert!(result1.is_some(), "Account branch node 1 should be found");
+    assert_eq!(result1.unwrap().0, account_path1);
+
+    let result2 = account_trie_cursor.seek_exact(account_path2)?;
+    assert!(result2.is_some(), "Account branch node 2 should be found");
+    assert_eq!(result2.unwrap().0, account_path2);
+
+    // Removed node should not be found
+    let removed_result = account_trie_cursor.seek_exact(removed_account_path)?;
+    assert!(removed_result.is_none(), "Removed account node should not be found");
+
+    // ========== Verify Storage Branch Nodes ==========
+    let mut storage_trie_cursor =
+        storage.storage_trie_cursor(hashed_address, block_ref.block.number + 10)?;
+
+    let storage_result1 = storage_trie_cursor.seek_exact(storage_path1)?;
+    assert!(storage_result1.is_some(), "Storage branch node 1 should be found");
+
+    let storage_result2 = storage_trie_cursor.seek_exact(storage_path2)?;
+    assert!(storage_result2.is_some(), "Storage branch node 2 should be found");
+
+    // Removed storage node should not be found
+    let removed_storage_result = storage_trie_cursor.seek_exact(removed_storage_path)?;
+    assert!(removed_storage_result.is_none(), "Removed storage node should not be found");
+
+    // ========== Verify Account Leaves ==========
+    let mut account_cursor = storage.account_hashed_cursor(block_ref.block.number + 10)?;
+
+    let acc1_result = account_cursor.seek(account1_addr)?;
+    assert!(acc1_result.is_some(), "Account 1 should be found");
+    assert_eq!(acc1_result.unwrap().0, account1_addr);
+    assert_eq!(acc1_result.unwrap().1.nonce, 1);
+    assert_eq!(acc1_result.unwrap().1.balance, U256::from(1000));
+
+    let acc2_result = account_cursor.seek(account2_addr)?;
+    assert!(acc2_result.is_some(), "Account 2 should be found");
+    assert_eq!(acc2_result.unwrap().1.nonce, 2);
+
+    // Deleted account should not be found
+    let deleted_acc_result = account_cursor.seek(deleted_account_addr)?;
+    assert!(
+        deleted_acc_result.is_none() || deleted_acc_result.unwrap().0 != deleted_account_addr,
+        "Deleted account should not be found"
+    );
+
+    // ========== Verify Storage Leaves ==========
+    let mut storage_cursor =
+        storage.storage_hashed_cursor(storage_addr, block_ref.block.number + 10)?;
+
+    let slot1_result = storage_cursor.seek(B256::repeat_byte(0x01))?;
+    assert!(slot1_result.is_some(), "Storage slot 1 should be found");
+    assert_eq!(slot1_result.unwrap().1, U256::from(111));
+
+    let slot2_result = storage_cursor.seek(B256::repeat_byte(0x02))?;
+    assert!(slot2_result.is_some(), "Storage slot 2 should be found");
+    assert_eq!(slot2_result.unwrap().1, U256::from(222));
+
+    // Zero-valued storage should not be found (deleted)
+    let slot3_result = storage_cursor.seek(B256::repeat_byte(0x03))?;
+    assert!(
+        slot3_result.is_none() || slot3_result.unwrap().0 != B256::repeat_byte(0x03),
+        "Zero-valued storage slot should not be found"
+    );
+
+    // ========== Verify fetch_trie_updates can retrieve the data ==========
+    let fetched_diff = storage.fetch_trie_updates(block_ref.block.number).await?;
+
+    // Check that trie updates are stored
+    assert_eq!(
+        fetched_diff.sorted_trie_updates.account_nodes_ref().len(),
+        3,
+        "Should have 3 account nodes, including removed"
+    );
+    assert_eq!(
+        fetched_diff.sorted_trie_updates.storage_tries_ref().len(),
+        1,
+        "Should have 1 storage trie"
+    );
+
+    // Check that post state is stored
+    assert_eq!(
+        fetched_diff.sorted_post_state.accounts.len(),
+        3,
+        "Should have 3 accounts (including deleted)"
+    );
+    assert_eq!(fetched_diff.sorted_post_state.storages.len(), 1, "Should have 1 storage entry");
+
+    Ok(())
+}
+
+/// Test that `replace_updates` properly applies hashed/trie storage updates to the DB
+///
+/// This test verifies the bug fix where `replace_updates` was only storing `trie_updates`
+/// and `post_states` directly without populating the internal data structures
+/// (`hashed_accounts`, `hashed_storages`, `account_branches`, `storage_branches`).
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[tokio::test]
+#[serial]
+async fn test_replace_updates_applies_all_updates<S: OpProofsStore>(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    use reth_trie::{updates::StorageTrieUpdates, HashedStorage};
+
+    let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96)));
+
+    // ========== Setup: Store initial state at blocks 50, 100, 101 ==========
+    let initial_account_addr = B256::repeat_byte(0x10);
+    let initial_account = create_test_account_with_values(1, 1000, 0xAA);
+
+    let initial_storage_addr = B256::repeat_byte(0x20);
+    let initial_storage_slot = B256::repeat_byte(0x01);
+    let initial_storage_value = U256::from(100);
+
+    let initial_branch_path = nibbles_from(vec![1, 2, 3]);
+    let initial_branch = create_test_branch();
+
+    // Store initial data at block 50
+    let mut initial_trie_updates_50 = TrieUpdates::default();
+    initial_trie_updates_50.account_nodes.insert(initial_branch_path, initial_branch.clone());
+
+    let mut initial_post_state_50 = HashedPostState::default();
+    initial_post_state_50.accounts.insert(initial_account_addr, Some(initial_account));
+
+    let initial_diff_50 = BlockStateDiff {
+        sorted_trie_updates: initial_trie_updates_50.into_sorted(),
+        sorted_post_state: initial_post_state_50.into_sorted(),
+    };
+    storage.store_trie_updates(block_ref_50, initial_diff_50).await?;
+
+    // Store data at block 100 (common block)
+    let mut initial_trie_updates_100 = TrieUpdates::default();
+    let common_branch_path = nibbles_from(vec![4, 5, 6]);
+    initial_trie_updates_100.account_nodes.insert(common_branch_path, initial_branch.clone());
+
+    let mut initial_post_state_100 = HashedPostState::default();
+    let mut initial_storage_100 = HashedStorage::new(false);
+    initial_storage_100.storage.insert(initial_storage_slot, initial_storage_value);
+    initial_post_state_100.storages.insert(initial_storage_addr, initial_storage_100);
+
+    let initial_diff_100 = BlockStateDiff {
+        sorted_trie_updates: initial_trie_updates_100.into_sorted(),
+        sorted_post_state: initial_post_state_100.into_sorted(),
+    };
+
+    let block_ref_100 =
+        BlockWithParent::new(block_ref_50.block.hash, NumHash::new(100, B256::repeat_byte(0x97)));
+
+    storage.store_trie_updates(block_ref_100, initial_diff_100).await?;
+
+    // Store data at block 101 (will be replaced)
+    let mut initial_trie_updates_101 = TrieUpdates::default();
+    let old_branch_path = nibbles_from(vec![7, 8, 9]);
+    initial_trie_updates_101.account_nodes.insert(old_branch_path, initial_branch.clone());
+
+    let mut initial_post_state_101 = HashedPostState::default();
+    let old_account_addr = B256::repeat_byte(0x30);
+    let old_account = create_test_account_with_values(99, 9999, 0xFF);
+    initial_post_state_101.accounts.insert(old_account_addr, Some(old_account));
+
+    let initial_diff_101 = BlockStateDiff {
+        sorted_trie_updates: initial_trie_updates_101.into_sorted(),
+        sorted_post_state: initial_post_state_101.into_sorted(),
+    };
+    let block_ref_101 =
+        BlockWithParent::new(block_ref_100.block.hash, NumHash::new(101, B256::repeat_byte(0x98)));
+    storage.store_trie_updates(block_ref_101, initial_diff_101).await?;
+
+    let block_ref_102 =
+        BlockWithParent::new(block_ref_101.block.hash, NumHash::new(102, B256::repeat_byte(0x99)));
+
+    // ========== Verify initial state exists ==========
+    // Verify block 50 data exists
+    let mut cursor_initial = storage.account_trie_cursor(75)?;
+    assert!(
+        cursor_initial.seek_exact(initial_branch_path)?.is_some(),
+        "Initial branch should exist before replace"
+    );
+
+    // Verify block 101 old data exists
+    let mut cursor_old = storage.account_trie_cursor(150)?;
+    assert!(
+        cursor_old.seek_exact(old_branch_path)?.is_some(),
+        "Old branch at block 101 should exist before replace"
+    );
+
+    let mut account_cursor_old = storage.account_hashed_cursor(150)?;
+    assert!(
+        account_cursor_old.seek(old_account_addr)?.is_some(),
+        "Old account at block 101 should exist before replace"
+    );
+
+    // ========== Call replace_updates to replace blocks after 100 ==========
+    let mut blocks_to_add: HashMap<BlockWithParent, BlockStateDiff> = HashMap::default();
+
+    // New data for block 101
+    let new_account_addr = B256::repeat_byte(0x40);
+    let new_account = create_test_account_with_values(5, 5000, 0xCC);
+
+    let new_storage_addr = B256::repeat_byte(0x50);
+    let new_storage_slot = B256::repeat_byte(0x02);
+    let new_storage_value = U256::from(999);
+
+    let new_branch_path = nibbles_from(vec![10, 11, 12]);
+    let new_branch = create_test_branch_variant();
+
+    let storage_branch_path = nibbles_from(vec![5, 5]);
+    let storage_hashed_addr = B256::repeat_byte(0x60);
+
+    let mut new_trie_updates = TrieUpdates::default();
+    new_trie_updates.account_nodes.insert(new_branch_path, new_branch.clone());
+
+    // Add storage trie updates
+    let mut storage_trie = StorageTrieUpdates::default();
+    storage_trie.storage_nodes.insert(storage_branch_path, new_branch.clone());
+    new_trie_updates.insert_storage_updates(storage_hashed_addr, storage_trie);
+
+    let mut new_post_state = HashedPostState::default();
+    new_post_state.accounts.insert(new_account_addr, Some(new_account));
+
+    let mut new_storage = HashedStorage::new(false);
+    new_storage.storage.insert(new_storage_slot, new_storage_value);
+    new_post_state.storages.insert(new_storage_addr, new_storage);
+
+    blocks_to_add.insert(
+        block_ref_101,
+        BlockStateDiff {
+            sorted_trie_updates: new_trie_updates.into_sorted(),
+            sorted_post_state: new_post_state.into_sorted(),
+        },
+    );
+
+    // New data for block 102
+    let block_102_account_addr = B256::repeat_byte(0x70);
+    let block_102_account = create_test_account_with_values(10, 10000, 0xDD);
+
+    let mut trie_updates_102 = TrieUpdates::default();
+    let block_102_branch_path = nibbles_from(vec![15, 14, 13]);
+    trie_updates_102.account_nodes.insert(block_102_branch_path, new_branch.clone());
+
+    let mut post_state_102 = HashedPostState::default();
+    post_state_102.accounts.insert(block_102_account_addr, Some(block_102_account));
+
+    blocks_to_add.insert(
+        block_ref_102,
+        BlockStateDiff {
+            sorted_trie_updates: trie_updates_102.into_sorted(),
+            sorted_post_state: post_state_102.into_sorted(),
+        },
+    );
+
+    // Execute replace_updates
+    storage.replace_updates(100, blocks_to_add).await?;
+    // ========== Verify that data up to block 100 still exists ==========
+    let mut cursor_50 = storage.account_trie_cursor(75)?;
should still exist after replace" + ); + + let mut cursor_100 = storage.account_trie_cursor(100)?; + assert!( + cursor_100.seek_exact(common_branch_path)?.is_some(), + "Block 100 branch should still exist after replace" + ); + + let mut storage_cursor_100 = storage.storage_hashed_cursor(initial_storage_addr, 100)?; + let result_100 = storage_cursor_100.seek(initial_storage_slot)?; + assert!(result_100.is_some(), "Block 100 storage should still exist after replace"); + assert_eq!( + result_100.unwrap().1, + initial_storage_value, + "Block 100 storage value should be unchanged" + ); + + // ========== Verify that old data after block 100 is gone ========== + let mut cursor_old_gone = storage.account_trie_cursor(150)?; + assert!( + cursor_old_gone.seek_exact(old_branch_path)?.is_none(), + "Old branch at block 101 should be removed after replace" + ); + + let mut account_cursor_old_gone = storage.account_hashed_cursor(150)?; + let old_acc_result = account_cursor_old_gone.seek(old_account_addr)?; + assert!( + old_acc_result.is_none() || old_acc_result.unwrap().0 != old_account_addr, + "Old account at block 101 should be removed after replace" + ); + + // ========== Verify new data is properly accessible via cursors ========== + + // Verify new account branch nodes + let mut trie_cursor = storage.account_trie_cursor(150)?; + let branch_result = trie_cursor.seek_exact(new_branch_path)?; + assert!(branch_result.is_some(), "New account branch should be accessible via cursor"); + assert_eq!(branch_result.unwrap().0, new_branch_path); + + // Verify new storage branch nodes + let mut storage_trie_cursor = storage.storage_trie_cursor(storage_hashed_addr, 150)?; + let storage_branch_result = storage_trie_cursor.seek_exact(storage_branch_path)?; + assert!(storage_branch_result.is_some(), "New storage branch should be accessible via cursor"); + assert_eq!(storage_branch_result.unwrap().0, storage_branch_path); + + // Verify new hashed accounts + let mut account_cursor = storage.account_hashed_cursor(150)?; + let account_result = account_cursor.seek(new_account_addr)?; + assert!(account_result.is_some(), "New account should be accessible via cursor"); + assert_eq!(account_result.as_ref().unwrap().0, new_account_addr); + assert_eq!(account_result.as_ref().unwrap().1.nonce, new_account.nonce); + assert_eq!(account_result.as_ref().unwrap().1.balance, new_account.balance); + assert_eq!(account_result.as_ref().unwrap().1.bytecode_hash, new_account.bytecode_hash); + + // Verify new hashed storages + let mut storage_cursor = storage.storage_hashed_cursor(new_storage_addr, 150)?; + let storage_result = storage_cursor.seek(new_storage_slot)?; + assert!(storage_result.is_some(), "New storage should be accessible via cursor"); + assert_eq!(storage_result.as_ref().unwrap().0, new_storage_slot); + assert_eq!(storage_result.as_ref().unwrap().1, new_storage_value); + + // Verify block 102 data + let mut trie_cursor_102 = storage.account_trie_cursor(150)?; + let branch_result_102 = trie_cursor_102.seek_exact(block_102_branch_path)?; + assert!(branch_result_102.is_some(), "Block 102 branch should be accessible"); + assert_eq!(branch_result_102.unwrap().0, block_102_branch_path); + + let mut account_cursor_102 = storage.account_hashed_cursor(150)?; + let account_result_102 = account_cursor_102.seek(block_102_account_addr)?; + assert!(account_result_102.is_some(), "Block 102 account should be accessible"); + assert_eq!(account_result_102.as_ref().unwrap().0, block_102_account_addr); + 
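// (Cursor semantics, as exercised throughout this test: a cursor opened at block
+ // 150 returns the most recent value written at or below that height, which is
+ // why the block 101 and block 102 writes are both visible here while the
+ // replaced pre-existing block 101 data is not.) +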
assert_eq!(account_result_102.as_ref().unwrap().1.nonce, block_102_account.nonce); + + // Verify fetch_trie_updates returns the new data + let fetched_101 = storage.fetch_trie_updates(101).await?; + assert_eq!( + fetched_101.sorted_trie_updates.account_nodes_ref().len(), + 1, + "Should have 1 account branch node at block 101" + ); + assert!( + fetched_101 + .sorted_trie_updates + .account_nodes_ref() + .iter() + .any(|(addr, _)| *addr == new_branch_path), + "New branch path should be in trie_updates" + ); + assert_eq!( + fetched_101.sorted_post_state.accounts.len(), + 1, + "Should have 1 account at block 101" + ); + assert!( + fetched_101.sorted_post_state.accounts.iter().any(|(addr, _)| *addr == new_account_addr), + "New account should be in post_state" + ); + + Ok(()) +} + +/// Test that pure deletions (nodes only in `removed_nodes`) are properly stored +/// +/// This test verifies that when a node appears only in `removed_nodes` (not in updates), +/// it is properly stored as a deletion and subsequent queries return None for that path. +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[tokio::test] +#[serial] +async fn test_pure_deletions_stored_correctly( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::updates::StorageTrieUpdates; + + // ========== Setup: Store initial branch nodes at block 50 ========== + let account_path1 = nibbles_from(vec![1, 2, 3]); + let account_path2 = nibbles_from(vec![4, 5, 6]); + let storage_path1 = nibbles_from(vec![7, 8, 9]); + let storage_path2 = nibbles_from(vec![10, 11, 12]); + let storage_address = B256::repeat_byte(0x42); + + let initial_branch = create_test_branch(); + + let mut initial_trie_updates = TrieUpdates::default(); + initial_trie_updates.account_nodes.insert(account_path1, initial_branch.clone()); + initial_trie_updates.account_nodes.insert(account_path2, initial_branch.clone()); + + let mut storage_trie = StorageTrieUpdates::default(); + storage_trie.storage_nodes.insert(storage_path1, initial_branch.clone()); + storage_trie.storage_nodes.insert(storage_path2, initial_branch.clone()); + initial_trie_updates.insert_storage_updates(storage_address, storage_trie); + + let initial_diff = BlockStateDiff { + sorted_trie_updates: initial_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); + + storage.store_trie_updates(block_ref_50, initial_diff).await?; + + // Verify initial state exists at block 75 + let mut cursor_75 = storage.account_trie_cursor(75)?; + assert!( + cursor_75.seek_exact(account_path1)?.is_some(), + "Initial account branch 1 should exist at block 75" + ); + assert!( + cursor_75.seek_exact(account_path2)?.is_some(), + "Initial account branch 2 should exist at block 75" + ); + + let mut storage_cursor_75 = storage.storage_trie_cursor(storage_address, 75)?; + assert!( + storage_cursor_75.seek_exact(storage_path1)?.is_some(), + "Initial storage branch 1 should exist at block 75" + ); + assert!( + storage_cursor_75.seek_exact(storage_path2)?.is_some(), + "Initial storage branch 2 should exist at block 75" + ); + + // ========== At block 100: Mark paths as deleted (ONLY in removed_nodes) ========== + let mut deletion_trie_updates = TrieUpdates::default(); + + // Add to removed_nodes ONLY (no updates) + deletion_trie_updates.removed_nodes.insert(account_path1); + + // Do the same for storage branch + let mut 
deletion_storage_trie = StorageTrieUpdates::default(); + deletion_storage_trie.removed_nodes.insert(storage_path1); + deletion_trie_updates.insert_storage_updates(storage_address, deletion_storage_trie); + + let deletion_diff = BlockStateDiff { + sorted_trie_updates: deletion_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_100 = + BlockWithParent::new(B256::repeat_byte(0x96), NumHash::new(100, B256::repeat_byte(0x97))); + + storage.store_trie_updates(block_ref_100, deletion_diff).await?; + + // ========== Verify that deleted nodes return None at block 150 ========== + + // Deleted account branch should not be found + let mut cursor_150 = storage.account_trie_cursor(150)?; + let account_result = cursor_150.seek_exact(account_path1)?; + assert!(account_result.is_none(), "Deleted account branch should return None at block 150"); + + // Non-deleted account branch should still exist + let account_result2 = cursor_150.seek_exact(account_path2)?; + assert!( + account_result2.is_some(), + "Non-deleted account branch should still exist at block 150" + ); + + // Deleted storage branch should not be found + let mut storage_cursor_150 = storage.storage_trie_cursor(storage_address, 150)?; + let storage_result = storage_cursor_150.seek_exact(storage_path1)?; + assert!(storage_result.is_none(), "Deleted storage branch should return None at block 150"); + + // Non-deleted storage branch should still exist + let storage_result2 = storage_cursor_150.seek_exact(storage_path2)?; + assert!( + storage_result2.is_some(), + "Non-deleted storage branch should still exist at block 150" + ); + + // ========== Verify that the nodes still exist at block 75 (before deletion) ========== + let mut cursor_75_after = storage.account_trie_cursor(75)?; + assert!( + cursor_75_after.seek_exact(account_path1)?.is_some(), + "Deleted node should still exist at block 75 (before deletion)" + ); + + let mut storage_cursor_75_after = storage.storage_trie_cursor(storage_address, 75)?; + assert!( + storage_cursor_75_after.seek_exact(storage_path1)?.is_some(), + "Deleted storage node should still exist at block 75 (before deletion)" + ); + + // ========== Verify iteration skips deleted nodes ========== + let mut cursor_iter = storage.account_trie_cursor(150)?; + let mut found_paths = Vec::new(); + while let Some((path, _)) = cursor_iter.next()? { + found_paths.push(path); + } + + assert!(!found_paths.contains(&account_path1), "Iteration should skip deleted node"); + assert!(found_paths.contains(&account_path2), "Iteration should include non-deleted node"); + + Ok(()) +} + +/// Test that updates take precedence over removals when both are present +/// +/// This test verifies that when a path appears in both `removed_nodes` and `account_nodes`, +/// the update from `account_nodes` takes precedence. This is critical for correctness +/// when processing trie updates that both remove and update the same node. 
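+///
+/// A minimal sketch of the kind of conflicting diff this test constructs
+/// (illustrative; `path` and `node` stand in for the values built below):
+///
+/// ```ignore
+/// let mut updates = TrieUpdates::default();
+/// updates.removed_nodes.insert(path); // mark the node as removed...
+/// updates.account_nodes.insert(path, node.clone()); // ...and also update it
+/// // after storing `updates.into_sorted()`, cursors must return `node`, not None
+/// ```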
+#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[tokio::test] +#[serial] +async fn test_updates_take_precedence_over_removals( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::updates::StorageTrieUpdates; + + // ========== Setup: Store initial branch nodes at block 50 ========== + let account_path = nibbles_from(vec![1, 2, 3]); + let storage_path = nibbles_from(vec![4, 5, 6]); + let storage_address = B256::repeat_byte(0x42); + + let initial_branch = create_test_branch(); + + let mut initial_trie_updates = TrieUpdates::default(); + initial_trie_updates.account_nodes.insert(account_path, initial_branch.clone()); + + let mut storage_trie = StorageTrieUpdates::default(); + storage_trie.storage_nodes.insert(storage_path, initial_branch.clone()); + initial_trie_updates.insert_storage_updates(storage_address, storage_trie); + + let initial_diff = BlockStateDiff { + sorted_trie_updates: initial_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); + + storage.store_trie_updates(block_ref_50, initial_diff).await?; + + // Verify initial state exists at block 75 + let mut cursor_75 = storage.account_trie_cursor(75)?; + assert!( + cursor_75.seek_exact(account_path)?.is_some(), + "Initial account branch should exist at block 75" + ); + + let mut storage_cursor_75 = storage.storage_trie_cursor(storage_address, 75)?; + assert!( + storage_cursor_75.seek_exact(storage_path)?.is_some(), + "Initial storage branch should exist at block 75" + ); + + // ========== At block 100: Add paths to BOTH removed_nodes AND account_nodes ========== + // This simulates a scenario where a node is both removed and updated + // The update should take precedence + let updated_branch = create_test_branch_variant(); + + let mut conflicting_trie_updates = TrieUpdates::default(); + + // Add to removed_nodes + conflicting_trie_updates.removed_nodes.insert(account_path); + + // Also add to account_nodes (this should take precedence) + conflicting_trie_updates.account_nodes.insert(account_path, updated_branch.clone()); + + // Do the same for storage branch + let mut conflicting_storage_trie = StorageTrieUpdates::default(); + conflicting_storage_trie.removed_nodes.insert(storage_path); + conflicting_storage_trie.storage_nodes.insert(storage_path, updated_branch.clone()); + conflicting_trie_updates.insert_storage_updates(storage_address, conflicting_storage_trie); + + let conflicting_diff = BlockStateDiff { + sorted_trie_updates: conflicting_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_100 = + BlockWithParent::new(B256::repeat_byte(0x96), NumHash::new(100, B256::repeat_byte(0x97))); + + storage.store_trie_updates(block_ref_100, conflicting_diff).await?; + + // ========== Verify that updates took precedence at block 150 ========== + + // Account branch should exist (not deleted) with the updated value + let mut cursor_150 = storage.account_trie_cursor(150)?; + let account_result = cursor_150.seek_exact(account_path)?; + assert!( + account_result.is_some(), + "Account branch should exist at block 150 (update should take precedence over removal)" + ); + let (found_path, found_branch) = account_result.unwrap(); + assert_eq!(found_path, account_path); + // Verify it's the updated branch, not the initial one + assert_eq!( + found_branch.state_mask, 
updated_branch.state_mask, + "Account branch should be the updated version, not the initial one" + ); + + // Storage branch should exist (not deleted) with the updated value + let mut storage_cursor_150 = storage.storage_trie_cursor(storage_address, 150)?; + let storage_result = storage_cursor_150.seek_exact(storage_path)?; + assert!( + storage_result.is_some(), + "Storage branch should exist at block 150 (update should take precedence over removal)" + ); + let (found_storage_path, found_storage_branch) = storage_result.unwrap(); + assert_eq!(found_storage_path, storage_path); + // Verify it's the updated branch + assert_eq!( + found_storage_branch.state_mask, updated_branch.state_mask, + "Storage branch should be the updated version, not the initial one" + ); + + // ========== Verify that the old version still exists at block 75 ========== + let mut cursor_75_after = storage.account_trie_cursor(75)?; + let result_75 = cursor_75_after.seek_exact(account_path)?; + assert!(result_75.is_some(), "Initial version should still exist at block 75"); + let (_, branch_75) = result_75.unwrap(); + assert_eq!( + branch_75.state_mask, initial_branch.state_mask, + "Block 75 should see the initial branch, not the updated one" + ); + + Ok(()) +} diff --git a/crates/optimism/trie/tests/live.rs b/crates/optimism/trie/tests/live.rs new file mode 100644 index 00000000000..b87c527e24a --- /dev/null +++ b/crates/optimism/trie/tests/live.rs @@ -0,0 +1,492 @@ +//! End-to-end test of the live trie collector. + +use alloy_consensus::{constants::ETH_TO_WEI, BlockHeader, Header, TxEip2930}; +use alloy_genesis::{Genesis, GenesisAccount}; +use alloy_primitives::{Address, TxKind, B256, U256}; +use derive_more::Constructor; +use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS}; +use reth_db::Database; +use reth_db_common::init::init_genesis; +use reth_ethereum_primitives::{Block, BlockBody, Receipt, TransactionSigned}; +use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_api::{NodePrimitives, NodeTypesWithDB}; +use reth_optimism_trie::{ + backfill::BackfillJob, in_memory::InMemoryProofsStorage, live::LiveTrieCollector, + OpProofsStorage, OpProofsStorageError, +}; +use reth_primitives_traits::{ + crypto::secp256k1::public_key_to_address, Block as _, RecoveredBlock, +}; +use reth_provider::{ + providers::{BlockchainProvider, ProviderNodeTypes}, + test_utils::create_test_provider_factory_with_chain_spec, + BlockWriter as _, ExecutionOutcome, HashedPostStateProvider, LatestStateProviderRef, + ProviderFactory, StateRootProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_testing_utils::generators::sign_tx_with_key_pair; +use secp256k1::{rand::thread_rng, Keypair, Secp256k1}; +use std::sync::Arc; + +/// Specification for a transaction within a block +#[derive(Debug, Clone)] +struct TxSpec { + /// Recipient address for the transaction + to: Address, + /// Value to transfer (in wei) + value: U256, + /// Nonce for the transaction (will be automatically assigned if None) + nonce: Option, +} + +impl TxSpec { + /// Create a simple transfer transaction + const fn transfer(to: Address, value: U256) -> Self { + Self { to, value, nonce: None } + } +} + +/// Specification for a block in the test chain +#[derive(Debug, Clone, Constructor)] +struct BlockSpec { + /// Transactions to include in this block + txs: Vec, +} + +/// Configuration for a test scenario +#[derive(Debug, Constructor)] +struct TestScenario { + /// Blocks to 
execute before running the backfill job + blocks_before_backfill: Vec, + /// Blocks to execute after backfill using the live collector + blocks_after_backfill: Vec, +} + +/// Helper to create a chain spec with a genesis account funded +fn chain_spec_with_address(address: Address) -> Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis { + alloc: [( + address, + GenesisAccount { balance: U256::from(10 * ETH_TO_WEI), ..Default::default() }, + )] + .into(), + ..MAINNET.genesis.clone() + }) + .paris_activated() + .build(), + ) +} + +/// Creates a block from a spec, executing transactions with the given keypair +fn create_block_from_spec( + spec: &BlockSpec, + block_number: u64, + parent_hash: B256, + chain_spec: &Arc, + key_pair: Keypair, + nonce_counter: &mut u64, +) -> RecoveredBlock { + let transactions: Vec = spec + .txs + .iter() + .map(|tx_spec| { + let nonce = tx_spec.nonce.unwrap_or_else(|| { + let current = *nonce_counter; + *nonce_counter += 1; + current + }); + + sign_tx_with_key_pair( + key_pair, + TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce, + gas_limit: MIN_TRANSACTION_GAS, + gas_price: 1_500_000_000, + to: TxKind::Call(tx_spec.to), + value: tx_spec.value, + ..Default::default() + } + .into(), + ) + }) + .collect(); + + let gas_total = transactions.len() as u64 * MIN_TRANSACTION_GAS; + + Block { + header: Header { + parent_hash, + receipts_root: alloy_primitives::b256!( + "0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: block_number, + gas_limit: gas_total.max(MIN_TRANSACTION_GAS), + gas_used: gas_total, + state_root: B256::ZERO, // Will be calculated by executor + ..Default::default() + }, + body: BlockBody { transactions, ..Default::default() }, + } + .try_into_recovered() + .unwrap() +} + +/// Executes a block and returns the updated block with correct state root +fn execute_block( + block: &mut RecoveredBlock, + provider_factory: &ProviderFactory, + chain_spec: &Arc, +) -> eyre::Result> +where + N: ProviderNodeTypes< + Primitives: NodePrimitives, + > + NodeTypesWithDB, +{ + let provider = provider_factory.provider()?; + let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + let block_executor = evm_config.batch_executor(db); + + let execution_result = block_executor.execute(block)?; + + let hashed_state = + LatestStateProviderRef::new(&provider).hashed_post_state(&execution_result.state); + let state_root = LatestStateProviderRef::new(&provider).state_root(hashed_state)?; + + block.set_state_root(state_root); + + Ok(execution_result) +} + +/// Commits a block and its execution output to the database +fn commit_block_to_database( + block: &RecoveredBlock, + execution_output: &reth_evm::execute::BlockExecutionOutput, + provider_factory: &ProviderFactory, +) -> eyre::Result<()> +where + N: ProviderNodeTypes< + Primitives: NodePrimitives, + > + NodeTypesWithDB, +{ + let execution_outcome = ExecutionOutcome { + bundle: execution_output.state.clone(), + receipts: vec![execution_output.receipts.clone()], + first_block: block.number(), + requests: vec![execution_output.requests.clone()], + }; + + // Calculate hashed state from execution result + let state_provider = provider_factory.provider()?; + let hashed_state = HashedPostStateProvider::hashed_post_state( + &LatestStateProviderRef::new(&state_provider), + &execution_output.state, + 
); + + let provider_rw = provider_factory.provider_rw()?; + provider_rw.append_blocks_with_state( + vec![block.clone()], + &execution_outcome, + hashed_state.into_sorted(), + )?; + provider_rw.commit()?; + + Ok(()) +} + +/// Runs a test scenario with the given configuration +async fn run_test_scenario( + scenario: TestScenario, + provider_factory: ProviderFactory, + chain_spec: Arc, + key_pair: Keypair, + storage: OpProofsStorage>, +) -> eyre::Result<()> +where + N: ProviderNodeTypes< + Primitives: NodePrimitives, + > + NodeTypesWithDB, +{ + let genesis_hash = chain_spec.genesis_hash(); + let mut nonce_counter = 0u64; + let mut last_block_hash = genesis_hash; + let mut last_block_number = 0u64; + + // Execute blocks before backfill + for (idx, block_spec) in scenario.blocks_before_backfill.iter().enumerate() { + let block_number = idx as u64 + 1; + let mut block = create_block_from_spec( + block_spec, + block_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; + commit_block_to_database(&block, &execution_output, &provider_factory)?; + + last_block_hash = block.hash(); + last_block_number = block_number; + } + + { + let provider = provider_factory.db_ref(); + let tx = provider.tx()?; + let backfill_job = BackfillJob::new(storage.clone(), &tx); + backfill_job.run(last_block_number, last_block_hash).await?; + } + + // Execute blocks after backfill using live collector + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + + for (idx, block_spec) in scenario.blocks_after_backfill.iter().enumerate() { + let block_number = last_block_number + idx as u64 + 1; + let mut block = create_block_from_spec( + block_spec, + block_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + // Execute the block to get the correct state root + let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; + + // Create a fresh blockchain provider to ensure it sees all committed blocks + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + let live_trie_collector = + LiveTrieCollector::new(evm_config.clone(), blockchain_db, &storage); + + // Use the live collector to execute and store trie updates + live_trie_collector.execute_and_store_block_updates(&block).await?; + + // Commit the block to the database so subsequent blocks can build on it + commit_block_to_database(&block, &execution_output, &provider_factory)?; + + last_block_hash = block.hash(); + } + + Ok(()) +} + +/// End-to-end test of a single live collector iteration. 
+/// (1) Creates a chain with some state +/// (2) Stores the genesis state into storage via backfill +/// (3) Executes a block and calculates the state root using the stored state +#[tokio::test] +async fn test_execute_and_store_block_updates() { + let storage = Arc::new(InMemoryProofsStorage::new()).into(); + + // Create a keypair for signing transactions + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + // Create chain spec with the sender address funded in genesis + let chain_spec = chain_spec_with_address(sender); + + // Create test database and provider factory + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + + // Insert genesis state into the database + init_genesis(&provider_factory).unwrap(); + + // Define the test scenario: + // - No blocks before backfill + // - Backfill to genesis (block 0) + // - Execute one block with a single transaction after backfill + let recipient = Address::repeat_byte(0x42); + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); +} + +#[tokio::test] +async fn test_execute_and_store_block_updates_missing_parent_block() { + let storage: OpProofsStorage> = + Arc::new(InMemoryProofsStorage::new()).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // No blocks before backfill; backfill only inserts genesis. + let scenario = TestScenario::new(vec![], vec![]); + + // Run backfill (block 0 only) + run_test_scenario( + scenario, + provider_factory.clone(), + chain_spec.clone(), + key_pair, + storage.clone(), + ) + .await + .unwrap(); + + // Create a block whose parent block number is missing. + let incorrect_block_number = 2; + let incorrect_parent_hash = B256::repeat_byte(0x11); + + let mut nonce_counter = 0; + let incorrect_block = create_block_from_spec( + &BlockSpec::new(vec![]), + incorrect_block_number, + incorrect_parent_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + let blockchain_db = BlockchainProvider::new(provider_factory.clone()).unwrap(); + let collector = + LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); + + // EXPECT: MissingParentBlock + let err = collector.execute_and_store_block_updates(&incorrect_block).await.unwrap_err(); + + assert!(matches!(err, OpProofsStorageError::MissingParentBlock { .. })); +} + +#[tokio::test] +async fn test_execute_and_store_block_updates_state_root_mismatch() { + let storage: OpProofsStorage> = + Arc::new(InMemoryProofsStorage::new()).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // Run normal scenario: no blocks before backfill, one block after. 
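+ // The scenario backfills proof storage at genesis and then pushes one block
+ // through the live collector, so storage is populated before we hand-build
+ // a follow-up block with a corrupted state root below.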
+ let recipient = Address::repeat_byte(0x42); + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], + ); + + run_test_scenario( + scenario, + provider_factory.clone(), + chain_spec.clone(), + key_pair, + storage.clone(), + ) + .await + .unwrap(); + + // Generate a second block normally + let blockchain_db = BlockchainProvider::new(provider_factory.clone()).unwrap(); + let collector = + LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); + + // Create the next block + let mut nonce_counter = 0; + let last_block_hash = chain_spec.genesis_hash(); // because scenario executes 1 block + let next_number = 2; + + let mut block = create_block_from_spec( + &BlockSpec::new(vec![]), + next_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + // Execute it to compute a correct state root + let _ = execute_block(&mut block, &provider_factory, &chain_spec).unwrap(); + + // Change the state root to induce the error + block.header_mut().state_root = B256::repeat_byte(0xAA); + + // EXPECT: StateRootMismatch + let err = collector.execute_and_store_block_updates(&block).await.unwrap_err(); + + assert!(matches!(err, OpProofsStorageError::StateRootMismatch { .. })); +} + +/// Test with multiple blocks before and after backfill +#[tokio::test] +async fn test_multiple_blocks_before_and_after_backfill() { + let storage = Arc::new(InMemoryProofsStorage::new()).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // Define the test scenario: + // - Execute 3 blocks before backfill (will be committed to db) + // - Backfill to block 3 + // - Execute 2 more blocks using the live collector + let recipient1 = Address::repeat_byte(0x42); + let recipient2 = Address::repeat_byte(0x43); + let recipient3 = Address::repeat_byte(0x44); + + let scenario = TestScenario::new( + vec![ + BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(1))]), + BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(2))]), + BlockSpec::new(vec![TxSpec::transfer(recipient3, U256::from(3))]), + ], + vec![ + BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(4))]), + BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(5))]), + ], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); +} + +/// Test with blocks containing multiple transactions +#[tokio::test] +async fn test_blocks_with_multiple_transactions() { + let storage = Arc::new(InMemoryProofsStorage::new()).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + let recipient1 = Address::repeat_byte(0x42); + let recipient2 = Address::repeat_byte(0x43); + let recipient3 = Address::repeat_byte(0x44); + + // Block with 3 transactions + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![ + TxSpec::transfer(recipient1, U256::from(1)), + TxSpec::transfer(recipient2, U256::from(2)), + 
TxSpec::transfer(recipient3, U256::from(3)), + ])], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); +} diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs index ba5c927b9f3..e3f25d7b7f3 100644 --- a/crates/payload/basic/src/stack.rs +++ b/crates/payload/basic/src/stack.rs @@ -7,7 +7,7 @@ use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{Address, B256, U256}; use reth_payload_builder::PayloadId; use reth_payload_primitives::BuiltPayload; -use reth_primitives_traits::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::{BlockTy, SealedBlock}; use alloy_eips::eip7685::Requests; use std::{error::Error, fmt}; @@ -155,7 +155,7 @@ where { type Primitives = L::Primitives; - fn block(&self) -> &SealedBlock<::Block> { + fn block(&self) -> &SealedBlock> { match self { Self::Left(l) => l.block(), Self::Right(r) => r.block(), diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 7072f288cd8..c71ffb35adf 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -15,7 +15,7 @@ extern crate alloc; use alloy_primitives::Bytes; use reth_chainspec::EthereumHardforks; -use reth_primitives_traits::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::{BlockTy, SealedBlock}; mod error; pub use error::{ @@ -59,9 +59,7 @@ pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static /// Converts a sealed block into the execution payload format. fn block_to_payload( - block: SealedBlock< - <::Primitives as NodePrimitives>::Block, - >, + block: SealedBlock::Primitives>>, ) -> Self::ExecutionData; } diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 726122743ea..48e774e0a99 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -12,7 +12,7 @@ use core::fmt; use either::Either; use reth_chain_state::ComputedTrieData; use reth_execution_types::ExecutionOutcome; -use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{BlockTy, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_trie_common::{ updates::{TrieUpdates, TrieUpdatesSorted}, HashedPostState, HashedPostStateSorted, @@ -78,7 +78,7 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug { type Primitives: NodePrimitives; /// Returns the built block in its sealed (hash-verified) form. - fn block(&self) -> &SealedBlock<::Block>; + fn block(&self) -> &SealedBlock>; /// Returns the total fees collected from all transactions in this block. 
fn fees(&self) -> U256; diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 30915f89b2f..2e82cda4594 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -9,7 +9,7 @@ use crate::{ PrunerError, }; use reth_db_api::{table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_provider::{ errors::provider::ProviderResult, BlockReader, DBProvider, EitherWriter, NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, StorageSettingsCache, @@ -50,14 +50,14 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.tx_ref().prune_table_with_range::::Receipt, - >>( - tx_range, - &mut limiter, - |_| false, - |row| last_pruned_transaction = row.0, - )?; + let (pruned, done) = provider + .tx_ref() + .prune_table_with_range::>>( + tx_range, + &mut limiter, + |_| false, + |row| last_pruned_transaction = row.0, + )?; trace!(target: "pruner", %pruned, %done, "Pruned receipts"); let last_pruned_block = provider diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 9e57bd2411a..b4daf08f4cd 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -5,7 +5,7 @@ use crate::{ }; use alloy_consensus::TxReceipt; use reth_db_api::{table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_provider::{ BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, }; @@ -149,24 +149,24 @@ where // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, - >>( - tx_range, - &mut limiter, - |(tx_num, receipt)| { - let skip = num_addresses > 0 && - receipt.logs().iter().any(|log| { - filtered_addresses[..num_addresses].contains(&&log.address) - }); - - if skip { - last_skipped_transaction = *tx_num; - } - skip - }, - |row| last_pruned_transaction = Some(row.0), - )?; + (deleted, done) = provider + .tx_ref() + .prune_table_with_range::>>( + tx_range, + &mut limiter, + |(tx_num, receipt)| { + let skip = num_addresses > 0 && + receipt.logs().iter().any(|log| { + filtered_addresses[..num_addresses].contains(&&log.address) + }); + + if skip { + last_skipped_transaction = *tx_num; + } + skip + }, + |row| last_pruned_transaction = Some(row.0), + )?; trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 333143aa987..abdbf0dacce 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -134,7 +134,9 @@ where mod tests { use super::*; use crate::eth::helpers::types::EthRpcConverter; - use alloy_consensus::{Block, Header, SidecarBuilder, SimpleCoder, Transaction}; + use alloy_consensus::{ + BlobTransactionSidecar, Block, Header, SidecarBuilder, SimpleCoder, Transaction, + }; use alloy_primitives::{Address, U256}; use alloy_rpc_types_eth::request::TransactionRequest; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; @@ -327,12 +329,13 @@ mod tests { let 
mut builder = SidecarBuilder::::new(); builder.ingest(b"dummy blob"); + let sidecar: BlobTransactionSidecar = builder.build().unwrap(); // EIP-4844 blob transaction with versioned hashes but no blob fee let tx_req = TransactionRequest { from: Some(address), to: Some(Address::random().into()), - sidecar: Some(builder.build().unwrap().into()), + sidecar: Some(sidecar.into()), ..Default::default() }; @@ -364,13 +367,14 @@ mod tests { let mut builder = SidecarBuilder::::new(); builder.ingest(b"dummy blob"); + let sidecar: BlobTransactionSidecar = builder.build().unwrap(); // EIP-4844 blob transaction with blob fee already set let tx_req = TransactionRequest { from: Some(address), to: Some(Address::random().into()), transaction_type: Some(3), // EIP-4844 - sidecar: Some(builder.build().unwrap().into()), + sidecar: Some(sidecar.into()), max_fee_per_blob_gas: Some(provided_blob_fee), // Already set ..Default::default() }; diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 78185f92ffb..60224d2c09f 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -29,8 +29,8 @@ use reth_metrics::{ }; use reth_node_api::{NewPayloadError, PayloadTypes}; use reth_primitives_traits::{ - constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, - SealedBlock, SealedHeaderFor, + constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, BlockTy, GotExpected, HeaderTy, ReceiptTy, + RecoveredBlock, SealedBlock, SealedHeaderFor, }; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; @@ -63,9 +63,7 @@ where evm_config: E, config: ValidationApiConfig, task_spawner: Box, - payload_validator: Arc< - dyn PayloadValidator::Block>, - >, + payload_validator: Arc>>, ) -> Self { let ValidationApiConfig { disallow, validation_window } = config; @@ -113,7 +111,7 @@ where impl ValidationApi where - Provider: BlockReaderIdExt

<Header = <E::Primitives as NodePrimitives>::BlockHeader> + Provider: BlockReaderIdExt<Header = HeaderTy<E::Primitives>
> + ChainSpecProvider + StateProviderFactory + 'static, @@ -123,7 +121,7 @@ where /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( &self, - block: RecoveredBlock<<E::Primitives as NodePrimitives>::Block>, + block: RecoveredBlock<BlockTy<E::Primitives>>, message: BidTrace, registered_gas_limit: u64, ) -> Result<(), ValidationApiError> { @@ -283,8 +281,8 @@ where /// to checking the latest block transaction. fn ensure_payment( &self, - block: &SealedBlock<<E::Primitives as NodePrimitives>::Block>, - output: &BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>, + block: &SealedBlock<BlockTy<E::Primitives>>, + output: &BlockExecutionOutput<ReceiptTy<E::Primitives>>, message: &BidTrace, ) -> Result<(), ValidationApiError> { let (mut balance_before, balance_after) = if let Some(acc) = @@ -478,7 +476,7 @@ where #[async_trait] impl<Provider, E> BlockSubmissionValidationApiServer for ValidationApi<Provider, E> where - Provider: BlockReaderIdExt
<Header = <E::Primitives as NodePrimitives>::BlockHeader> + Provider: BlockReaderIdExt<Header = HeaderTy<E::Primitives>
> + ChainSpecProvider + StateProviderFactory + Clone @@ -563,8 +561,7 @@ pub struct ValidationApiInner { /// Consensus implementation. consensus: Arc>, /// Execution payload validator. - payload_validator: - Arc::Block>>, + payload_validator: Arc>>, /// Block executor factory. evm_config: E, /// Set of disallowed addresses diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index f78b8258220..59c672a08af 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -8,7 +8,9 @@ use reth_db::{static_file::HeaderMask, tables}; use reth_evm::{execute::Executor, metrics::ExecutorMetrics, ConfigureEvm}; use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; -use reth_primitives_traits::{format_gas_throughput, BlockBody, NodePrimitives}; +use reth_primitives_traits::{ + format_gas_throughput, BlockBody, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, +}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockReader, DBProvider, EitherWriter, ExecutionOutcome, HeaderProvider, @@ -259,14 +261,12 @@ impl Stage for ExecutionStage where E: ConfigureEvm, Provider: DBProvider - + BlockReader< - Block = ::Block, - Header = ::BlockHeader, - > + StaticFileProviderFactory< + + BlockReader, Header = HeaderTy> + + StaticFileProviderFactory< Primitives: NodePrimitives, > + StatsReader + BlockHashReader - + StateWriter::Receipt> + + StateWriter> + StorageSettingsCache, { /// Return the id of the stage diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 360b34b5db9..2d3a1356bc3 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -14,9 +14,7 @@ use reth_network_p2p::headers::{ downloader::{HeaderDownloader, HeaderSyncGap, SyncTarget}, error::HeadersDownloaderError, }; -use reth_primitives_traits::{ - serde_bincode_compat, FullBlockHeader, HeaderTy, NodePrimitives, SealedHeader, -}; +use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader, HeaderTy, SealedHeader}; use reth_provider::{ providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderSyncGapProvider, StaticFileProviderFactory, @@ -93,8 +91,8 @@ where fn write_headers

<P>(&mut self, provider: &P) -> Result<BlockNumber, StageError> where P: DBProvider + StaticFileProviderFactory, - Downloader: HeaderDownloader<Header = <P::Primitives as NodePrimitives>::BlockHeader>, - <P::Primitives as NodePrimitives>::BlockHeader: Value + FullBlockHeader, + Downloader: HeaderDownloader<Header = HeaderTy<P::Primitives>
>, + HeaderTy<P::Primitives>: Value + FullBlockHeader, { let total_headers = self.header_collector.len(); @@ -180,9 +178,9 @@ impl<Provider, P, D> Stage<Provider> for HeaderStage<P, D> where Provider: DBProvider + StaticFileProviderFactory, - P: HeaderSyncGapProvider<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>, - D: HeaderDownloader<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>, - <Provider::Primitives as NodePrimitives>::BlockHeader: FullBlockHeader + Value, + P: HeaderSyncGapProvider<Header = HeaderTy<Provider::Primitives>>, + D: HeaderDownloader<Header = HeaderTy<Provider::Primitives>
>, + HeaderTy: FullBlockHeader + Value, { /// Return the id of the stage fn id(&self) -> StageId { diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index b09dadd1ea4..4ffbc54f301 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -2,7 +2,7 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_codecs::Compact; use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_provider::{BlockReader, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -39,8 +39,7 @@ where let mut receipts_cursor = provider .tx_ref() - .cursor_read::::Receipt>>( - )?; + .cursor_read::>>()?; let receipts_walker = receipts_cursor.walk_range(block_body_indices.tx_num_range())?; static_file_writer.append_receipts( diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index fac85af5b88..73bc369ee58 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -62,9 +62,15 @@ pub trait DbCursorRO { /// A read-only cursor over the dup table `T`. pub trait DbDupCursorRO { + /// Positions the cursor at the prev KV pair of the table, returning it. + fn prev_dup(&mut self) -> PairResult; + /// Positions the cursor at the next KV pair of the table, returning it. fn next_dup(&mut self) -> PairResult; + /// Positions the cursor at the last duplicate value of the current key. + fn last_dup(&mut self) -> ValueOnlyResult; + /// Positions the cursor at the next KV pair of the table, skipping duplicates. fn next_no_dup(&mut self) -> PairResult; diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 60f69ae8f0d..0878de1e4cd 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -290,12 +290,18 @@ impl DbCursorRO for CursorMock { } impl DbDupCursorRO for CursorMock { - /// Moves to the next duplicate entry. - /// **Mock behavior**: Always returns `None`. + fn prev_dup(&mut self) -> PairResult { + Ok(None) + } + fn next_dup(&mut self) -> PairResult { Ok(None) } + fn last_dup(&mut self) -> ValueOnlyResult { + Ok(None) + } + /// Moves to the next entry with a different key. /// **Mock behavior**: Always returns `None`. fn next_no_dup(&mut self) -> PairResult { diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index d2b7b7f1141..56e36765333 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -10,7 +10,7 @@ use reth_db_api::{tables, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_execution_errors::StateRootError; use reth_primitives_traits::{ - Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry, + Account, Bytecode, GotExpected, HeaderTy, NodePrimitives, SealedHeader, StorageEntry, }; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, @@ -102,7 +102,7 @@ where + MetadataWriter + ChainSpecProvider + AsRef, - PF::ChainSpec: EthChainSpec
<Header = <PF::Primitives as NodePrimitives>::BlockHeader>, + PF::ChainSpec: EthChainSpec<Header = HeaderTy<PF::Primitives>
>, { #[cfg(feature = "edge")] { @@ -136,7 +136,7 @@ where + MetadataWriter + ChainSpecProvider + AsRef, - PF::ChainSpec: EthChainSpec
<Header = <PF::Primitives as NodePrimitives>::BlockHeader>, + PF::ChainSpec: EthChainSpec<Header = HeaderTy<PF::Primitives>
>, { let chain = factory.chain_spec(); @@ -412,7 +412,7 @@ pub fn insert_genesis_header( where Provider: StaticFileProviderFactory> + DBProvider, - Spec: EthChainSpec
<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>, + Spec: EthChainSpec<Header = HeaderTy<Provider::Primitives>
>, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); let static_file_provider = provider.static_file_provider(); diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 5ca6eacb6c7..f432e76642d 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -158,11 +158,25 @@ impl DbCursorRO for Cursor { } impl DbDupCursorRO for Cursor { + /// Returns the previous `(key, value)` pair of a DUPSORT table. + fn prev_dup(&mut self) -> PairResult { + decode::(self.inner.prev_dup()) + } + /// Returns the next `(key, value)` pair of a DUPSORT table. fn next_dup(&mut self) -> PairResult { decode::(self.inner.next_dup()) } + /// Returns the last `value` of the current duplicate `key`. + fn last_dup(&mut self) -> ValueOnlyResult { + self.inner + .last_dup() + .map_err(|e| DatabaseError::Read(e.into()))? + .map(decode_one::) + .transpose() + } + /// Returns the next `(key, value)` pair skipping the duplicates. fn next_no_dup(&mut self) -> PairResult { decode::(self.inner.next_nodup()) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 3565d99d8d9..cb038d983df 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -817,7 +817,6 @@ mod tests { }; use revm_database::{BundleState, OriginalValuesKnown}; use std::{ - collections::BTreeMap, ops::{Bound, Range, RangeBounds}, sync::Arc, }; @@ -1355,8 +1354,8 @@ mod tests { let chain = Chain::new( vec![block_2], ExecutionOutcome::default(), - BTreeMap::new(), - BTreeMap::new(), + Default::default(), + Default::default(), ); let commit = CanonStateNotification::Commit { new: Arc::new(chain.clone()) }; in_memory_state.notify_canon_state(commit.clone()); @@ -1370,8 +1369,8 @@ mod tests { let new_chain = Chain::new( vec![block_3, block_4], ExecutionOutcome::default(), - BTreeMap::new(), - BTreeMap::new(), + Default::default(), + Default::default(), ); let re_org = CanonStateNotification::Reorg { old: Arc::new(chain), new: Arc::new(new_chain) }; diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index f515fc20f67..ac52ed8993c 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -538,9 +538,9 @@ pub struct TrieUpdatesSortedRef<'a> { pub struct TrieUpdatesSorted { /// Sorted collection of updated state nodes with corresponding paths. None indicates that a /// node was removed. - account_nodes: Vec<(Nibbles, Option)>, - /// Storage tries stored by hashed address of the account the trie belongs to. - storage_tries: B256Map, + pub account_nodes: Vec<(Nibbles, Option)>, + /// Map of hashed addresses to their storage tries stored by path. 
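+ /// A consumer can read the map directly; a minimal sketch (names illustrative):
+ /// ```ignore
+ /// for (hashed_address, storage_trie) in &sorted_updates.storage_tries {
+ ///     // walk the per-account sorted storage-node updates
+ /// }
+ /// ```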
+ pub storage_tries: B256Map, } impl TrieUpdatesSorted { diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index d3540adda88..b08d7d89b46 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -96,6 +96,13 @@ test-utils = [ "reth-trie-sparse/test-utils", "reth-stages-types/test-utils", ] +serde-bincode-compat = [ + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", +] [[bench]] name = "hash_post_state" diff --git a/deny.toml b/deny.toml index 98fc336b08b..de3de31e04a 100644 --- a/deny.toml +++ b/deny.toml @@ -8,10 +8,8 @@ ignore = [ "RUSTSEC-2024-0384", # https://rustsec.org/advisories/RUSTSEC-2024-0436 paste! is unmaintained "RUSTSEC-2024-0436", - # https://rustsec.org/advisories/RUSTSEC-2025-0141 bincode is unmaintained, need to transition all deps to wincode first + # https://rustsec.org/advisories/RUSTSEC-2025-0141 bincode is unmaintained, used by reth-nippy-jar "RUSTSEC-2025-0141", - # https://rustsec.org/advisories/RUSTSEC-2026-0002 lru unused directly: - "RUSTSEC-2026-0002", ] # This section is considered when running `cargo deny check bans`. @@ -57,7 +55,6 @@ allow = [ # https://github.com/rustls/webpki/blob/main/LICENSE ISC Style "LicenseRef-rustls-webpki", "CDLA-Permissive-2.0", - "MPL-2.0", ] # Allow 1 or more licenses on a per-crate basis, so that particular licenses @@ -66,7 +63,6 @@ exceptions = [ # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in https://github.com/paradigmxyz/reth/pull/6980 { allow = ["MPL-2.0"], name = "option-ext" }, - { allow = ["MPL-2.0"], name = "webpki-root-certs" }, ] [[licenses.clarify]] diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 882b1f292f0..7adcf9dc9ee 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -103,4 +103,8 @@ - [`op-reth p2p bootnode`](./op-reth/p2p/bootnode.mdx) - [`op-reth config`](./op-reth/config.mdx) - [`op-reth prune`](./op-reth/prune.mdx) - - [`op-reth re-execute`](./op-reth/re-execute.mdx) \ No newline at end of file + - [`op-reth re-execute`](./op-reth/re-execute.mdx) + - [`op-reth proofs`](./op-reth/proofs.mdx) + - [`op-reth proofs init`](./op-reth/proofs/init.mdx) + - [`op-reth proofs prune`](./op-reth/proofs/prune.mdx) + - [`op-reth proofs unwind`](./op-reth/proofs/unwind.mdx) \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth.mdx b/docs/vocs/docs/pages/cli/op-reth.mdx index d0bbd53e2b6..5a12c6ad375 100644 --- a/docs/vocs/docs/pages/cli/op-reth.mdx +++ b/docs/vocs/docs/pages/cli/op-reth.mdx @@ -21,6 +21,7 @@ Commands: config Write config to stdout prune Prune according to the configuration without any limits re-execute Re-execute blocks in parallel to verify historical sync correctness + proofs Manage storage of historical proofs in expanded trie db in fault proof window help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/op-reth/initialize-op-proofs.mdx b/docs/vocs/docs/pages/cli/op-reth/initialize-op-proofs.mdx new file mode 100644 index 00000000000..990db9a156c --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/initialize-op-proofs.mdx @@ -0,0 +1,247 @@ +# op-reth initialize-op-proofs + +Initializes the proofs storage with the current state of the chain + +```bash +$ op-reth initialize-op-proofs --help +``` 
+```txt +Usage: op-reth initialize-op-proofs [OPTIONS] --proofs-history.storage-path + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
+ + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --proofs-history.storage-path + The path to the storage DB for proofs history. + + This should match the path used when starting the node with `--proofs-history.storage-path`. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 102e9c02fc7..28fdcea71ee 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -1065,6 +1065,37 @@ Rollup: When enabled, the flashblock consensus client will process flashblock sequences and submit them to the engine API to advance the chain. Requires `flashblocks_url` to be set. + --proofs-history + If true, initialize external-proofs exex to save and serve trie nodes to provide proofs faster + + --proofs-history.storage-path + The path to the storage DB for proofs history + + --proofs-history.window + The window to span blocks for proofs history. Value is the number of blocks. Default is 1 month of blocks based on 2 seconds block time. 30 * 24 * 60 * 60 / 2 = `1_296_000` + + [default: 1296000] + + --proofs-history.prune-interval + Interval between proof-storage prune runs. Accepts human-friendly durations like "100s", "5m", "1h". Defaults to 15s. + + - Shorter intervals prune smaller batches more often, so each prune run tends to be faster and the blocking pause for writes is shorter, at the cost of more frequent pauses. - Longer intervals prune larger batches less often, which reduces how often pruning runs, but each run can take longer and block writes for longer. + + A shorter interval is preferred so that prune runs stay small and don’t stall writes for too long. 
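+
+      # A hypothetical sketch of these flags used together, not verbatim tool
+      # output; the storage path and values are placeholders. A short prune
+      # interval keeps each prune run small, and a verification interval of
+      # 100 re-executes every 100th block as a periodic integrity check.
+      op-reth node \
+        --proofs-history \
+        --proofs-history.storage-path /var/lib/reth/proofs-history \
+        --proofs-history.prune-interval 30s \
+        --proofs-history.verification-interval 100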
+ + CLI: `--proofs-history.prune-interval 10m` + + [default: 15s] + + --proofs-history.verification-interval + Verification interval: perform full block execution every N blocks for data integrity. - 0: Disabled (Default) (always use fast path with pre-computed data from notifications) - 1: Always verify (always execute blocks, slowest) - N: Verify every Nth block (e.g., 100 = every 100 blocks) + + Periodic verification helps catch data corruption or consensus bugs while maintaining good performance. + + CLI: `--proofs-history.verification-interval 100` + + [default: 0] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/op-reth/proofs.mdx b/docs/vocs/docs/pages/cli/op-reth/proofs.mdx new file mode 100644 index 00000000000..81ba7efad99 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/proofs.mdx @@ -0,0 +1,144 @@ +# op-reth proofs + +Manage storage of historical proofs in expanded trie db in fault proof window + +```bash +$ op-reth proofs --help +``` +```txt +Usage: op-reth proofs [OPTIONS] + +Commands: + init Initialize the proofs storage with the current state of the chain + prune Prune old proof history to reclaim space + unwind Unwind the proofs storage to a specific block + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
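+
+      # A hypothetical maintenance flow using the subcommands listed above,
+      # not verbatim tool output; the storage path and target block number
+      # are placeholders.
+      op-reth proofs init   --proofs-history.storage-path /var/lib/reth/proofs-history
+      op-reth proofs prune  --proofs-history.storage-path /var/lib/reth/proofs-history
+      op-reth proofs unwind --proofs-history.storage-path /var/lib/reth/proofs-history --target 1000000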
+ + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/proofs/init.mdx b/docs/vocs/docs/pages/cli/op-reth/proofs/init.mdx new file mode 100644 index 00000000000..9c27adab740 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/proofs/init.mdx @@ -0,0 +1,260 @@ +# op-reth proofs init + +Initialize the proofs storage with the current state of the chain + +```bash +$ op-reth proofs init --help +``` +```txt +Usage: op-reth proofs init [OPTIONS] --proofs-history.storage-path + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --datadir.pprof-dumps + The absolute path to store pprof dumps in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.blocks-per-file.account-change-sets + Number of blocks per file for the account changesets segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. 
Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.account-change-sets + Store account changesets in static files. + + When enabled, account changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --proofs-history.storage-path + The path to the storage DB for proofs history. + + This should match the path used when starting the node with `--proofs-history.storage-path`. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/proofs/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/proofs/prune.mdx new file mode 100644 index 00000000000..26d45954fba --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/proofs/prune.mdx @@ -0,0 +1,268 @@ +# op-reth proofs prune + +Prune old proof history to reclaim space + +```bash +$ op-reth proofs prune --help +``` +```txt +Usage: op-reth proofs prune [OPTIONS] --proofs-history.storage-path + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --datadir.pprof-dumps + The absolute path to store pprof dumps in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.blocks-per-file.account-change-sets + Number of blocks per file for the account changesets segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.account-change-sets + Store account changesets in static files. + + When enabled, account changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --proofs-history.storage-path + The path to the storage DB for proofs history + + --proofs-history.window + The window to span blocks for proofs history. Value is the number of blocks. Default is 1 month of blocks based on 2 seconds block time. 
30 * 24 * 60 * 60 / 2 = `1_296_000` + + [default: 1296000] + + --proofs-history.prune-batch-size + The batch size for pruning operations + + [default: 1000] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. 
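+
+      # A hypothetical sketch, not verbatim tool output: export traces for a
+      # prune run while sampling 1% of them. The endpoint is the documented
+      # HTTP default; the storage path is a placeholder.
+      op-reth proofs prune \
+        --proofs-history.storage-path /var/lib/reth/proofs-history \
+        --tracing-otlp=http://localhost:4318/v1/traces \
+        --tracing-otlp.sample-ratio=0.01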
+ + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/proofs/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/proofs/unwind.mdx new file mode 100644 index 00000000000..1598fe4f50b --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/proofs/unwind.mdx @@ -0,0 +1,263 @@ +# op-reth proofs unwind + +Unwind the proofs storage to a specific block + +```bash +$ op-reth proofs unwind --help +``` +```txt +Usage: op-reth proofs unwind [OPTIONS] --proofs-history.storage-path --target + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --datadir.pprof-dumps + The absolute path to store pprof dumps in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. 
+ + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.blocks-per-file.account-change-sets + Number of blocks per file for the account changesets segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.account-change-sets + Store account changesets in static files. + + When enabled, account changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --proofs-history.storage-path + The path to the storage DB for proofs history + + --target + The target block number to unwind to. + + All history *after* this block will be removed. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/prune-op-proofs.mdx b/docs/vocs/docs/pages/cli/op-reth/prune-op-proofs.mdx new file mode 100644 index 00000000000..1c0dddf9728 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/prune-op-proofs.mdx @@ -0,0 +1,255 @@ +# op-reth prune-op-proofs + +Prunes the proofs storage by removing old proof history and state updates + +```bash +$ op-reth prune-op-proofs --help +``` +```txt +Usage: op-reth prune-op-proofs [OPTIONS] --proofs-history.storage-path + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. 
+ + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
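+
+      # Sketch of the limit described above: the maximum database size is
+      # 2^31 pages times the page size, so the default 4 KiB page caps the
+      # database at 8 TiB and an 8 KiB page doubles that ceiling.
+      echo $(( (1 << 31) * 4096 ))   #  8796093022208 bytes =  8 TiB
+      echo $(( (1 << 31) * 8192 ))   # 17592186044416 bytes = 16 TiB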
+ + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --proofs-history.storage-path + The path to the storage DB for proofs history + + --proofs-history.window + The window to span blocks for proofs history. Value is the number of blocks. Default is 1 month of blocks based on 2 seconds block time. 30 * 24 * 60 * 60 / 2 = `1_296_000` + + [default: 1296000] + + --proofs-history.prune-batch-size + The batch size for pruning operations + + [default: 1000] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/unwind-op-proofs.mdx b/docs/vocs/docs/pages/cli/op-reth/unwind-op-proofs.mdx new file mode 100644 index 00000000000..12cdeb21f4b --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/unwind-op-proofs.mdx @@ -0,0 +1,250 @@ +# op-reth unwind-op-proofs + +Unwinds the proofs storage to a specific block number + +```bash +$ op-reth unwind-op-proofs --help +``` +```txt +Usage: op-reth unwind-op-proofs [OPTIONS] --proofs-history.storage-path --target + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
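+
+      # A hypothetical invocation sketch, not verbatim tool output: unwind the
+      # proofs storage using the two required flags from the usage line above.
+      # The storage path and block number are placeholder values.
+      op-reth unwind-op-proofs \
+        --proofs-history.storage-path /var/lib/reth/proofs-history \
+        --target 1000000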
+ + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --proofs-history.storage-path + The path to the storage DB for proofs history + + --target + The target block number to unwind to. + + All history *after* this block will be removed. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts index ac5c356f5fc..0bedbb1b075 100644 --- a/docs/vocs/sidebar-cli-op-reth.ts +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -241,6 +241,25 @@ export const opRethCliSidebar: SidebarItem = { { text: "op-reth re-execute", link: "/cli/op-reth/re-execute" + }, + { + text: "op-reth proofs", + link: "/cli/op-reth/proofs", + collapsed: true, + items: [ + { + text: "op-reth proofs init", + link: "/cli/op-reth/proofs/init" + }, + { + text: "op-reth proofs prune", + link: "/cli/op-reth/proofs/prune" + }, + { + text: "op-reth proofs unwind", + link: "/cli/op-reth/proofs/unwind" + } + ] } ] }; diff --git a/etc/grafana/dashboards/op-proof-history.json b/etc/grafana/dashboards/op-proof-history.json new file mode 100644 index 00000000000..7d295851109 --- /dev/null +++ b/etc/grafana/dashboards/op-proof-history.json @@ -0,0 +1,2161 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + }, + { + "name": "DS_EXPRESSION", + "label": "Expression", + "description": "", + "type": "datasource", + "pluginId": "__expr__" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "datasource", + "id": "__expr__", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "12.3.0" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart", + "version": "12.3.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# External Proofs Storage Metrics\n\nThis dashboard monitors the performance of the External Proofs ExEx storage layer.\n\n## Key Concepts:\n\n- Block Processing: End-to-end time to process each block (execution + state root + writes)\n- Per-Item Metrics: Bulk write operations (store_*) report per-item latency, making metrics comparable regardless of batch size\n- Contexts: Operations are tagged by when they occur (execution, state_root, write, metadata)\n- Cursors: Iterator-like access patterns for reading trie 
and hashed state\n\n## What to Watch:\n\n- High write latencies during backfill indicate storage bottlenecks\n- Storage overhead % shows how much time is spent in DB vs EVM execution\n- Cursor operation latencies impact state root calculation performance", + "mode": "markdown" + }, + "pluginVersion": "12.3.0", + "title": "Dashboard Overview", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Shows the current height of the canonical chain vs the latest block processed by the ExEx.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 19, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_blockchain_tree_canonical_chain_height", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Canonical Chain Height", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_trie_block_latest_number", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "ExEx Latest Block Number", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "__expr__", + "uid": "${DS_EXPRESSION}" + }, + "expression": "$A - $B", + "hide": false, + "refId": "Syncing Lag", + "type": "math" + } + ], + "title": "ExEx Sync Status", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Shows the earliest and latest block numbers that are within the current proof window.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 12, + "y": 5 + }, + "id": 9, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_trie_block_earliest_number", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Earliest Block Number", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"reth_optimism_trie_block_latest_number", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Latest Block Number", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Current Proof Window Range", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 2, + "panels": [], + "title": "Block Processing Overview - High Level Performance", + "type": "row" + }, + { + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 12 + }, + "id": 7, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "## 📈 Interpreting Block Metrics\n\nTotal Duration = Combined time for all phases\n\nExecution = EVM execution (non-storage)\n\nState Root = Trie calculation (uses cursors)\n\nWrite = Persisting trie updates to storage\n\nIf Write >> Execution, storage is the bottleneck!", + "mode": "markdown" + }, + "pluginVersion": "12.3.0", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current throughput: how many blocks per second are being processed. During backfill this should be high and steady. During live sync, this matches the chain's block production rate.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 12 + }, + "id": 5, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_optimism_trie_block_total_duration_seconds_count[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Throughput - Blocks/Second", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "What percentage of total time is spent in EVM execution (non-storage). Lower values mean storage is the bottleneck. 
Green is good (fast EVM); red means EVM overhead is high.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 12 + }, + "id": 6, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "100 - (100*rate(reth_optimism_trie_block_write_duration_seconds_sum[$__rate_interval]) / rate(reth_optimism_trie_block_total_duration_seconds_sum[$__rate_interval]))", + "hide": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "D" + } + ], + "title": "EVM Time % (vs Storage Overhead)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average time spent in each phase of block processing over the selected rate interval. Total = Execution (EVM) + State Root (trie calculation) + Write (storage). This shows the overall performance and where time is spent.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Duration", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "D" + }, + "properties": [ + { + "id": "displayName", + "value": "Total" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 3, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_block_total_duration_seconds_sum[$__rate_interval]) / (rate(reth_optimism_trie_block_total_duration_seconds_count[$__rate_interval]))", + "hide": false, + "instant": false, + "legendFormat": "Total", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr":
"rate(reth_optimism_trie_block_execution_duration_seconds_sum[$__rate_interval]) / (rate(reth_optimism_trie_block_execution_duration_seconds_count[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Execution", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_block_state_root_duration_seconds_sum[$__rate_interval]) / (rate(reth_optimism_trie_block_state_root_duration_seconds_count[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_block_write_duration_seconds_sum[$__rate_interval]) / (rate(reth_optimism_trie_block_write_duration_seconds_count[$__rate_interval]))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Write", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Block Processing Duration - Average", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Rate of items written to storage per second during block processing. Shows the volume of updates: account trie branches, storage trie branches, hashed accounts, and hashed storage slots. High rates indicate heavy state changes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Items/sec", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_optimism_trie_block_account_trie_updates_written_total[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "Account Trie Updates", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"rate(reth_optimism_trie_block_storage_trie_updates_written_total[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Storage Trie Updates", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_optimism_trie_block_hashed_accounts_written_total[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Hashed Accounts", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_optimism_trie_block_hashed_storages_written_total[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Hashed Storages", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Items Written per Second - Volume of State Updates", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 8, + "panels": [], + "title": "Storage Operations - Detailed Operation Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "How frequently each storage operation is called. High store rates during backfill are normal. During live sync, you'll see more cursor operations for state root calculation.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Operations/sec", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 28 + }, + "id": 10, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(operation) (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=~\"trie_cursor_current\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "trie_cursor_current", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + 
}, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(operation) (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=~\"trie_cursor_next\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "trie_cursor_next", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(operation) (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=~\"trie_cursor_seek\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "trie_cursor_seek", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(operation) (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=~\"trie_cursor_seek_exact\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "trie_cursor_seek_exact", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(operation) (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=~\"hashed_cursor_next\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "hashed_cursor_next", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(operation) (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=~\"hashed_cursor_seek\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "hashed_cursor_seek", + "range": true, + "refId": "F", + "useBackend": false + } + ], + "title": "Storage Operations Frequency by Type (ops/sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "How much time each storage operation takes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 
28 + }, + "id": 11, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_storage_operation_duration_seconds_sum{operation=\"trie_cursor_current\"}[$__rate_interval]) / (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=\"trie_cursor_current\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "trie_cursor_current", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_storage_operation_duration_seconds_sum{operation=\"trie_cursor_next\"}[$__rate_interval]) / (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=\"trie_cursor_next\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "trie_cursor_next", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_storage_operation_duration_seconds_sum{operation=\"trie_cursor_seek\"}[$__rate_interval]) / (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=\"trie_cursor_seek\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "trie_cursor_seek", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_storage_operation_duration_seconds_sum{operation=\"trie_cursor_seek_exact\"}[$__rate_interval]) / (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=\"trie_cursor_seek_exact\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "trie_cursor_seek_exact", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_storage_operation_duration_seconds_sum{operation=\"hashed_cursor_next\"}[$__rate_interval]) / (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=\"hashed_cursor_next\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "hashed_cursor_next", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_storage_operation_duration_seconds_sum{operation=\"hashed_cursor_seek\"}[$__rate_interval]) / (rate(reth_optimism_trie_storage_operation_duration_seconds_count{operation=\"hashed_cursor_seek\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "hashed_cursor_seek", + "range": true, + "refId": "F", + "useBackend":
false + } + ], + "title": "Storage Operation Latency by Type - Average", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 12, + "panels": [], + "title": "Database Stats", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Size of each table used by the proof history ExEx, computed as num_pages * page_size.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 13, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "sort": "desc", + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_proof_storage_table_size", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "{{table}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Size of the Tables", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "For each table in proof history, this shows the number of rows. This does NOT count entries in `dup_sort` sub-tables.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 37 + }, + "id": 14, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "sort": "desc", + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_proof_storage_table_entries", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "{{table}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Number of Entries", + "type": "piechart" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 15, + "panels": [], + "title": "RPC Server", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "API metric for rate of successful requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", +
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "reqps" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "debug_executionWitness" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 46 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_rpc_eth_api_ext_get_proof_successful_responses[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "eth_getProof", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_rpc_debug_api_ext_successful_responses{api=\"debug_execute_payload\"}[$__rate_interval])", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "debug_executePayload", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_rpc_debug_api_ext_successful_responses{api=\"debug_execution_witness\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "debug_executionWitness", + "range": true, + "refId": "C" + } + ], + "title": "RPC API - Rate of Success", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "API metric for rate of failed requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "reqps" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + 
"names": [ + "debug_executionWitness" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 46 + }, + "id": 24, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_optimism_rpc_eth_api_ext_get_proof_failures[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "eth_getProof", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_rpc_debug_api_ext_failures{api=\"debug_execute_payload\"}[$__rate_interval])", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "debug_executePayload", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_rpc_debug_api_ext_failures{api=\"debug_execution_witness\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "debug_executionWitness", + "range": true, + "refId": "C" + } + ], + "title": "RPC API - Rate of Failures", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "A time series on the latency of the API request", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "eth_getProof" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 46 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "desc" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": 
"reth_optimism_rpc_eth_api_ext_get_proof_latency{quantile=\"1.0\"}", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "interval": "", + "legendFormat": "eth_getProof", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_optimism_rpc_debug_api_ext_latency{api=\"debug_execute_payload\", quantile=\"1.0\"}", + "hide": false, + "instant": false, + "legendFormat": "debug_executePayload", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_optimism_rpc_debug_api_ext_latency{api=\"debug_execution_witness\", quantile=\"1.0\"}", + "hide": false, + "instant": false, + "legendFormat": "debug_executionWitness", + "range": true, + "refId": "B" + } + ], + "title": "RPC API - Latency ", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 21, + "panels": [], + "title": "Prune Stats", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Shows different Prune Durations in a time series", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Duration", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "dtdurations" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 53 + }, + "id": 22, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_pruner_prune_duration_seconds_sum[$__rate_interval])/rate(reth_optimism_trie_pruner_prune_duration_seconds_count[$__rate_interval])", + "legendFormat": "Prune Duration", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_optimism_trie_pruner_state_diff_fetch_duration_seconds_sum[$__rate_interval])/rate(reth_optimism_trie_pruner_state_diff_fetch_duration_seconds_count[$__rate_interval])\n", + "hide": false, + "instant": false, + "legendFormat": "State Diff Fetch Duration", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": 
"rate(reth_optimism_trie_pruner_total_duration_seconds_sum[$__rate_interval])/rate(reth_optimism_trie_pruner_total_duration_seconds_count[$__rate_interval])\n", + "hide": false, + "instant": false, + "legendFormat": "Total Duration", + "range": true, + "refId": "C" + } + ], + "title": "Prune Duration Over Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Shows Prune in depth stats in time series", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Count", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 23, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_optimism_trie_pruner_pruned_blocks", + "legendFormat": "Pruned Blocks", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_optimism_trie_pruner_account_trie_updates_written", + "hide": false, + "instant": false, + "legendFormat": "Account Trie Nodes", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_optimism_trie_pruner_storage_trie_updates_written", + "hide": false, + "instant": false, + "legendFormat": "Storage Trie Nodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_optimism_trie_pruner_hashed_accounts_written", + "hide": false, + "instant": false, + "legendFormat": "Hashed Accounts", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_optimism_trie_pruner_hashed_storages_written", + "hide": false, + "instant": false, + "legendFormat": "Hashed Storage", + "range": true, + "refId": "E" + } + ], + "title": "Prune State Over Time", + "type": "timeseries" + } + ], + "preload": false, + "refresh": "", + "schemaVersion": 42, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "OP Proofs History", + "uid": "af5do16b25ptsaa", + "version": 2, + "weekStart": "" +} \ No newline at end of file diff --git 
a/etc/grafana/dashboards/op-reth.json b/etc/grafana/dashboards/op-reth.json new file mode 100644 index 00000000000..755769a0c13 --- /dev/null +++ b/etc/grafana/dashboards/op-reth.json @@ -0,0 +1,1046 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "12.2.1" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Optimism reth metrics", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 100, + "panels": [], + "title": "RPC Server", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Maximum DA transaction size configured on the miner", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 101, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {}, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_optimism_rpc_miner_max_da_tx_size{instance=~\"$instance\"}", + "legendFormat": "Max DA TX Size", + "range": true, + "refId": "A" + } + ], + "title": "Miner Max DA Transaction Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Maximum DA block size configured on the miner", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 102, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {}, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_optimism_rpc_miner_max_da_block_size{instance=~\"$instance\"}", + "legendFormat": "Max DA Block Size", + "range": true, + "refId": "A" + } + ], + "title": "Miner Max DA Block Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Latency histogram for forwarding a transaction to the Sequencer", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 210, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_rpc_sequencer_sequencer_forward_latency{instance=~\"$instance\", quantile=\"0\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "min", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_rpc_sequencer_sequencer_forward_latency{instance=~\"$instance\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p50", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_rpc_sequencer_sequencer_forward_latency{instance=~\"$instance\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_rpc_sequencer_sequencer_forward_latency{instance=~\"$instance\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p95", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_optimism_rpc_sequencer_sequencer_forward_latency{instance=~\"$instance\", quantile=\"0.99\"}", + "fullMetaSearch": 
false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p99", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Sequencer Transaction Forward Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "API metric for rate of failed requests per min", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "op-reth" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 9 + }, + "id": 302, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_optimism_rpc_eth_api_ext_get_proof_failures{instance=~\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "{{client_name}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "eth_getProof - Rate of Failures", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "API metric for rate of successful requests per min.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 6, + "y": 9 + }, + "id": 303, + 
"options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_optimism_rpc_eth_api_ext_get_proof_successful_responses{instance=~\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "{{client_name}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "eth_getProof - Rate of Success", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "A time series on the latency of the API request", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 304, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_optimism_rpc_eth_api_ext_get_proof_latency{quantile=\"0.99\", instance=~\"$instance\"}", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "eth_getProof - Latency (p99)", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 300, + "panels": [], + "title": "Transaction Pool", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of conditional transactions removed from the pool per second due to exceeded block attributes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": 
"linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 301, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_transaction_pool_removed_tx_conditional{instance=~\"$instance\"}[$__rate_interval])", + "legendFormat": "Removed transactions", + "range": true, + "refId": "A" + } + ], + "title": "Conditional Transactions Removed", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 305, + "panels": [], + "title": "Payload Builder", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Length of a completed flashblock sequence per block.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 8 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 211, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_flashblock_service_last_flashblock_length{instance=~\"$instance\", quantile=\"0\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "min", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_flashblock_service_last_flashblock_length{instance=~\"$instance\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p50", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": 
"builder", + "expr": "reth_flashblock_service_last_flashblock_length{instance=~\"$instance\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_flashblock_service_last_flashblock_length{instance=~\"$instance\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p95", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_flashblock_service_last_flashblock_length{instance=~\"$instance\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "p99", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Flashblock Sequence Length", + "type": "timeseries" + } + ], + "refresh": "5s", + "tags": [], + "schemaVersion": 42, + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "includeAll": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "type": "query" + } + ] + }, + "time": { + "from": "now-12h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "OP-Reth", + "uid": "8438c957-55f5-44df-869d-a9a30a3c9a97", + "version": 1, + "weekStart": "", + "id": null +} \ No newline at end of file diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index a26ce1594a2..00f2b9a241e 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -51,7 +51,7 @@ use reth_ethereum::{ EthEvmConfig, EthereumEthApiBuilder, }, pool::{PoolTransaction, TransactionPool}, - primitives::{Block, SealedBlock}, + primitives::{Block, BlockTy, SealedBlock}, provider::{EthStorage, StateProviderFactory}, rpc::types::engine::ExecutionPayload, tasks::TaskManager, @@ -154,8 +154,8 @@ impl PayloadTypes for CustomEngineTypes { fn block_to_payload( block: SealedBlock< - <::Primitives as reth_ethereum::node::api::NodePrimitives>::Block, - >, + BlockTy<::Primitives>, + >, ) -> ExecutionData { let (payload, sidecar) = ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index d6d363db356..be884ddc76b 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -12,17 +12,20 @@ use reth_ethereum::{ node::api::{ validate_version_specific_fields, AddOnsContext, BuiltPayload, BuiltPayloadExecutedBlock, EngineApiMessageVersion, EngineObjectValidationError, ExecutionPayload, FullNodeComponents, - NewPayloadError, NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, - PayloadOrAttributes, PayloadTypes, PayloadValidator, + NewPayloadError, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, + PayloadTypes, PayloadValidator, }, primitives::SealedBlock, storage::StateProviderFactory, trie::{KeccakKeyHasher, KeyHasher}, }; use 
-use reth_op::node::{
-    engine::OpEngineValidator, payload::OpAttributes, OpBuiltPayload, OpEngineTypes,
-    OpPayloadAttributes, OpPayloadBuilderAttributes,
+use reth_op::{
+    node::{
+        engine::OpEngineValidator, payload::OpAttributes, OpBuiltPayload, OpEngineTypes,
+        OpPayloadAttributes, OpPayloadBuilderAttributes,
+    },
+    primitives::{BlockTy, NodePrimitives},
 };
 use revm_primitives::U256;
 use serde::{Deserialize, Serialize};
@@ -178,7 +181,7 @@ pub struct CustomBuiltPayload(pub OpBuiltPayload);
 
 impl BuiltPayload for CustomBuiltPayload {
     type Primitives = CustomNodePrimitives;
 
-    fn block(&self) -> &SealedBlock<<Self::Primitives as NodePrimitives>::Block> {
+    fn block(&self) -> &SealedBlock<BlockTy<Self::Primitives>> {
         self.0.block()
     }
@@ -210,9 +213,7 @@
     type PayloadBuilderAttributes = CustomPayloadBuilderAttributes;
 
     fn block_to_payload(
-        block: SealedBlock<
-            <<Self as PayloadTypes>::Primitives as NodePrimitives>::Block,
-        >,
+        block: SealedBlock<BlockTy<<Self as PayloadTypes>::Primitives>>,
     ) -> Self::ExecutionData {
         let extension = block.header().extension;
         let block_hash = block.hash();
diff --git a/mise.toml b/mise.toml
new file mode 100644
index 00000000000..f5e9615a01f
--- /dev/null
+++ b/mise.toml
@@ -0,0 +1,2 @@
+[tools]
+"ubi:kurtosis-tech/kurtosis-cli-release-artifacts[exe=kurtosis]" = "1.8.1"