20 changes: 1 addition & 19 deletions crates/optimism/exex/src/lib.rs
@@ -8,10 +8,7 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]

use alloy_consensus::{
private::alloy_primitives::{Address, B256},
BlockHeader,
};
use alloy_consensus::BlockHeader;
use alloy_eips::eip1898::BlockWithParent;
use derive_more::Constructor;
use futures_util::TryStreamExt;
@@ -211,11 +208,9 @@ where

match &notification {
ExExNotification::ChainCommitted { new } => {
self.store_address_mappings(new.clone()).await?;
self.handle_chain_committed(new.clone(), latest_stored, collector).await?
}
ExExNotification::ChainReorged { old, new } => {
self.store_address_mappings(new.clone()).await?;
self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector)
.await?
}
@@ -405,17 +400,4 @@ where
collector.unwind_history(old.first().block_with_parent()).await?;
Ok(())
}

async fn store_address_mappings(&self, chain: Arc<Chain<Primitives>>) -> eyre::Result<()> {
let mappings: Vec<(B256, Address)> = chain
.execution_outcome()
.accounts_iter()
.filter_map(|(address, info)| info.map(|info| (info.code_hash, address)))
.collect();

self.storage
.store_address_mappings(mappings)
.await
.map_err(|err| eyre::eyre!("Failed to store address mappings: {}", err))
}
}
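For context on what the removed ExEx hook used to compute: a minimal, self-contained sketch that mirrors the deleted `filter_map` over `execution_outcome().accounts_iter()`, pairing each surviving account's code hash with its plain address. `AccountInfoStub` is a hypothetical stand-in for the real account-info type, which is not reproduced here.

```rust
use alloy_primitives::{Address, B256};

/// Stand-in for the account info yielded by `execution_outcome().accounts_iter()`;
/// the real type also carries nonce, balance, etc.
struct AccountInfoStub {
    code_hash: B256,
}

/// Mirrors the deleted hook body: drop destroyed accounts (`None`) and pair
/// each remaining account's code hash with its plain address.
fn code_hash_mappings(
    accounts: impl Iterator<Item = (Address, Option<AccountInfoStub>)>,
) -> Vec<(B256, Address)> {
    accounts
        .filter_map(|(address, info)| info.map(|info| (info.code_hash, address)))
        .collect()
}

fn main() {
    let touched = vec![
        (Address::repeat_byte(0x01), Some(AccountInfoStub { code_hash: B256::repeat_byte(0xaa) })),
        (Address::repeat_byte(0x02), None), // destroyed account: skipped
    ];
    let mappings = code_hash_mappings(touched.into_iter());
    assert_eq!(mappings, vec![(B256::repeat_byte(0xaa), Address::repeat_byte(0x01))]);
}
```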
8 changes: 1 addition & 7 deletions crates/optimism/trie/src/api.rs
@@ -2,7 +2,7 @@

use crate::OpProofsStorageResult;
use alloy_eips::eip1898::BlockWithParent;
use alloy_primitives::{map::HashMap, Address, B256, U256};
use alloy_primitives::{map::HashMap, B256, U256};
use auto_impl::auto_impl;
use derive_more::{AddAssign, Constructor};
use reth_primitives_traits::Account;
@@ -110,12 +110,6 @@ pub trait OpProofsStore: Send + Sync + Debug {
storages: Vec<(B256, U256)>,
) -> impl Future<Output = OpProofsStorageResult<()>> + Send;

/// Store a batch of address mappings from hashed to original addresses.
fn store_address_mappings(
&self,
mappings: Vec<(B256, Address)>,
) -> impl Future<Output = OpProofsStorageResult<()>> + Send;

/// Get the earliest block number and hash that has been stored
///
/// This is used to determine the block number of trie nodes with block number 0.
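With the reverse lookup gone from the `OpProofsStore` trait, only the forward direction remains a pure computation. A minimal sketch, assuming nothing beyond `keccak256` from `alloy_primitives` (already used elsewhere in this PR), of how the hashed key used by the hashed-state tables relates to a plain address:

```rust
use alloy_primitives::{keccak256, Address, B256};

/// The hashed-state tables are keyed by `keccak256(address)`; going from a
/// plain address to its hashed key stays a pure computation, while the
/// reverse mapping was what the removed `store_address_mappings` provided.
fn hashed_account_key(address: Address) -> B256 {
    keccak256(address)
}

fn main() {
    let addr = Address::repeat_byte(0x42);
    println!("hashed key for {addr}: {}", hashed_account_key(addr));
}
```

The deleted trait method covered the opposite direction (hashed to plain), which cannot be recomputed from the hash and therefore needed its own table.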
56 changes: 1 addition & 55 deletions crates/optimism/trie/src/backfill.rs
@@ -1,7 +1,7 @@
//! Backfill job for proofs storage. Handles storing the existing state into the proofs storage.

use crate::{OpProofsStorageError, OpProofsStore};
use alloy_primitives::{keccak256, Address, B256};
use alloy_primitives::B256;
use reth_db::{
cursor::{DbCursorRO, DbDupCursorRO},
tables,
@@ -82,7 +82,6 @@ macro_rules! define_dup_cursor_iter {

// Generate iterators for all 4 table types
define_simple_cursor_iter!(HashedAccountsIter, tables::HashedAccounts, B256, Account);
define_simple_cursor_iter!(AddressLookupIter, tables::PlainAccountState, Address, Account);
define_dup_cursor_iter!(HashedStoragesIter, tables::HashedStorages, B256, StorageEntry);
define_simple_cursor_iter!(
AccountsTrieIter,
@@ -267,30 +266,6 @@ impl<'a, Tx: DbTx, S: OpProofsStore + Send> BackfillJob<'a, Tx, S> {
Ok(())
}

/// Backfill address mappings data
async fn backfill_address_mappings(&self) -> Result<(), OpProofsStorageError> {
let start_cursor = self.tx.cursor_read::<tables::PlainAccountState>()?;

let source = AddressLookupIter::new(start_cursor)
.map(|res| res.map(|(addr, _)| (keccak256(addr), addr)));
let storage = &self.storage;
let save_fn = async |entries: Vec<(B256, Address)>| -> Result<(), OpProofsStorageError> {
storage.store_address_mappings(entries).await?;
Ok(())
};

backfill(
"address mappings",
source,
BACKFILL_STORAGE_THRESHOLD,
BACKFILL_LOG_THRESHOLD,
save_fn,
)
.await?;

Ok(())
}

/// Backfill accounts trie data
async fn backfill_accounts_trie(&self) -> Result<(), OpProofsStorageError> {
let start_cursor = self.tx.cursor_read::<tables::AccountsTrie>()?;
@@ -359,7 +334,6 @@ impl<'a, Tx: DbTx, S: OpProofsStore + Send> BackfillJob<'a, Tx, S> {
async fn backfill_trie(&self) -> Result<(), OpProofsStorageError> {
self.backfill_hashed_accounts().await?;
self.backfill_hashed_storages().await?;
self.backfill_address_mappings().await?;
self.backfill_storages_trie().await?;
self.backfill_accounts_trie().await?;

@@ -552,34 +526,6 @@ mod tests {
assert_eq!(count, 3);
}

#[tokio::test]
async fn test_backfill_address_mappings() {
let db = create_test_rw_db();
let storage = InMemoryProofsStorage::new();

// Insert test address mappings into database
let tx = db.tx_mut().unwrap();
let mut cursor = tx.cursor_write::<tables::PlainAccountState>().unwrap();

let accounts = vec![
(Address::repeat_byte(0x01), Account { nonce: 1, ..Default::default() }),
(Address::repeat_byte(0x02), Account { nonce: 2, ..Default::default() }),
];

for (addr, account) in &accounts {
cursor.append(*addr, account).unwrap();
}
drop(cursor);
tx.commit().unwrap();

// Run backfill
let tx = db.tx().unwrap();
let job = BackfillJob::new(storage.clone(), &tx);
job.backfill_address_mappings().await.unwrap();

// Todo: Verify data was stored
}

#[tokio::test]
async fn test_backfill_storages_trie() {
let db = create_test_rw_db();
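The deleted `backfill_address_mappings` job used the same generic `backfill` helper as the remaining jobs. A sketch of the batching pattern it relies on, with a simplified, hypothetical signature (the real helper also takes a log threshold and a label for progress logging):

```rust
/// Simplified sketch of a batching backfill loop: drain a fallible source,
/// buffer entries, and flush every `batch_size` items plus once at the end.
async fn backfill_batched<T, E, F, Fut>(
    source: impl Iterator<Item = Result<T, E>>,
    batch_size: usize,
    mut save: F,
) -> Result<(), E>
where
    F: FnMut(Vec<T>) -> Fut,
    Fut: std::future::Future<Output = Result<(), E>>,
{
    let mut batch = Vec::with_capacity(batch_size);
    for entry in source {
        batch.push(entry?);
        if batch.len() >= batch_size {
            // Hand the full batch to the save function and start a fresh one.
            save(std::mem::take(&mut batch)).await?;
        }
    }
    if !batch.is_empty() {
        save(batch).await?;
    }
    Ok(())
}
```

The deleted job fed `PlainAccountState` entries, mapped to `(keccak256(addr), addr)`, into this shape with `BACKFILL_STORAGE_THRESHOLD` as the batch size.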
8 changes: 1 addition & 7 deletions crates/optimism/trie/src/db/models/mod.rs
@@ -16,7 +16,7 @@ pub(crate) mod kv;
pub use change_set::*;
pub use kv::*;

use alloy_primitives::{Address, B256};
use alloy_primitives::B256;
use reth_db::{
table::{DupSort, TableInfo},
tables, TableSet, TableType, TableViewer,
@@ -81,10 +81,4 @@ tables! {
type Key = u64; // Block number
type Value = ChangeSet;
}

/// A mapping table from hashed addresses to their original addresses.
table AddressLookup {
type Key = B256;
type Value = Address;
}
}
191 changes: 5 additions & 186 deletions crates/optimism/trie/src/db/store.rs
@@ -4,16 +4,16 @@ use crate::{
db::{
cursor::Dup,
models::{
kv::IntoKV, AccountTrieHistory, AddressLookup, BlockChangeSet, ChangeSet,
HashedAccountHistory, HashedStorageHistory, HashedStorageKey, MaybeDeleted,
StorageTrieHistory, StorageTrieKey, StorageValue, VersionedValue,
kv::IntoKV, AccountTrieHistory, BlockChangeSet, ChangeSet, HashedAccountHistory,
HashedStorageHistory, HashedStorageKey, MaybeDeleted, StorageTrieHistory,
StorageTrieKey, StorageValue, VersionedValue,
},
MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor,
},
BlockStateDiff, OpProofsStorageError, OpProofsStorageResult, OpProofsStore,
};
use alloy_eips::{eip1898::BlockWithParent, NumHash};
use alloy_primitives::{map::HashMap, Address, B256, U256};
use alloy_primitives::{map::HashMap, B256, U256};
#[cfg(feature = "metrics")]
use metrics::{gauge, Label};
use reth_db::{
@@ -519,27 +519,6 @@ impl OpProofsStore for MdbxProofsStorage {
})?
}

async fn store_address_mappings(
&self,
mappings: Vec<(B256, Address)>,
) -> OpProofsStorageResult<()> {
let mut mappings = mappings;
if mappings.is_empty() {
return Ok(());
}

// sort the mappings by key to ensure insertion is efficient
mappings.sort_by_key(|(key, _)| *key);

self.env.update(|tx| {
let mut cur = tx.cursor_write::<AddressLookup>()?;
for (k, v) in mappings {
cur.upsert(k, &v)?;
}
Ok(())
})?
}

async fn get_earliest_block_number(&self) -> OpProofsStorageResult<Option<(u64, B256)>> {
self.env.view(|tx| self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock))?
}
@@ -927,7 +906,7 @@ mod tests {
StorageTrieKey,
};
use alloy_eips::NumHash;
use alloy_primitives::{keccak256, B256};
use alloy_primitives::B256;
use reth_db::{
cursor::DbDupCursorRO,
transaction::{DbTx, DbTxMut},
@@ -1348,166 +1327,6 @@
}
}

#[tokio::test]
async fn test_store_address_mappings() {
let dir = TempDir::new().unwrap();
let store = MdbxProofsStorage::new(dir.path()).expect("env");

let a1 = Address::random();
let h1 = keccak256(a1);
let a2 = Address::random();
let h2 = keccak256(a2);
let a3 = Address::random();
let h3 = keccak256(a3);

// Input is unsorted to verify the method sorts them before appending
// (MDBX append requires sorted keys)
let mappings = vec![(h3, a3), (h1, a1), (h2, a2)];

store.store_address_mappings(mappings).await.expect("store");

let tx = store.env.tx().expect("ro tx");
let mut cur = tx.cursor_read::<AddressLookup>().expect("cursor");

// Verify h1
let v1 = cur.seek_exact(h1).expect("seek").expect("exists");
assert_eq!(v1, (h1, a1));

// Verify h2
let v2 = cur.seek_exact(h2).expect("seek").expect("exists");
assert_eq!(v2, (h2, a2));

// Verify h3
let v3 = cur.seek_exact(h3).expect("seek").expect("exists");
assert_eq!(v3, (h3, a3));
}

#[tokio::test]
async fn test_store_address_mappings_with_existing_entries() {
let dir = TempDir::new().unwrap();
let store = MdbxProofsStorage::new(dir.path()).expect("env");

// --- existing entries in db (high keys) ---
let a200 = Address::repeat_byte(200);
let a201 = Address::repeat_byte(201);
let a202 = Address::repeat_byte(202);

let mut k200 = [0u8; 32];
k200[31] = 200;
let k200 = B256::from(k200);

let mut k201 = [0u8; 32];
k201[31] = 201;
let k201 = B256::from(k201);

let mut k202 = [0u8; 32];
k202[31] = 202;
let k202 = B256::from(k202);

store
.store_address_mappings(vec![(k200, a200), (k201, a201), (k202, a202)])
.await
.expect("store existing");

// --- add more entries (contains smaller keys than existing max, and is unsorted) ---
let a9 = Address::repeat_byte(9);
let a10 = Address::repeat_byte(10);
let a203 = Address::repeat_byte(203);

let mut k9 = [0u8; 32];
k9[31] = 9;
let k9 = B256::from(k9);

let mut k10 = [0u8; 32];
k10[31] = 10;
let k10 = B256::from(k10);

let mut k203 = [0u8; 32];
k203[31] = 203;
let k203 = B256::from(k203);

// Unsorted on purpose
let new_mappings = vec![(k10, a10), (k203, a203), (k9, a9)];

// Requirement: should not fail even if new keys are not guaranteed > existing ones
store.store_address_mappings(new_mappings).await.expect("store new");

// --- verify all keys exist ---
let tx = store.env.tx().expect("ro tx");
let mut cur = tx.cursor_read::<AddressLookup>().expect("cursor");

let v9 = cur.seek_exact(k9).expect("seek").expect("exists");
assert_eq!(v9, (k9, a9));

let v10 = cur.seek_exact(k10).expect("seek").expect("exists");
assert_eq!(v10, (k10, a10));

let v200 = cur.seek_exact(k200).expect("seek").expect("exists");
assert_eq!(v200, (k200, a200));

let v201 = cur.seek_exact(k201).expect("seek").expect("exists");
assert_eq!(v201, (k201, a201));

let v202 = cur.seek_exact(k202).expect("seek").expect("exists");
assert_eq!(v202, (k202, a202));

let v203 = cur.seek_exact(k203).expect("seek").expect("exists");
assert_eq!(v203, (k203, a203));
}

#[tokio::test]
async fn test_store_address_mappings_idempotent() {
let dir = TempDir::new().unwrap();
let store = MdbxProofsStorage::new(dir.path()).expect("env");

let a1 = Address::repeat_byte(1);
let a2 = Address::repeat_byte(2);
let a3 = Address::repeat_byte(3);

let mut k1 = [0u8; 32];
k1[31] = 1;
let k1 = B256::from(k1);

let mut k2 = [0u8; 32];
k2[31] = 2;
let k2 = B256::from(k2);

let mut k3 = [0u8; 32];
k3[31] = 3;
let k3 = B256::from(k3);

let mappings = vec![(k3, a3), (k1, a1), (k2, a2)]; // unsorted on purpose

// First insert
store.store_address_mappings(mappings.clone()).await.expect("store first");

// Re-add exact same entries
store.store_address_mappings(mappings).await.expect("store second");

// Verify values are correct
let tx = store.env.tx().expect("ro tx");
let mut cur = tx.cursor_read::<AddressLookup>().expect("cursor");

let v1 = cur.seek_exact(k1).expect("seek").expect("exists");
assert_eq!(v1, (k1, a1));

let v2 = cur.seek_exact(k2).expect("seek").expect("exists");
assert_eq!(v2, (k2, a2));

let v3 = cur.seek_exact(k3).expect("seek").expect("exists");
assert_eq!(v3, (k3, a3));

// Ensure no duplicates were created: table length should be exactly 3.
let mut count = 0usize;
if let Some(_first) = cur.first().expect("first") {
count += 1;
while let Some(_next) = cur.next().expect("next") {
count += 1;
}
}
assert_eq!(count, 3, "re-adding same mappings should not create duplicate rows");
}

#[tokio::test]
async fn test_store_trie_updates_comprehensive() {
let dir = TempDir::new().unwrap();
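The removed `store_address_mappings` implementation and its tests above exercised a general MDBX write pattern: sort the batch by key, then walk a write cursor and `upsert` each pair, so inserts land near the cursor position and re-adding the same entries stays idempotent. A minimal sketch of that pattern, with the cursor stubbed behind a hypothetical trait since the real `reth_db` cursor API is not reproduced here:

```rust
use alloy_primitives::{Address, B256};

/// Hypothetical stand-in for a keyed write cursor; only the part of the API
/// the pattern needs.
trait KvUpsert {
    type Error;
    fn upsert(&mut self, key: B256, value: &Address) -> Result<(), Self::Error>;
}

/// Sort the batch by key before upserting so consecutive writes stay close in
/// the B-tree, which is what the removed impl's "sort the mappings by key"
/// comment was about. Upserting (rather than appending) keeps repeat writes
/// of the same keys from failing or duplicating rows.
fn upsert_sorted<C: KvUpsert>(
    cursor: &mut C,
    mut mappings: Vec<(B256, Address)>,
) -> Result<(), C::Error> {
    mappings.sort_by_key(|(key, _)| *key);
    for (key, value) in mappings {
        cursor.upsert(key, &value)?;
    }
    Ok(())
}
```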