diff --git a/.github/workflows/pr-main_l2.yaml b/.github/workflows/pr-main_l2.yaml index 0712a5d0172..e0557cad6c7 100644 --- a/.github/workflows/pr-main_l2.yaml +++ b/.github/workflows/pr-main_l2.yaml @@ -138,7 +138,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} tags: ethrex:main-l2 artifact_path: ethrex_image_l2.tar - build_args: BUILD_FLAGS=--features l2 + build_args: BUILD_FLAGS=--features l2,l2-sql - name: Upload artifacts uses: actions/upload-artifact@v4 diff --git a/cmd/ethrex/l2/command.rs b/cmd/ethrex/l2/command.rs index 757b39d9a4e..1ce205a6e97 100644 --- a/cmd/ethrex/l2/command.rs +++ b/cmd/ethrex/l2/command.rs @@ -8,15 +8,21 @@ use crate::{ }, utils::{self, default_datadir, init_datadir, parse_private_key}, }; +use bytes::Bytes; use clap::{FromArgMatches, Parser, Subcommand}; -use ethrex_common::utils::keccak; +use ethrex_blockchain::{ + Blockchain, BlockchainOptions, BlockchainType, L2Config, fork_choice::apply_fork_choice, +}; use ethrex_common::{ - Address, H256, U256, - types::{BYTES_PER_BLOB, BlobsBundle, BlockHeader, batch::Batch, bytes_from_blob}, + Address, U256, + types::{BYTES_PER_BLOB, Block, blobs_bundle, bytes_from_blob, fee_config::FeeConfig}, }; +use ethrex_common::{types::BlobsBundle, utils::keccak}; use ethrex_config::networks::Network; -use ethrex_l2_common::{calldata::Value, l1_messages::get_l1_message_hash, state_diff::StateDiff}; +use ethrex_l2::utils::state_reconstruct::get_batch; +use ethrex_l2_common::calldata::Value; use ethrex_l2_sdk::call_contract; +use ethrex_rlp::decode::RLPDecode as _; use ethrex_rpc::{ EthClient, clients::beacon::BeaconClient, types::block_identifier::BlockIdentifier, }; @@ -138,8 +144,12 @@ pub enum Command { blobs_dir: PathBuf, #[arg(short = 's', long, help = "The path to the store.")] store_path: PathBuf, - #[arg(short = 'c', long, help = "Address of the L2 proposer coinbase")] - coinbase: Address, + #[arg( + short = 'o', + long, + help = "Whether Osaka fork is activated or not. 
If None, it assumes it is active." + )] + osaka_activated: Option, }, #[command(about = "Reverts unverified batches.")] RevertBatch { @@ -370,7 +380,7 @@ impl Command { genesis, blobs_dir, store_path, - coinbase, + osaka_activated, } => { #[cfg(feature = "rocksdb")] let store_type = EngineType::RocksDB; @@ -389,21 +399,13 @@ impl Command { .await?; let rollup_store = - StoreRollup::new(&store_path.join("./rollup_store"), rollup_store_type)?; + StoreRollup::new(&store_path.join("rollup_store"), rollup_store_type)?; rollup_store .init() .await .map_err(|e| format!("Failed to init rollup store: {e}")) .unwrap(); - // Get genesis - let genesis_header = store.get_block_header(0)?.expect("Genesis block not found"); - - let mut current_state_root = genesis_header.state_root; - - let mut last_block_number = 0; - let mut new_canonical_blocks = vec![]; - // Iterate over each blob let files: Vec = read_dir(blobs_dir)?.try_collect()?; for (file_number, file) in files @@ -418,100 +420,116 @@ impl Command { panic!("Invalid blob size"); } - // Decode state diff from blob let blob = bytes_from_blob(blob.into()); - let state_diff = StateDiff::decode(&blob)?; - // Apply all account updates to trie - let mut trie = store.open_direct_state_trie(current_state_root)?; + // Decode blocks + let blocks_count = u64::from_be_bytes( + blob[0..8].try_into().expect("Failed to get blob length"), + ); - let account_updates = state_diff.to_account_updates(&trie)?; + let mut buf = &blob[8..]; + let mut blocks = Vec::new(); + for _ in 0..blocks_count { + let (item, rest) = Block::decode_unfinished(buf)?; + blocks.push(item); + buf = rest; + } - let account_updates_list = store - .apply_account_updates_from_trie_batch(&mut trie, account_updates.values()) - .map_err(|e| format!("Error applying account updates: {e}")) - .unwrap(); + // Decode fee configs + let mut fee_configs = Vec::new(); - store - .open_direct_state_trie(current_state_root)? 
- .db() - .put_batch(account_updates_list.state_updates)?; + for _ in 0..blocks_count { + let (consumed, fee_config) = FeeConfig::decode(buf)?; + fee_configs.push(fee_config); + buf = &buf[consumed..]; + } - current_state_root = account_updates_list.state_trie_hash; + // Create blockchain to execute blocks + let blockchain_type = + ethrex_blockchain::BlockchainType::L2(L2Config::default()); + let opts = BlockchainOptions { + r#type: blockchain_type, + ..Default::default() + }; + let blockchain = Blockchain::new(store.clone(), opts); - store - .write_storage_trie_nodes_batch(account_updates_list.storage_updates) - .await?; + for (i, block) in blocks.iter().enumerate() { + // Update blockchain with the block's fee config + let fee_config = fee_configs + .get(i) + .cloned() + .ok_or_eyre("Fee config not found for block")?; - store - .write_account_code_batch(account_updates_list.code_updates) - .await?; + let BlockchainType::L2(l2_config) = &blockchain.options.r#type else { + panic!("Invalid blockchain type. Expected L2."); + }; - // Get withdrawal hashes - let message_hashes = state_diff - .l1_messages - .iter() - .map(get_l1_message_hash) - .collect(); - - // Get the first block of the batch - let first_block_number = last_block_number + 1; - - // Build the header of the last block. - // Note that its state_root is the root of new_trie. - let new_block = BlockHeader { - coinbase, - state_root: account_updates_list.state_trie_hash, - ..state_diff.last_header - }; + { + let Ok(mut fee_config_guard) = l2_config.fee_config.write() else { + panic!("Fee config lock was poisoned."); + }; - // Store last block. - let new_block_hash = new_block.hash(); - store - .add_block_header(new_block_hash, new_block.clone()) - .await?; - store - .add_block_number(new_block_hash, state_diff.last_header.number) - .await?; - new_canonical_blocks.push((state_diff.last_header.number, new_block_hash)); - println!( - "Stored last block of blob. Block {}. 
State root {}", - new_block.number, new_block.state_root - ); + *fee_config_guard = fee_config; + } + + // Execute block + blockchain.add_block(block.clone())?; + + // Add fee config to rollup store + rollup_store + .store_fee_config_by_block(block.header.number, fee_config) + .await?; + + info!( + "Added block {} with hash {:#x}", + block.header.number, + block.hash(), + ); + } + // Apply fork choice + let latest_hash_on_batch = blocks.last().ok_or_eyre("Batch is empty")?.hash(); + apply_fork_choice( + &store, + latest_hash_on_batch, + latest_hash_on_batch, + latest_hash_on_batch, + ) + .await?; + + // Prepare batch sealing + let blob = blobs_bundle::blob_from_bytes(Bytes::copy_from_slice(&blob)) + .expect("Failed to create blob from bytes; blob was just read from file"); - last_block_number = new_block.number; - - let batch = Batch { - number: batch_number, - first_block: first_block_number, - last_block: new_block.number, - state_root: new_block.state_root, - privileged_transactions_hash: H256::zero(), - message_hashes, - blobs_bundle: BlobsBundle::empty(), - commit_tx: None, - verify_tx: None, + let wrapper_version = if let Some(activated) = osaka_activated + && !activated + { + None + } else { + Some(1) }; - // Store batch info in L2 storage - rollup_store - .seal_batch(batch) - .await - .map_err(|e| format!("Error storing batch: {e}")) - .unwrap(); - } - let Some((last_number, last_hash)) = new_canonical_blocks.pop() else { - return Err(eyre::eyre!("No blocks found in blobs directory")); - }; - store - .forkchoice_update( - Some(new_canonical_blocks), - last_number, - last_hash, - None, + let blobs_bundle = + BlobsBundle::create_from_blobs(&vec![blob], wrapper_version)?; + + let batch = get_batch( + &store, + &blocks, + U256::from(batch_number), None, + blobs_bundle, ) .await?; + + // Seal batch + rollup_store.seal_batch(batch).await?; + + // Create checkpoint + let checkpoint_path = + store_path.join(format!("checkpoint_batch_{batch_number}")); + 
store.create_checkpoint(&checkpoint_path).await?; + + info!("Sealed batch {batch_number}."); + } } Command::RevertBatch { batch, diff --git a/cmd/ethrex/l2/deployer.rs b/cmd/ethrex/l2/deployer.rs index 7cd667281fd..475687aee39 100644 --- a/cmd/ethrex/l2/deployer.rs +++ b/cmd/ethrex/l2/deployer.rs @@ -256,7 +256,7 @@ pub struct DeployerOptions { env = "ETHREX_L2_VALIDIUM", action = ArgAction::Set, help_heading = "Deployer options", - help = "If true, L2 will run on validium mode as opposed to the default rollup mode, meaning it will not publish state diffs to the L1." + help = "If true, L2 will run on validium mode as opposed to the default rollup mode, meaning it will not publish blobs to the L1." )] pub validium: bool, #[arg( diff --git a/cmd/ethrex/l2/options.rs b/cmd/ethrex/l2/options.rs index 02446ca3c1a..f7b53ed2ae3 100644 --- a/cmd/ethrex/l2/options.rs +++ b/cmd/ethrex/l2/options.rs @@ -86,7 +86,7 @@ pub struct SequencerOptions { value_name = "BOOLEAN", env = "ETHREX_L2_VALIDIUM", help_heading = "L2 options", - long_help = "If true, L2 will run on validium mode as opposed to the default rollup mode, meaning it will not publish state diffs to the L1." + long_help = "If true, L2 will run on validium mode as opposed to the default rollup mode, meaning it will not publish blobs to the L1." )] pub validium: bool, #[clap( diff --git a/crates/common/types/l2.rs b/crates/common/types/l2.rs index 59f559d0906..d1764247099 100644 --- a/crates/common/types/l2.rs +++ b/crates/common/types/l2.rs @@ -1,3 +1,2 @@ -pub mod account_diff; pub mod batch; pub mod fee_config; diff --git a/crates/common/types/l2/account_diff.rs b/crates/common/types/l2/account_diff.rs deleted file mode 100644 index f7589a60eac..00000000000 --- a/crates/common/types/l2/account_diff.rs +++ /dev/null @@ -1,323 +0,0 @@ -// This file needs to be accessible from both the `vm` and `L2` crates. 
- -use bytes::Bytes; -use ethereum_types::{Address, H256, U256}; -use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, HashMap}; -use tracing::debug; - -#[derive(Debug, thiserror::Error)] -pub enum DecoderError { - #[error("Decoder failed to deserialize: {0}")] - FailedToDeserialize(String), - #[error("StateDiff failed to deserialize: {0}")] - FailedToDeserializeStateDiff(String), -} - -#[derive(Debug, thiserror::Error)] -pub enum AccountDiffError { - #[error("StateDiff invalid account state diff type: {0}")] - InvalidAccountStateDiffType(u8), - #[error("Both bytecode and bytecode hash are set")] - BytecodeAndBytecodeHashSet, - #[error("The length of the vector is too big to fit in u16: {0}")] - LengthTooBig(#[from] core::num::TryFromIntError), - #[error("Empty account diff")] - EmptyAccountDiff, -} - -#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)] -pub struct AccountStateDiff { - pub new_balance: Option, - pub nonce_diff: u16, - pub storage: BTreeMap, - pub bytecode: Option, - pub bytecode_hash: Option, -} - -#[derive(Debug, Clone, Copy)] -pub enum AccountStateDiffType { - NewBalance = 1, - NonceDiff = 2, - Storage = 4, - Bytecode = 8, - BytecodeHash = 16, -} - -impl TryFrom for AccountStateDiffType { - type Error = AccountDiffError; - - fn try_from(value: u8) -> Result { - match value { - 1 => Ok(AccountStateDiffType::NewBalance), - 2 => Ok(AccountStateDiffType::NonceDiff), - 4 => Ok(AccountStateDiffType::Storage), - 8 => Ok(AccountStateDiffType::Bytecode), - 16 => Ok(AccountStateDiffType::BytecodeHash), - _ => Err(AccountDiffError::InvalidAccountStateDiffType(value)), - } - } -} - -impl From for u8 { - fn from(value: AccountStateDiffType) -> Self { - match value { - AccountStateDiffType::NewBalance => 1, - AccountStateDiffType::NonceDiff => 2, - AccountStateDiffType::Storage => 4, - AccountStateDiffType::Bytecode => 8, - AccountStateDiffType::BytecodeHash => 16, - } - } -} - -impl AccountStateDiffType { - // Checks if the 
type is present in the given value - pub fn is_in(&self, value: u8) -> bool { - value & u8::from(*self) == u8::from(*self) - } -} - -pub fn get_accounts_diff_size( - account_diffs: &HashMap, -) -> Result { - let mut new_accounts_diff_size = 0; - - for (address, diff) in account_diffs.iter() { - let encoded = match diff.encode(address) { - Ok(encoded) => encoded, - Err(AccountDiffError::EmptyAccountDiff) => { - debug!("Skipping empty account diff for address: {address}"); - continue; - } - Err(e) => { - return Err(e); - } - }; - let encoded_len: u64 = encoded.len().try_into()?; - new_accounts_diff_size += encoded_len; - } - Ok(new_accounts_diff_size) -} - -impl AccountStateDiff { - pub fn encode(&self, address: &Address) -> Result, AccountDiffError> { - if self.bytecode.is_some() && self.bytecode_hash.is_some() { - return Err(AccountDiffError::BytecodeAndBytecodeHashSet); - } - - let mut r#type = 0; - let mut encoded: Vec = Vec::new(); - - if let Some(new_balance) = self.new_balance { - let r_type: u8 = AccountStateDiffType::NewBalance.into(); - r#type += r_type; - encoded.extend_from_slice(&new_balance.to_big_endian()); - } - - if self.nonce_diff != 0 { - let r_type: u8 = AccountStateDiffType::NonceDiff.into(); - r#type += r_type; - encoded.extend(self.nonce_diff.to_be_bytes()); - } - - if !self.storage.is_empty() { - let r_type: u8 = AccountStateDiffType::Storage.into(); - let storage_len: u16 = self.storage.len().try_into()?; - r#type += r_type; - encoded.extend(storage_len.to_be_bytes()); - for (key, value) in &self.storage { - encoded.extend_from_slice(&key.0); - encoded.extend_from_slice(&value.to_big_endian()); - } - } - - if let Some(bytecode) = &self.bytecode { - let r_type: u8 = AccountStateDiffType::Bytecode.into(); - let bytecode_len: u16 = bytecode.len().try_into()?; - r#type += r_type; - encoded.extend(bytecode_len.to_be_bytes()); - encoded.extend(bytecode); - } - - if let Some(bytecode_hash) = &self.bytecode_hash { - let r_type: u8 = 
AccountStateDiffType::BytecodeHash.into(); - r#type += r_type; - encoded.extend(&bytecode_hash.0); - } - - if r#type == 0 { - return Err(AccountDiffError::EmptyAccountDiff); - } - - let mut result = Vec::with_capacity(1 + address.0.len() + encoded.len()); - result.extend(r#type.to_be_bytes()); - result.extend(address.0); - result.extend(encoded); - - Ok(result) - } - - /// Returns a tuple of the number of bytes read, the address of the account - /// and the decoded `AccountStateDiff` - pub fn decode(bytes: &[u8]) -> Result<(usize, Address, Self), DecoderError> { - let mut decoder = Decoder::new(bytes); - - let update_type = decoder.get_u8()?; - - let address = decoder.get_address()?; - - let new_balance = if AccountStateDiffType::NewBalance.is_in(update_type) { - Some(decoder.get_u256()?) - } else { - None - }; - - let nonce_diff = if AccountStateDiffType::NonceDiff.is_in(update_type) { - Some(decoder.get_u16()?) - } else { - None - }; - - let mut storage_diff = BTreeMap::new(); - if AccountStateDiffType::Storage.is_in(update_type) { - let storage_slots_updated = decoder.get_u16()?; - - for _ in 0..storage_slots_updated { - let key = decoder.get_h256()?; - let new_value = decoder.get_u256()?; - - storage_diff.insert(key, new_value); - } - } - - let bytecode = if AccountStateDiffType::Bytecode.is_in(update_type) { - let bytecode_len = decoder.get_u16()?; - Some(decoder.get_bytes(bytecode_len.into())?) - } else { - None - }; - - let bytecode_hash = if AccountStateDiffType::BytecodeHash.is_in(update_type) { - Some(decoder.get_h256()?) 
- } else { - None - }; - - Ok(( - decoder.consumed(), - address, - AccountStateDiff { - new_balance, - nonce_diff: nonce_diff.unwrap_or(0), - storage: storage_diff, - bytecode, - bytecode_hash, - }, - )) - } -} - -pub struct Decoder { - bytes: Bytes, - offset: usize, -} - -impl Decoder { - pub fn new(bytes: &[u8]) -> Self { - Decoder { - bytes: Bytes::copy_from_slice(bytes), - offset: 0, - } - } - - pub fn consumed(&self) -> usize { - self.offset - } - - pub fn advance(&mut self, size: usize) { - self.offset += size; - } - - pub fn get_address(&mut self) -> Result { - let res = Address::from_slice(self.bytes.get(self.offset..self.offset + 20).ok_or( - DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), - )?); - self.offset += 20; - - Ok(res) - } - - pub fn get_u256(&mut self) -> Result { - let res = U256::from_big_endian(self.bytes.get(self.offset..self.offset + 32).ok_or( - DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), - )?); - self.offset += 32; - - Ok(res) - } - - pub fn get_h256(&mut self) -> Result { - let res = H256::from_slice(self.bytes.get(self.offset..self.offset + 32).ok_or( - DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), - )?); - self.offset += 32; - - Ok(res) - } - - pub fn get_u8(&mut self) -> Result { - let res = self - .bytes - .get(self.offset) - .ok_or(DecoderError::FailedToDeserializeStateDiff( - "Not enough bytes".to_string(), - ))?; - self.offset += 1; - - Ok(*res) - } - - pub fn get_u16(&mut self) -> Result { - let res = u16::from_be_bytes( - self.bytes - .get(self.offset..self.offset + 2) - .ok_or(DecoderError::FailedToDeserializeStateDiff( - "Not enough bytes".to_string(), - ))? 
- .try_into() - .map_err(|_| { - DecoderError::FailedToDeserializeStateDiff("Cannot parse u16".to_string()) - })?, - ); - self.offset += 2; - - Ok(res) - } - - pub fn get_u64(&mut self) -> Result { - let res = u64::from_be_bytes( - self.bytes - .get(self.offset..self.offset + 8) - .ok_or(DecoderError::FailedToDeserializeStateDiff( - "Not enough bytes".to_string(), - ))? - .try_into() - .map_err(|_| { - DecoderError::FailedToDeserializeStateDiff("Cannot parse u64".to_string()) - })?, - ); - self.offset += 8; - - Ok(res) - } - - pub fn get_bytes(&mut self, size: usize) -> Result { - let res = self.bytes.get(self.offset..self.offset + size).ok_or( - DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), - )?; - self.offset += size; - - Ok(Bytes::copy_from_slice(res)) - } -} diff --git a/crates/common/types/l2/fee_config.rs b/crates/common/types/l2/fee_config.rs index 148b89c5977..db508f98476 100644 --- a/crates/common/types/l2/fee_config.rs +++ b/crates/common/types/l2/fee_config.rs @@ -1,4 +1,5 @@ -use ethereum_types::Address; +use bytes::Bytes; +use ethereum_types::{Address, H256, U256}; use rkyv::{Archive, Deserialize as RDeserialize, Serialize as RSerialize}; use serde::{Deserialize, Serialize}; @@ -34,3 +35,257 @@ pub struct L1FeeConfig { pub l1_fee_vault: Address, pub l1_fee_per_blob_gas: u64, } + +#[derive(Debug, thiserror::Error)] +pub enum FeeConfigError { + #[error("Encoding error: {0}")] + EncodingError(String), + #[error("Unsupported version: {0}")] + UnsupportedVersion(u8), + #[error("Invalid fee config type: {0}")] + InvalidFeeConfigType(u8), + #[error("DecoderError error: {0}")] + DecoderError(#[from] DecoderError), +} + +#[derive(Debug, Clone, Copy)] +pub enum FeeConfigType { + BaseFeeVault = 1, + OperatorFee = 2, + L1Fee = 4, +} + +impl TryFrom for FeeConfigType { + type Error = FeeConfigError; + + fn try_from(value: u8) -> Result { + match value { + 1 => Ok(FeeConfigType::BaseFeeVault), + 2 => Ok(FeeConfigType::OperatorFee), + 4 
=> Ok(FeeConfigType::L1Fee), + _ => Err(FeeConfigError::InvalidFeeConfigType(value)), + } + } +} + +impl From for u8 { + fn from(value: FeeConfigType) -> Self { + match value { + FeeConfigType::BaseFeeVault => 1, + FeeConfigType::OperatorFee => 2, + FeeConfigType::L1Fee => 4, + } + } +} + +impl FeeConfigType { + // Checks if the type is present in the given value + pub fn is_in(&self, value: u8) -> bool { + value & u8::from(*self) == u8::from(*self) + } +} + +impl FeeConfig { + pub fn to_vec(&self) -> Vec { + let version = 0u8; + let mut encoded: Vec = Vec::new(); + + let mut fee_config_type = 0; + + if let Some(base_fee_vault) = self.base_fee_vault { + // base fee vault is set + let base_fee_vault_type: u8 = FeeConfigType::BaseFeeVault.into(); + fee_config_type += base_fee_vault_type; + encoded.extend_from_slice(&base_fee_vault.0); + } + + if let Some(operator_fee_config) = self.operator_fee_config { + // operator fee vault is set + let base_fee_vault_type: u8 = FeeConfigType::OperatorFee.into(); + fee_config_type += base_fee_vault_type; + encoded.extend_from_slice(&operator_fee_config.operator_fee_vault.0); + encoded.extend(operator_fee_config.operator_fee_per_gas.to_be_bytes()); + } + + if let Some(l1_fee_config) = self.l1_fee_config { + // l1 fee vault is set + let l1_fee_type: u8 = FeeConfigType::L1Fee.into(); + fee_config_type += l1_fee_type; + encoded.extend_from_slice(&l1_fee_config.l1_fee_vault.0); + encoded.extend(l1_fee_config.l1_fee_per_blob_gas.to_be_bytes()); + } + + let mut result = Vec::with_capacity(1 + 1 + encoded.len()); + result.extend(version.to_be_bytes()); + result.extend(fee_config_type.to_be_bytes()); + result.extend(encoded); + + result + } + + pub fn decode(bytes: &[u8]) -> Result<(usize, Self), FeeConfigError> { + let mut decoder = Decoder::new(bytes); + + // Read version + let version = decoder.get_u8()?; + if version != 0 { + return Err(FeeConfigError::UnsupportedVersion(version)); + } + + // Read fee config type + let fee_config_type 
= decoder.get_u8()?; + + // Read base fee vault if present + let base_fee_vault = if FeeConfigType::BaseFeeVault.is_in(fee_config_type) { + let address = decoder.get_address()?; + Some(address) + } else { + None + }; + + // Read operator fee config if present + let operator_fee_config = if FeeConfigType::OperatorFee.is_in(fee_config_type) { + let operator_fee_vault = decoder.get_address()?; + let operator_fee_per_gas = decoder.get_u64()?; + Some(OperatorFeeConfig { + operator_fee_vault, + operator_fee_per_gas, + }) + } else { + None + }; + + // Read L1 fee config if present + let l1_fee_config = if FeeConfigType::L1Fee.is_in(fee_config_type) { + let l1_fee_vault = decoder.get_address()?; + let l1_fee_per_blob_gas = decoder.get_u64()?; + Some(L1FeeConfig { + l1_fee_vault, + l1_fee_per_blob_gas, + }) + } else { + None + }; + + Ok(( + decoder.consumed(), + FeeConfig { + base_fee_vault, + operator_fee_config, + l1_fee_config, + }, + )) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum DecoderError { + #[error("Decoder failed to deserialize: {0}")] + FailedToDeserialize(String), + #[error("StateDiff failed to deserialize: {0}")] + FailedToDeserializeStateDiff(String), +} + +pub struct Decoder { + bytes: Bytes, + offset: usize, +} + +impl Decoder { + pub fn new(bytes: &[u8]) -> Self { + Decoder { + bytes: Bytes::copy_from_slice(bytes), + offset: 0, + } + } + + pub fn consumed(&self) -> usize { + self.offset + } + + pub fn advance(&mut self, size: usize) { + self.offset += size; + } + + pub fn get_address(&mut self) -> Result { + let res = Address::from_slice(self.bytes.get(self.offset..self.offset + 20).ok_or( + DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), + )?); + self.offset += 20; + + Ok(res) + } + + pub fn get_u256(&mut self) -> Result { + let res = U256::from_big_endian(self.bytes.get(self.offset..self.offset + 32).ok_or( + DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), + )?); + self.offset += 32; + + 
Ok(res) + } + + pub fn get_h256(&mut self) -> Result { + let res = H256::from_slice(self.bytes.get(self.offset..self.offset + 32).ok_or( + DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), + )?); + self.offset += 32; + + Ok(res) + } + + pub fn get_u8(&mut self) -> Result { + let res = self + .bytes + .get(self.offset) + .ok_or(DecoderError::FailedToDeserializeStateDiff( + "Not enough bytes".to_string(), + ))?; + self.offset += 1; + + Ok(*res) + } + + pub fn get_u16(&mut self) -> Result { + let res = u16::from_be_bytes( + self.bytes + .get(self.offset..self.offset + 2) + .ok_or(DecoderError::FailedToDeserializeStateDiff( + "Not enough bytes".to_string(), + ))? + .try_into() + .map_err(|_| { + DecoderError::FailedToDeserializeStateDiff("Cannot parse u16".to_string()) + })?, + ); + self.offset += 2; + + Ok(res) + } + + pub fn get_u64(&mut self) -> Result { + let res = u64::from_be_bytes( + self.bytes + .get(self.offset..self.offset + 8) + .ok_or(DecoderError::FailedToDeserializeStateDiff( + "Not enough bytes".to_string(), + ))? 
+ .try_into() + .map_err(|_| { + DecoderError::FailedToDeserializeStateDiff("Cannot parse u64".to_string()) + })?, + ); + self.offset += 8; + + Ok(res) + } + + pub fn get_bytes(&mut self, size: usize) -> Result { + let res = self.bytes.get(self.offset..self.offset + size).ok_or( + DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()), + )?; + self.offset += size; + + Ok(Bytes::copy_from_slice(res)) + } +} diff --git a/crates/common/types/transaction.rs b/crates/common/types/transaction.rs index c083507cd3a..83e134e15b3 100644 --- a/crates/common/types/transaction.rs +++ b/crates/common/types/transaction.rs @@ -10,6 +10,9 @@ use secp256k1::{Message, ecdsa::RecoveryId}; use serde::{Serialize, ser::SerializeStruct}; pub use serde_impl::{AccessListEntry, GenericTransaction, GenericTransactionError}; +/// The serialized length of a default eip1559 transaction +pub const EIP1559_DEFAULT_SERIALIZED_LENGTH: usize = 15; + use ethrex_rlp::{ constants::RLP_NULL, decode::{RLPDecode, decode_rlp_item}, @@ -3148,4 +3151,10 @@ mod tests { let decoded_tx = Transaction::decode(&encoded).unwrap(); assert_eq!(tx, decoded_tx); } + + #[test] + fn test_eip1559_simple_transfer_size() { + let tx = Transaction::EIP1559Transaction(EIP1559Transaction::default()); + assert_eq!(tx.encode_to_vec().len(), EIP1559_DEFAULT_SERIALIZED_LENGTH); + } } diff --git a/crates/l2/Makefile b/crates/l2/Makefile index b74e77cf15b..ed1a0efa1e1 100644 --- a/crates/l2/Makefile +++ b/crates/l2/Makefile @@ -263,10 +263,9 @@ state-diff-test: l2 reconstruct \ -g ../../fixtures/genesis/l2.json \ -b ../../fixtures/blobs/ \ - -s $$PWD/store \ - -c 0x0007a881CD95B1484fca47615B64803dad620C8d + -s $$PWD/store DOCKER_ETHREX_WORKDIR=${DOCKER_ETHREX_WORKDIR} \ ETHREX_WATCHER_BLOCK_DELAY=0 \ docker compose -f docker-compose.yaml -f docker-compose-l2-store.overrides.yaml up --detach --no-deps ethrex_l2 - + docker logs --follow ethrex_l2 & \ cargo test state_reconstruct --release diff --git 
a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs index e6b1f137ec5..0428d05e0fc 100644 --- a/crates/l2/based/block_fetcher.rs +++ b/crates/l2/based/block_fetcher.rs @@ -1,20 +1,11 @@ -use std::{cmp::min, collections::HashMap, sync::Arc, time::Duration}; +use std::{cmp::min, sync::Arc, time::Duration}; -use ethrex_blockchain::error::ChainError; -use ethrex_blockchain::{Blockchain, fork_choice::apply_fork_choice, vm::StoreVmDatabase}; +use ethrex_blockchain::{Blockchain, fork_choice::apply_fork_choice}; +use ethrex_common::types::BlobsBundle; use ethrex_common::utils::keccak; -use ethrex_common::{ - Address, H160, H256, U256, - types::{ - AccountUpdate, Block, BlockNumber, PrivilegedL2Transaction, Transaction, batch::Batch, - }, -}; -use ethrex_l2_common::{ - l1_messages::{L1Message, get_block_l1_messages, get_l1_message_hash}, - privileged_transactions::compute_privileged_transactions_hash, - state_diff::prepare_state_diff, -}; -use ethrex_l2_sdk::{get_l1_active_fork, get_last_committed_batch, get_last_fetched_l1_block}; +use ethrex_common::{Address, H256, U256, types::Block}; + +use ethrex_l2_sdk::{get_last_committed_batch, get_last_fetched_l1_block}; use ethrex_rlp::decode::RLPDecode; use ethrex_rpc::{EthClient, types::receipt::RpcLog}; use ethrex_storage::Store; @@ -26,10 +17,11 @@ use spawned_concurrency::{ }; use tracing::{debug, error, info}; +use crate::utils::state_reconstruct::get_batch; use crate::{ SequencerConfig, based::sequencer_state::{SequencerState, SequencerStatus}, - sequencer::{l1_committer::generate_blobs_bundle, utils::node_is_up_to_date}, + sequencer::utils::node_is_up_to_date, }; #[derive(Debug, thiserror::Error)] @@ -94,7 +86,6 @@ pub struct BlockFetcher { fetch_interval_ms: u64, last_l1_block_fetched: U256, fetch_block_step: U256, - osaka_activation_time: Option, } impl BlockFetcher { @@ -120,7 +111,6 @@ impl BlockFetcher { fetch_interval_ms: cfg.based.block_fetcher.fetch_interval_ms, last_l1_block_fetched, 
fetch_block_step: cfg.based.block_fetcher.fetch_block_step.into(), - osaka_activation_time: cfg.eth.osaka_activation_time, }) } @@ -302,7 +292,14 @@ impl BlockFetcher { batch_number: U256, commit_tx: H256, ) -> Result<(), BlockFetcherError> { - let batch = self.get_batch(batch, batch_number, commit_tx).await?; + let batch = get_batch( + &self.store, + batch, + batch_number, + Some(commit_tx), + BlobsBundle::default(), + ) + .await?; self.rollup_store.seal_batch(batch).await?; @@ -311,163 +308,6 @@ impl BlockFetcher { Ok(()) } - async fn get_batch( - &mut self, - batch: &[Block], - batch_number: U256, - commit_tx: H256, - ) -> Result { - let privileged_transactions: Vec = batch - .iter() - .flat_map(|block| { - block.body.transactions.iter().filter_map(|tx| { - if let Transaction::PrivilegedL2Transaction(tx) = tx { - Some(tx.clone()) - } else { - None - } - }) - }) - .collect(); - let privileged_transaction_hashes = privileged_transactions - .iter() - .filter_map(|tx| tx.get_privileged_hash()) - .collect(); - let mut messages = Vec::new(); - for block in batch { - let block_messages = self.extract_block_messages(block.header.number).await?; - messages.extend(block_messages); - } - let privileged_transactions_hash = - compute_privileged_transactions_hash(privileged_transaction_hashes)?; - - let first_block = batch.first().ok_or(BlockFetcherError::RetrievalError( - "Batch is empty. This shouldn't happen.".to_owned(), - ))?; - - let last_block = batch.last().ok_or(BlockFetcherError::RetrievalError( - "Batch is empty. This shouldn't happen.".to_owned(), - ))?; - - let new_state_root = self - .store - .state_trie(last_block.hash())? - .ok_or(BlockFetcherError::InconsistentStorage( - "This block should be in the store".to_owned(), - ))? - .hash_no_commit(); - - // This is copied from the L1Committer, this should be reviewed. 
- let mut acc_account_updates: HashMap = HashMap::new(); - for block in batch { - let parent_header = self - .store - .get_block_header_by_hash(block.header.parent_hash)? - .ok_or(BlockFetcherError::ChainError(ChainError::ParentNotFound))?; - let vm_db = StoreVmDatabase::new(self.store.clone(), parent_header); - let mut vm = self.blockchain.new_evm(vm_db)?; - vm.execute_block(block) - .map_err(BlockFetcherError::EvmError)?; - let account_updates = vm - .get_state_transitions() - .map_err(BlockFetcherError::EvmError)?; - - for account in account_updates { - let address = account.address; - if let Some(existing) = acc_account_updates.get_mut(&address) { - existing.merge(account); - } else { - acc_account_updates.insert(address, account); - } - } - } - - let parent_block_hash = first_block.header.parent_hash; - let parent_header = self - .store - .get_block_header_by_hash(parent_block_hash)? - .ok_or(BlockFetcherError::ChainError(ChainError::ParentNotFound))?; - - let parent_db = StoreVmDatabase::new(self.store.clone(), parent_header); - - let state_diff = prepare_state_diff( - last_block.header.clone(), - &parent_db, - &messages, - &privileged_transactions, - acc_account_updates.into_values().collect(), - ) - .map_err(|_| BlockFetcherError::BlobBundleError)?; - - let l1_fork = get_l1_active_fork(&self.eth_client, self.osaka_activation_time) - .await - .map_err(BlockFetcherError::EthClientError)?; - let (blobs_bundle, _) = generate_blobs_bundle(&state_diff, l1_fork) - .map_err(|_| BlockFetcherError::BlobBundleError)?; - - Ok(Batch { - number: batch_number.as_u64(), - first_block: first_block.header.number, - last_block: last_block.header.number, - state_root: new_state_root, - privileged_transactions_hash, - message_hashes: self.get_batch_message_hashes(batch).await?, - blobs_bundle, - commit_tx: Some(commit_tx), - verify_tx: None, - }) - } - - async fn get_batch_message_hashes( - &mut self, - batch: &[Block], - ) -> Result, BlockFetcherError> { - let mut 
message_hashes = Vec::new(); - - for block in batch { - let block_messages = self.extract_block_messages(block.header.number).await?; - - for msg in &block_messages { - message_hashes.push(get_l1_message_hash(msg)); - } - } - - Ok(message_hashes) - } - - async fn extract_block_messages( - &mut self, - block_number: BlockNumber, - ) -> Result, BlockFetcherError> { - let Some(block_body) = self.store.get_block_body(block_number).await? else { - return Err(BlockFetcherError::InconsistentStorage(format!( - "Block {block_number} is supposed to be in store at this point" - ))); - }; - - let mut txs = vec![]; - let mut receipts = vec![]; - for (index, tx) in block_body.transactions.iter().enumerate() { - let receipt = self - .store - .get_receipt( - block_number, - index.try_into().map_err(|_| { - BlockFetcherError::ConversionError( - "Failed to convert index to u64".to_owned(), - ) - })?, - ) - .await? - .ok_or(BlockFetcherError::RetrievalError( - "Transactions in a block should have a receipt".to_owned(), - ))?; - txs.push(tx.clone()); - receipts.push(receipt); - } - Ok(get_block_l1_messages(&receipts)) - } - /// Process the logs from the event `BatchVerified`. 
/// Gets the batch number from the logs and stores the verify transaction hash in the rollup store async fn process_verified_logs( @@ -558,7 +398,7 @@ fn decode_batch_from_calldata(calldata: &[u8]) -> Result, BlockFetche // function commitBatch( // uint256 batchNumber, // bytes32 newStateRoot, - // bytes32 stateDiffKZGVersionedHash, + // bytes32 BlobKZGVersionedHash, // bytes32 messagesLogsMerkleRoot, // bytes32 processedPrivilegedTransactionsRollingHash, // bytes[] calldata _rlpEncodedBlocks @@ -567,7 +407,7 @@ fn decode_batch_from_calldata(calldata: &[u8]) -> Result, BlockFetche // data = 4 bytes (function selector) 0..4 // || 8 bytes (batch number) 4..36 // || 32 bytes (new state root) 36..68 - // || 32 bytes (state diff KZG versioned hash) 68..100 + // || 32 bytes (blob KZG versioned hash) 68..100 // || 32 bytes (messages logs merkle root) 100..132 // || 32 bytes (processed privileged transactions rolling hash) 132..164 diff --git a/crates/l2/common/src/lib.rs b/crates/l2/common/src/lib.rs index a0cfe7c67fe..deb4b774f5a 100644 --- a/crates/l2/common/src/lib.rs +++ b/crates/l2/common/src/lib.rs @@ -3,5 +3,4 @@ pub mod l1_messages; pub mod merkle_tree; pub mod privileged_transactions; pub mod prover; -pub mod state_diff; pub mod utils; diff --git a/crates/l2/common/src/state_diff.rs b/crates/l2/common/src/state_diff.rs deleted file mode 100644 index 3a921a80f23..00000000000 --- a/crates/l2/common/src/state_diff.rs +++ /dev/null @@ -1,385 +0,0 @@ -use std::collections::{BTreeMap, HashMap}; - -use bytes::Bytes; -use ethereum_types::Address; -use ethrex_common::types::{ - AccountInfo, AccountState, AccountUpdate, BlockHeader, Code, PrivilegedL2Transaction, TxKind, - account_diff::{AccountDiffError, AccountStateDiff, Decoder, DecoderError}, - code_hash, -}; -use ethrex_rlp::decode::RLPDecode; -use ethrex_storage::{error::StoreError, hash_address}; -use ethrex_trie::{Trie, TrieError}; -use ethrex_vm::{EvmError, VmDatabase}; -use serde::{Deserialize, Serialize}; - -use 
crate::{l1_messages::L1Message, privileged_transactions::PrivilegedTransactionLog}; - -/// The serialized length of a default l1message log -pub const L1MESSAGE_LOG_LEN: u64 = 84; - -/// The serialized length of a default privileged transaction log -pub const PRIVILEGED_TX_LOG_LEN: u64 = 52; - -/// The serialized lenght of a default block header -pub const BLOCK_HEADER_LEN: u64 = 136; - -// State diff size for a simple transfer. -// Two `AccountUpdates` with new_balance, one of which also has nonce_diff. -pub const SIMPLE_TX_STATE_DIFF_SIZE: u64 = 108; - -#[derive(Debug, thiserror::Error)] -pub enum StateDiffError { - #[error("StateDiff failed to deserialize: {0}")] - FailedToDeserializeStateDiff(String), - #[error("StateDiff failed to serialize: {0}")] - FailedToSerializeStateDiff(String), - #[error("StateDiff unsupported version: {0}")] - UnsupportedVersion(u8), - #[error("The length of the vector is too big to fit in u16: {0}")] - LengthTooBig(#[from] core::num::TryFromIntError), - #[error("DB Error: {0}")] - DbError(#[from] TrieError), - #[error("Store Error: {0}")] - StoreError(#[from] StoreError), - #[error("New nonce is lower than the previous one")] - FailedToCalculateNonce, - #[error("Unexpected Error: {0}")] - InternalError(String), - #[error("Evm Error: {0}")] - EVMError(#[from] EvmError), - #[error("Decoder Error: {0}")] - DecoderError(#[from] DecoderError), - #[error("AccountDiff Error: {0}")] - AccountDiffError(#[from] AccountDiffError), -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StateDiff { - pub version: u8, - pub last_header: BlockHeader, - pub modified_accounts: BTreeMap, - pub l1_messages: Vec, - pub privileged_transactions: Vec, -} - -impl Default for StateDiff { - fn default() -> Self { - StateDiff { - version: 1, - last_header: BlockHeader::default(), - modified_accounts: BTreeMap::new(), - l1_messages: Vec::new(), - privileged_transactions: Vec::new(), - } - } -} - -pub fn encode_block_header(block_header: &BlockHeader) 
-> Vec { - let mut encoded = Vec::new(); - encoded.extend(block_header.transactions_root.0); - encoded.extend(block_header.receipts_root.0); - encoded.extend(block_header.parent_hash.0); - encoded.extend(block_header.gas_limit.to_be_bytes()); - encoded.extend(block_header.gas_used.to_be_bytes()); - encoded.extend(block_header.timestamp.to_be_bytes()); - encoded.extend(block_header.number.to_be_bytes()); - encoded.extend(block_header.base_fee_per_gas.unwrap_or(0).to_be_bytes()); - - encoded -} - -impl StateDiff { - pub fn encode(&self) -> Result { - if self.version != 1 { - return Err(StateDiffError::UnsupportedVersion(self.version)); - } - - let mut encoded: Vec = Vec::new(); - encoded.push(self.version); - - let header_encoded = encode_block_header(&self.last_header); - encoded.extend(header_encoded); - - let modified_accounts_len: u16 = self - .modified_accounts - .len() - .try_into() - .map_err(StateDiffError::from)?; - encoded.extend(modified_accounts_len.to_be_bytes()); - - for (address, diff) in &self.modified_accounts { - let account_encoded = diff.encode(address)?; - encoded.extend(account_encoded); - } - - let message_len: u16 = self.l1_messages.len().try_into()?; - encoded.extend(message_len.to_be_bytes()); - for message in self.l1_messages.iter() { - let message_encoded = message.encode(); - encoded.extend(message_encoded); - } - - let privileged_tx_len: u16 = self.privileged_transactions.len().try_into()?; - encoded.extend(privileged_tx_len.to_be_bytes()); - for privileged_tx in self.privileged_transactions.iter() { - let privileged_tx_encoded = privileged_tx.encode(); - encoded.extend(privileged_tx_encoded); - } - - Ok(Bytes::from(encoded)) - } - - pub fn decode(bytes: &[u8]) -> Result { - let mut decoder = Decoder::new(bytes); - - let version = decoder.get_u8()?; - if version != 0x01 { - return Err(StateDiffError::UnsupportedVersion(version)); - } - - // Last header fields - let last_header = BlockHeader { - transactions_root: decoder.get_h256()?, - 
receipts_root: decoder.get_h256()?, - parent_hash: decoder.get_h256()?, - gas_limit: decoder.get_u64()?, - gas_used: decoder.get_u64()?, - timestamp: decoder.get_u64()?, - number: decoder.get_u64()?, - base_fee_per_gas: Some(decoder.get_u64()?), - ..Default::default() - }; - - // Accounts diff - let modified_accounts_len = decoder.get_u16()?; - - let mut modified_accounts = BTreeMap::new(); - for _ in 0..modified_accounts_len { - let next_bytes = bytes.get(decoder.consumed()..).ok_or( - StateDiffError::FailedToSerializeStateDiff("Not enough bytes".to_string()), - )?; - let (bytes_read, address, account_diff) = AccountStateDiff::decode(next_bytes)?; - decoder.advance(bytes_read); - modified_accounts.insert(address, account_diff); - } - - let l1messages_len = decoder.get_u16()?; - - let mut l1messages = Vec::with_capacity(l1messages_len.into()); - for _ in 0..l1messages_len { - let from = decoder.get_address()?; - let data = decoder.get_h256()?; - let index = decoder.get_u256()?; - - l1messages.push(L1Message { - from, - data_hash: data, - message_id: index, - }); - } - - let privileged_transactions_len = decoder.get_u16()?; - - let mut privileged_transactions = Vec::with_capacity(privileged_transactions_len.into()); - for _ in 0..privileged_transactions_len { - let address = decoder.get_address()?; - let amount = decoder.get_u256()?; - - privileged_transactions.push(PrivilegedTransactionLog { - address, - amount, - nonce: Default::default(), - }); - } - - Ok(Self { - version, - last_header, - modified_accounts, - l1_messages: l1messages, - privileged_transactions, - }) - } - - pub fn to_account_updates( - &self, - prev_state: &Trie, - ) -> Result, StateDiffError> { - let mut account_updates = HashMap::new(); - - for (address, diff) in &self.modified_accounts { - let account_state = match prev_state - .get(&hash_address(address)) - .map_err(StateDiffError::DbError)? 
- { - Some(rlp) => AccountState::decode(&rlp) - .map_err(|e| StateDiffError::FailedToDeserializeStateDiff(e.to_string()))?, - None => AccountState::default(), - }; - - let balance = diff.new_balance.unwrap_or(account_state.balance); - let nonce = account_state.nonce + u64::from(diff.nonce_diff); - let bytecode_hash = diff.bytecode_hash.unwrap_or_else(|| match &diff.bytecode { - Some(bytecode) => code_hash(bytecode), - None => code_hash(&Bytes::new()), - }); - - let account_info = if diff.new_balance.is_some() - || diff.nonce_diff != 0 - || diff.bytecode_hash.is_some() - { - Some(AccountInfo { - balance, - nonce, - code_hash: bytecode_hash, - }) - } else { - None - }; - - account_updates.insert( - *address, - AccountUpdate { - address: *address, - removed: false, - info: account_info, - code: diff.bytecode.clone().map(Code::from_bytecode), - added_storage: diff.storage.clone().into_iter().collect(), - removed_storage: false, - }, - ); - } - - Ok(account_updates) - } -} - -/// Calculates nonce_diff between current and previous block. -pub fn get_nonce_diff( - account_update: &AccountUpdate, - db: &impl VmDatabase, -) -> Result { - // Get previous account_state either from store or cache - let account_state = db.get_account_state(account_update.address)?; - - // Get previous nonce - let prev_nonce = match account_state { - Some(state) => state.nonce, - None => 0, - }; - - // Get current nonce - let new_nonce = if let Some(info) = account_update.info.clone() { - info.nonce - } else { - prev_nonce - }; - - // Calculate nonce diff - let nonce_diff = new_nonce - .checked_sub(prev_nonce) - .ok_or(StateDiffError::FailedToCalculateNonce)? - .try_into() - .map_err(StateDiffError::from)?; - - Ok(nonce_diff) -} - -/// Prepare the state diff for the block. 
-pub fn prepare_state_diff( - last_header: BlockHeader, - db: &impl VmDatabase, - l1messages: &[L1Message], - privileged_transactions: &[PrivilegedL2Transaction], - account_updates: Vec, -) -> Result { - let mut modified_accounts = BTreeMap::new(); - for account_update in account_updates { - let nonce_diff = get_nonce_diff(&account_update, db)?; - - modified_accounts.insert( - account_update.address, - AccountStateDiff { - new_balance: account_update.info.clone().map(|info| info.balance), - nonce_diff, - storage: account_update.added_storage.clone().into_iter().collect(), - bytecode: account_update.code.map(|b| b.bytecode).clone(), - bytecode_hash: None, - }, - ); - } - - let state_diff = StateDiff { - modified_accounts, - version: StateDiff::default().version, - last_header, - l1_messages: l1messages.to_vec(), - privileged_transactions: privileged_transactions - .iter() - .map(|tx| PrivilegedTransactionLog { - address: match tx.to { - TxKind::Call(address) => address, - TxKind::Create => Address::zero(), - }, - amount: tx.value, - nonce: tx.nonce, - }) - .collect(), - }; - - Ok(state_diff) -} - -#[cfg(test)] -#[allow(clippy::as_conversions)] -mod tests { - use ethrex_common::U256; - - use super::*; - #[test] - fn test_l1_message_size() { - let l1_message_size = L1Message::default().encode().len() as u64; - assert_eq!(L1MESSAGE_LOG_LEN, l1_message_size); - } - - #[test] - fn test_privileged_tx_log_size() { - let privileged_tx_size = PrivilegedTransactionLog::default().encode().len() as u64; - assert_eq!(PRIVILEGED_TX_LOG_LEN, privileged_tx_size); - } - - #[test] - fn test_block_header_size() { - let block_header_size = encode_block_header(&BlockHeader::default()).len() as u64; - assert_eq!(BLOCK_HEADER_LEN, block_header_size); - } - - #[test] - fn test_accounts_diff_size() { - let empty_storage = BTreeMap::new(); - - let account_diff_1 = AccountStateDiff { - new_balance: Some(U256::from(1000)), - nonce_diff: 1, - storage: empty_storage.clone(), - bytecode: None, - 
bytecode_hash: None, - }; - - let account_diff_2 = AccountStateDiff { - new_balance: Some(U256::from(1000)), - nonce_diff: 0, - storage: empty_storage, - bytecode: None, - bytecode_hash: None, - }; - - let account_diff_1_size = account_diff_1.encode(&Address::zero()).unwrap().len() as u64; - let account_diff_2_size = account_diff_2.encode(&Address::zero()).unwrap().len() as u64; - assert_eq!( - SIMPLE_TX_STATE_DIFF_SIZE, - account_diff_1_size + account_diff_2_size - ); - } -} diff --git a/crates/l2/contracts/src/l1/OnChainProposer.sol b/crates/l2/contracts/src/l1/OnChainProposer.sol index 5a0c791ce5c..0db25e5832f 100644 --- a/crates/l2/contracts/src/l1/OnChainProposer.sol +++ b/crates/l2/contracts/src/l1/OnChainProposer.sol @@ -31,7 +31,7 @@ contract OnChainProposer is /// all the withdrawals that were processed in the batch being committed struct BatchCommitmentInfo { bytes32 newStateRoot; - bytes32 stateDiffKZGVersionedHash; + bytes32 blobKZGVersionedHash; bytes32 processedPrivilegedTransactionsRollingHash; bytes32 withdrawalsLogsMerkleRoot; bytes32 lastBlockHash; @@ -509,7 +509,7 @@ contract OnChainProposer is } bytes32 blobVersionedHash = bytes32(publicData[128:160]); if ( - batchCommitments[batchNumber].stateDiffKZGVersionedHash != + batchCommitments[batchNumber].blobKZGVersionedHash != blobVersionedHash ) { return diff --git a/crates/l2/prover/src/guest_program/src/execution.rs b/crates/l2/prover/src/guest_program/src/execution.rs index 9622beff371..9873b3112c1 100644 --- a/crates/l2/prover/src/guest_program/src/execution.rs +++ b/crates/l2/prover/src/guest_program/src/execution.rs @@ -12,14 +12,12 @@ use ethrex_common::types::{ block_execution_witness::GuestProgramState, block_execution_witness::GuestProgramStateError, }; use ethrex_common::{Address, U256}; -use ethrex_common::{ - H256, - types::{Block, BlockHeader}, -}; +use ethrex_common::{H256, types::Block}; #[cfg(feature = "l2")] use ethrex_l2_common::l1_messages::L1Message; +use 
ethrex_rlp::encode::RLPEncode; use ethrex_vm::{Evm, EvmError, GuestProgramStateWrapper, VmDatabase}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; #[cfg(feature = "l2")] use ethrex_common::types::{ @@ -32,7 +30,6 @@ use ethrex_l2_common::{ PrivilegedTransactionError, compute_privileged_transactions_hash, get_block_privileged_transactions, }, - state_diff::{StateDiff, StateDiffError, prepare_state_diff}, }; #[derive(Debug, thiserror::Error)] @@ -51,9 +48,6 @@ pub enum StatelessExecutionError { #[error("Privileged Transaction calculation error: {0}")] PrivilegedTransactionError(#[from] PrivilegedTransactionError), #[cfg(feature = "l2")] - #[error("State diff error: {0}")] - StateDiffError(#[from] StateDiffError), - #[cfg(feature = "l2")] #[error("Blobs bundle error: {0}")] BlobsBundleError(#[from] BlobsBundleError), #[cfg(feature = "l2")] @@ -63,9 +57,6 @@ pub enum StatelessExecutionError { #[error("Invalid KZG blob proof")] InvalidBlobProof, #[cfg(feature = "l2")] - #[error("Invalid state diff")] - InvalidStateDiff, - #[cfg(feature = "l2")] #[error("FeeConfig not provided for L2 execution")] FeeConfigNotFound, #[error("Batch has no blocks")] @@ -94,6 +85,8 @@ pub enum StatelessExecutionError { InvalidPrivilegedTransaction, #[error("Internal error: {0}")] Internal(String), + #[error("Failed to convert integer")] + TryIntoError(#[from] std::num::TryFromIntError), } pub fn execution_program(input: ProgramInput) -> Result { @@ -165,24 +158,17 @@ pub fn stateless_validation_l2( blob_proof: Proof, chain_id: u64, ) -> Result { - let initial_db = execution_witness.clone(); - let StatelessResult { receipts, initial_state_hash, final_state_hash, - account_updates, - last_block_header, last_block_hash, non_privileged_count, - nodes_hashed, - codes_hashed, - parent_block_header, } = execute_stateless( blocks, execution_witness, elasticity_multiplier, - fee_configs, + fee_configs.clone(), )?; let (l1messages, privileged_transactions) = @@ -197,42 
+183,10 @@ pub fn stateless_validation_l2( // TODO: this could be replaced with something like a ProverConfig in the future. let validium = (blob_commitment, &blob_proof) == ([0; 48], &[0; 48]); - // Check state diffs are valid + // Check blobs are valid let blob_versioned_hash = if !validium { - use bytes::Bytes; - use ethrex_common::types::Code; - - let mut guest_program_state = GuestProgramState { - codes_hashed: codes_hashed - .into_iter() - .map(|(h, c)| (h, Code::from_bytecode(Bytes::from_owner(c)))) - .collect(), - parent_block_header, - first_block_number: initial_db.first_block_number, - chain_config: initial_db.chain_config, - nodes_hashed, - state_trie: None, - // The following fields are not needed for blob validation. - storage_tries: BTreeMap::new(), - block_headers: BTreeMap::new(), - account_hashes_by_address: BTreeMap::new(), - }; - - guest_program_state - .rebuild_state_trie() - .map_err(|_| StatelessExecutionError::InvalidInitialStateTrie)?; - - let wrapped_db = GuestProgramStateWrapper::new(guest_program_state); - - let state_diff = prepare_state_diff( - last_block_header, - &wrapped_db, - &l1messages, - &privileged_transactions, - account_updates.values().cloned().collect(), - )?; - - verify_blob(state_diff, blob_commitment, blob_proof)? + let fee_configs = fee_configs.ok_or_else(|| StatelessExecutionError::FeeConfigNotFound)?; + verify_blob(blocks, &fee_configs, blob_commitment, blob_proof)? } else { H256::zero() }; @@ -253,20 +207,8 @@ struct StatelessResult { receipts: Vec>, initial_state_hash: H256, final_state_hash: H256, - account_updates: HashMap, - last_block_header: BlockHeader, last_block_hash: H256, non_privileged_count: U256, - - // These fields are only used in L2 to validate state diff blobs. - // We return them to avoid recomputing when comparing the initial state - // with the final state after block execution. 
- #[cfg(feature = "l2")] - pub nodes_hashed: BTreeMap>, - #[cfg(feature = "l2")] - pub codes_hashed: BTreeMap>, - #[cfg(feature = "l2")] - pub parent_block_header: BlockHeader, } fn execute_stateless( @@ -279,19 +221,6 @@ fn execute_stateless( .try_into() .map_err(StatelessExecutionError::GuestProgramState)?; - // Cache these L2-specific state fields for later state diff blob validation - // to avoid expensive recomputation after the guest_program_state is moved - // to the wrapper - #[cfg(feature = "l2")] - let nodes_hashed = guest_program_state.nodes_hashed.clone(); - #[cfg(feature = "l2")] - let codes_hashed = guest_program_state - .codes_hashed - .iter() - .map(|(h, c)| (*h, c.bytecode.to_vec())) - .collect(); - #[cfg(feature = "l2")] - let parent_block_header_clone = guest_program_state.parent_block_header.clone(); #[cfg(feature = "l2")] let fee_configs = fee_configs.ok_or_else(|| StatelessExecutionError::FeeConfigNotFound)?; @@ -411,16 +340,8 @@ fn execute_stateless( receipts: acc_receipts, initial_state_hash, final_state_hash, - account_updates: acc_account_updates, - last_block_header: last_block.header.clone(), last_block_hash, non_privileged_count: non_privileged_count.into(), - #[cfg(feature = "l2")] - nodes_hashed, - #[cfg(feature = "l2")] - codes_hashed, - #[cfg(feature = "l2")] - parent_block_header: parent_block_header_clone, }) } @@ -465,14 +386,28 @@ fn compute_l1messages_and_privileged_transactions_digests( #[cfg(feature = "l2")] fn verify_blob( - state_diff: StateDiff, + blocks: &[Block], + fee_configs: &[FeeConfig], commitment: Commitment, proof: Proof, ) -> Result { + use bytes::Bytes; use ethrex_crypto::kzg::verify_blob_kzg_proof; - let encoded_state_diff = state_diff.encode()?; - let blob_data = blob_from_bytes(encoded_state_diff)?; + let len: u64 = blocks.len().try_into()?; + let mut blob_data = Vec::new(); + + blob_data.extend(len.to_be_bytes()); + + for block in blocks { + blob_data.extend(block.encode_to_vec()); + } + + for fee_config in 
fee_configs { + blob_data.extend(fee_config.to_vec()); + } + + let blob_data = blob_from_bytes(Bytes::from(blob_data))?; if !verify_blob_kzg_proof(blob_data, commitment, proof)? { return Err(StatelessExecutionError::InvalidBlobProof); diff --git a/crates/l2/sequencer/block_producer/payload_builder.rs b/crates/l2/sequencer/block_producer/payload_builder.rs index 32eb17d8591..e77148e13d5 100644 --- a/crates/l2/sequencer/block_producer/payload_builder.rs +++ b/crates/l2/sequencer/block_producer/payload_builder.rs @@ -2,33 +2,21 @@ use crate::sequencer::errors::BlockProducerError; use ethrex_blockchain::{ Blockchain, constants::TX_GAS_COST, - payload::{ - HeadTransaction, PayloadBuildContext, PayloadBuildResult, TransactionQueue, - apply_plain_transaction, - }, + payload::{PayloadBuildContext, PayloadBuildResult, TransactionQueue, apply_plain_transaction}, }; -use ethrex_common::{ - Address, U256, - types::{ - Block, Receipt, SAFE_BYTES_PER_BLOB, Transaction, TxType, - account_diff::{AccountStateDiff, get_accounts_diff_size}, - }, +use ethrex_common::types::{ + Block, EIP1559_DEFAULT_SERIALIZED_LENGTH, SAFE_BYTES_PER_BLOB, Transaction, }; -use ethrex_l2_common::state_diff::{ - BLOCK_HEADER_LEN, L1MESSAGE_LOG_LEN, PRIVILEGED_TX_LOG_LEN, SIMPLE_TX_STATE_DIFF_SIZE, -}; -use ethrex_l2_common::{ - l1_messages::get_block_l1_messages, privileged_transactions::PRIVILEGED_TX_BUDGET, -}; -use ethrex_levm::utils::get_account_diffs_in_tx; +use ethrex_l2_common::privileged_transactions::PRIVILEGED_TX_BUDGET; +use ethrex_levm::vm::VMType; use ethrex_metrics::metrics; #[cfg(feature = "metrics")] use ethrex_metrics::{ metrics_blocks::METRICS_BLOCKS, metrics_transactions::{METRICS_TX, MetricsTxType}, }; +use ethrex_rlp::encode::RLPEncode; use ethrex_storage::Store; -use std::collections::HashMap; use std::ops::Div; use std::sync::Arc; use tokio::time::Instant; @@ -96,8 +84,8 @@ pub async fn build_payload( Ok(context.into()) } -/// Same as `blockchain::fill_transactions` but enforces 
that the `StateDiff` size -/// stays within the blob size limit after processing each transaction. +/// Same as `blockchain::fill_transactions` but enforces that the block encoded size +/// does not exceed `SAFE_BYTES_PER_BLOB`. /// Also, uses a configured `block_gas_limit` to limit the gas used in the block, /// which can be lower than the block gas limit specified in the payload header. pub async fn fill_transactions( @@ -107,13 +95,12 @@ pub async fn fill_transactions( last_privileged_nonce: &mut Option, configured_block_gas_limit: u64, ) -> Result<(), BlockProducerError> { - // version (u8) + header fields (struct) + messages_len (u16) + privileged_tx_len (u16) + accounts_diffs_len (u16) - let mut acc_size_without_accounts = 1 + BLOCK_HEADER_LEN + 2 + 2 + 2; - let mut size_accounts_diffs = 0; - let mut account_diffs = HashMap::new(); - let safe_bytes_per_blob: u64 = SAFE_BYTES_PER_BLOB.try_into()?; let mut privileged_tx_count = 0; - + let VMType::L2(fee_config) = context.vm.vm_type else { + return Err(BlockProducerError::Custom("invalid VM type".to_string())); + }; + let mut acc_encoded_size = context.payload.encode_to_vec().len(); + let fee_config_len = fee_config.to_vec().len(); let chain_config = store.get_chain_config(); debug!("Fetching transactions from mempool"); @@ -135,11 +122,11 @@ pub async fn fill_transactions( break; } - // Check if we have enough space for the StateDiff to run more transactions - if acc_size_without_accounts + size_accounts_diffs + SIMPLE_TX_STATE_DIFF_SIZE - > safe_bytes_per_blob + // Check if we have enough blob space to run more transactions + if acc_encoded_size + fee_config_len + EIP1559_DEFAULT_SERIALIZED_LENGTH + > SAFE_BYTES_PER_BLOB { - debug!("No more StateDiff space to run transactions"); + debug!("No more blob space to run transactions"); break; }; @@ -164,6 +151,31 @@ pub async fn fill_transactions( continue; } + // Check if we have enough blob space to add this transaction + let tx: Transaction = 
head_tx.clone().into(); + let tx_size = tx.encode_to_vec().len(); + if acc_encoded_size + fee_config_len + tx_size > SAFE_BYTES_PER_BLOB { + debug!("No more blob space to run transactions"); + break; + }; + + // Check we don't have an excessive number of privileged transactions + if head_tx.is_privileged() { + if privileged_tx_count >= PRIVILEGED_TX_BUDGET { + debug!("Ran out of space for privileged transactions"); + // We break here because if we have expired privileged transactions + // in the contract, our batch will be rejected if non-privileged txs + // are included. + break; + } + let id = head_tx.nonce(); + if last_privileged_nonce.is_some_and(|last_nonce| id != last_nonce + 1) { + debug!("Ignoring out-of-order privileged transaction"); + txs.pop(); + continue; + } + } + // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = head_tx.tx.hash(); @@ -190,10 +202,6 @@ pub async fn fill_transactions( continue; } - // Copy remaining gas and block value before executing the transaction - let previous_remaining_gas = context.remaining_gas; - let previous_block_value = context.block_value; - // Execute tx let receipt = match apply_plain_transaction(&head_tx, context) { Ok(receipt) => receipt, @@ -206,66 +214,25 @@ pub async fn fill_transactions( } }; - let tx_backup = context.vm.db.get_tx_backup().map_err(|e| { - BlockProducerError::FailedToGetDataFrom(format!("transaction backup: {e}")) - })?; - let account_diffs_in_tx = - get_account_diffs_in_tx(&context.vm.db, tx_backup).map_err(|e| { - BlockProducerError::Custom(format!("Failed to get account diffs from tx: {e}")) - })?; - let merged_diffs = merge_diffs(&account_diffs, account_diffs_in_tx); - - let (tx_size_without_accounts, new_accounts_diff_size) = - calculate_tx_diff_size(&merged_diffs, &head_tx, &receipt)?; - - if acc_size_without_accounts + tx_size_without_accounts + new_accounts_diff_size - > 
safe_bytes_per_blob - { - debug!( - "No more StateDiff space to run this transactions. Skipping transaction: {:?}", - tx_hash - ); - txs.pop(); - - // This transaction state change is too big, we need to undo it. - undo_last_tx(context, previous_remaining_gas, previous_block_value)?; - continue; - } - - // Check we don't have an excessive number of privileged transactions - if head_tx.tx_type() == TxType::Privileged { - if privileged_tx_count >= PRIVILEGED_TX_BUDGET { - debug!("Ran out of space for privileged transactions"); - txs.pop(); - undo_last_tx(context, previous_remaining_gas, previous_block_value)?; - continue; - } - let id = head_tx.nonce(); - if last_privileged_nonce.is_some_and(|last_nonce| id != last_nonce + 1) { - debug!("Ignoring out-of-order privileged transaction"); - txs.pop(); - undo_last_tx(context, previous_remaining_gas, previous_block_value)?; - continue; - } - last_privileged_nonce.replace(id); + // Update last privileged nonce and count + if head_tx.is_privileged() { + last_privileged_nonce.replace(head_tx.nonce()); privileged_tx_count += 1; } + // Update acc_encoded_size + acc_encoded_size += tx_size; + txs.shift()?; // Pull transaction from the mempool blockchain.remove_transaction_from_pool(&head_tx.tx.hash())?; - // We only add the messages and privileged transaction length because the accounts diffs may change - acc_size_without_accounts += tx_size_without_accounts; - size_accounts_diffs = new_accounts_diff_size; - // Include the new accounts diffs - account_diffs = merged_diffs; // Add transaction to block - debug!("Adding transaction: {} to payload", tx_hash); - context.payload.body.transactions.push(head_tx.into()); + context.payload.body.transactions.push(tx); + // Save receipt for hash calculation context.receipts.push(receipt); - } + } // end loop metrics!( context @@ -292,79 +259,3 @@ fn fetch_mempool_transactions( } Ok(plain_txs) } - -/// Combines the diffs from the current transaction with the existing block diffs. 
-/// Transaction diffs represent state changes from the latest transaction execution, -/// while previous diffs accumulate all changes included in the block so far. -fn merge_diffs( - previous_diffs: &HashMap, - tx_diffs: HashMap, -) -> HashMap { - let mut merged_diffs = previous_diffs.clone(); - for (address, diff) in tx_diffs { - if let Some(existing_diff) = merged_diffs.get_mut(&address) { - // New balance could be None if a transaction didn't change the balance - // but we want to keep the previous changes made in a transaction included in the block - existing_diff.new_balance = diff.new_balance.or(existing_diff.new_balance); - - // We add the nonce diff to the existing one to keep track of the total nonce diff - existing_diff.nonce_diff += diff.nonce_diff; - - // we need to overwrite only the new storage storage slot with the new values - existing_diff.storage.extend(diff.storage); - - // Take the bytecode from the tx diff if present, avoiding clone if not needed - if diff.bytecode.is_some() { - existing_diff.bytecode = diff.bytecode; - } - - // Take the new bytecode hash if it is present - existing_diff.bytecode_hash = diff.bytecode_hash.or(existing_diff.bytecode_hash); - } else { - merged_diffs.insert(address, diff); - } - } - merged_diffs -} - -/// Calculates the size of the state diffs introduced by the transaction, including -/// the size of messages and privileged transactions, and the total -/// size of all account diffs accumulated so far in the block. -/// This is necessary because each transaction can modify accounts that were already -/// changed by previous transactions, so we must recalculate the total diff size each time. 
-fn calculate_tx_diff_size( - merged_diffs: &HashMap, - head_tx: &HeadTransaction, - receipt: &Receipt, -) -> Result<(u64, u64), BlockProducerError> { - let new_accounts_diff_size = get_accounts_diff_size(merged_diffs).map_err(|e| { - BlockProducerError::Custom(format!("Failed to calculate account diffs size: {}", e)) - })?; - - let mut tx_state_diff_size = 0; - - if is_privileged_tx(head_tx) { - tx_state_diff_size += PRIVILEGED_TX_LOG_LEN; - } - let l1_message_count: u64 = get_block_l1_messages(std::slice::from_ref(receipt)) - .len() - .try_into()?; - tx_state_diff_size += l1_message_count * L1MESSAGE_LOG_LEN; - - Ok((tx_state_diff_size, new_accounts_diff_size)) -} - -fn is_privileged_tx(tx: &Transaction) -> bool { - matches!(tx, Transaction::PrivilegedL2Transaction(_tx)) -} - -fn undo_last_tx( - context: &mut PayloadBuildContext, - previous_remaining_gas: u64, - previous_block_value: U256, -) -> Result<(), BlockProducerError> { - context.vm.undo_last_tx()?; - context.remaining_gas = previous_remaining_gas; - context.block_value = previous_block_value; - Ok(()) -} diff --git a/crates/l2/sequencer/errors.rs b/crates/l2/sequencer/errors.rs index b71f79edd91..5af6644a26a 100644 --- a/crates/l2/sequencer/errors.rs +++ b/crates/l2/sequencer/errors.rs @@ -9,7 +9,6 @@ use ethrex_common::Address; use ethrex_common::types::{BlobsBundleError, FakeExponentialError}; use ethrex_l2_common::privileged_transactions::PrivilegedTransactionError; use ethrex_l2_common::prover::ProverType; -use ethrex_l2_common::state_diff::StateDiffError; use ethrex_l2_rpc::signer::SignerError; use ethrex_metrics::MetricsError; use ethrex_rpc::clients::EngineClientError; @@ -108,8 +107,6 @@ pub enum ProofCoordinatorError { InternalError(String), #[error("ProofCoordinator failed when (de)serializing JSON: {0}")] JsonError(#[from] serde_json::Error), - #[error("ProofCoordinator encountered a StateDiffError")] - StateDiffError(#[from] StateDiffError), #[error("ProofCoordinator encountered a 
ExecutionCacheError")] ExecutionCacheError(#[from] ExecutionCacheError), #[error("ProofCoordinator encountered a BlobsBundleError: {0}")] @@ -237,7 +234,7 @@ pub enum CommitterError { EthClientError(#[from] EthClientError), #[error("Committer failed to {0}")] FailedToParseLastCommittedBlock(#[from] FromStrRadixErr), - #[error("Committer failed retrieve block from storage: {0}")] + #[error("Committer Store Error: {0}")] StoreError(#[from] StoreError), #[error("Committer failed retrieve block from rollup storage: {0}")] RollupStoreError(#[from] RollupStoreError), @@ -249,8 +246,6 @@ pub enum CommitterError { FailedToGenerateBlobsBundle(#[from] BlobsBundleError), #[error("Committer failed to get information from storage: {0}")] FailedToGetInformationFromStorage(String), - #[error("Committer failed to encode state diff: {0}")] - FailedToEncodeStateDiff(#[from] StateDiffError), #[error("Committer failed to open Points file: {0}")] FailedToOpenPointsFile(#[from] std::io::Error), #[error("Committer failed to re-execute block: {0}")] diff --git a/crates/l2/sequencer/l1_committer.rs b/crates/l2/sequencer/l1_committer.rs index dd979b21390..944f2b5d3f7 100644 --- a/crates/l2/sequencer/l1_committer.rs +++ b/crates/l2/sequencer/l1_committer.rs @@ -16,8 +16,9 @@ use ethrex_blockchain::{ use ethrex_common::{ Address, H256, U256, types::{ - AccountUpdate, BLOB_BASE_FEE_UPDATE_FRACTION, BlobsBundle, Block, BlockNumber, Fork, - Genesis, MIN_BASE_FEE_PER_BLOB_GAS, TxType, batch::Batch, blobs_bundle, fake_exponential, + BLOB_BASE_FEE_UPDATE_FRACTION, BlobsBundle, Block, BlockNumber, Fork, Genesis, + MIN_BASE_FEE_PER_BLOB_GAS, TxType, batch::Batch, blobs_bundle, fake_exponential, + fee_config::FeeConfig, }, }; use ethrex_l2_common::{ @@ -29,7 +30,6 @@ use ethrex_l2_common::{ get_block_privileged_transactions, }, prover::ProverInputData, - state_diff::{StateDiff, prepare_state_diff}, }; use ethrex_l2_rpc::signer::{Signer, SignerHealth}; use ethrex_l2_sdk::{ @@ -51,7 +51,7 @@ use 
ethrex_vm::{BlockExecutionResult, Evm}; use rand::Rng; use serde::Serialize; use std::{ - collections::{BTreeMap, HashMap}, + collections::BTreeMap, fs::remove_dir_all, path::{Path, PathBuf}, sync::Arc, @@ -437,12 +437,13 @@ impl L1Committer { let mut acc_messages = vec![]; let mut acc_privileged_txs = vec![]; - let mut acc_account_updates: HashMap = HashMap::new(); let mut message_hashes = vec![]; let mut privileged_transactions_hashes = vec![]; let mut new_state_root = H256::default(); let mut acc_gas_used = 0_u64; - let mut blocks = vec![]; + let mut acc_blocks = vec![]; + let mut current_blocks = vec![]; + let mut current_fee_configs = vec![]; #[cfg(feature = "metrics")] let mut tx_count = 0_u64; @@ -451,7 +452,7 @@ impl L1Committer { #[cfg(feature = "metrics")] let mut batch_gas_used = 0_u64; - info!("Preparing state diff from block {first_block_of_batch}, {batch_number}"); + info!("Preparing batch from block {first_block_of_batch}, {batch_number}"); loop { let block_to_commit_number = last_added_block_number + 1; @@ -585,33 +586,6 @@ impl L1Committer { // Accumulate block data with the rest of the batch. acc_messages.extend(messages.clone()); acc_privileged_txs.extend(privileged_transactions.clone()); - for account in account_updates { - let address = account.address; - if let Some(existing) = acc_account_updates.get_mut(&address) { - existing.merge(account); - } else { - acc_account_updates.insert(address, account); - } - } - - // It is safe to retrieve this from the main store because blocks - // are available there. What's not available is the state - let parent_block_hash = self - .store - .get_block_header(first_block_of_batch)? - .ok_or(CommitterError::FailedToGetInformationFromStorage( - "Failed to get_block_header() of the last added block".to_owned(), - ))? - .parent_hash; - - let parent_header = self - .store - .get_block_header_by_hash(parent_block_hash)? 
- .ok_or(CommitterError::ChainError(ChainError::ParentNotFound))?; - - // Again, here the VM database should be instantiated from the checkpoint - // store to have access to the previous state - let parent_db = StoreVmDatabase::new(checkpoint_store.clone(), parent_header); let acc_privileged_txs_len: u64 = acc_privileged_txs.len().try_into()?; if acc_privileged_txs_len > PRIVILEGED_TX_BUDGET { @@ -623,18 +597,21 @@ impl L1Committer { } let result = if !self.validium { - // Prepare current state diff. - let state_diff: StateDiff = prepare_state_diff( - potential_batch_block.header.clone(), - &parent_db, - &acc_messages, - &acc_privileged_txs, - acc_account_updates.clone().into_values().collect(), - )?; - let l1_fork = get_l1_active_fork(&self.eth_client, self.osaka_activation_time) - .await - .map_err(CommitterError::EthClientError)?; - generate_blobs_bundle(&state_diff, l1_fork) + // Prepare blob + let fee_config = self + .rollup_store + .get_fee_config_by_block(block_to_commit_number) + .await? 
+ .ok_or(CommitterError::FailedToGetInformationFromStorage( + "Failed to get fee config for re-execution".to_owned(), + ))?; + + current_blocks.push(potential_batch_block.clone()); + current_fee_configs.push(fee_config); + let l1_fork = + get_l1_active_fork(&self.eth_client, self.osaka_activation_time).await?; + + generate_blobs_bundle(¤t_blocks, ¤t_fee_configs, l1_fork) } else { Ok((BlobsBundle::default(), 0_usize)) }; @@ -679,7 +656,7 @@ impl L1Committer { last_added_block_number += 1; acc_gas_used += current_block_gas_used; - blocks.push((last_added_block_number, potential_batch_block.hash())); + acc_blocks.push((last_added_block_number, potential_batch_block.hash())); } // end loop metrics!(if let (Ok(privileged_transaction_count), Ok(messages_count)) = ( @@ -713,7 +690,7 @@ impl L1Committer { let privileged_transactions_hash = compute_privileged_transactions_hash(privileged_transactions_hashes)?; - let last_block_hash = blocks + let last_block_hash = acc_blocks .last() .ok_or(CommitterError::Unreachable( "There should always be blocks".to_string(), @@ -722,7 +699,7 @@ impl L1Committer { checkpoint_store .forkchoice_update( - Some(blocks), + Some(acc_blocks), last_added_block_number, last_block_hash, None, @@ -1182,14 +1159,35 @@ impl GenServer for L1Committer { /// Generate the blob bundle necessary for the EIP-4844 transaction. 
pub fn generate_blobs_bundle( - state_diff: &StateDiff, + blocks: &[Block], + fee_configs: &[FeeConfig], fork: Fork, ) -> Result<(BlobsBundle, usize), CommitterError> { - let blob_data = state_diff.encode().map_err(CommitterError::from)?; + let blocks_len: u64 = blocks.len().try_into()?; + let fee_configs_len: u64 = fee_configs.len().try_into()?; + + if blocks_len != fee_configs_len { + return Err(CommitterError::UnexpectedError( + "Blocks and fee configs length mismatch".to_string(), + )); + } + + let mut blob_data = Vec::new(); + + blob_data.extend(blocks_len.to_be_bytes()); + + for block in blocks { + blob_data.extend(block.encode_to_vec()); + } + + for fee_config in fee_configs { + blob_data.extend(fee_config.to_vec()); + } let blob_size = blob_data.len(); - let blob = blobs_bundle::blob_from_bytes(blob_data).map_err(CommitterError::from)?; + let blob = + blobs_bundle::blob_from_bytes(Bytes::from(blob_data)).map_err(CommitterError::from)?; let wrapper_version = if fork <= Fork::Prague { None } else { Some(1) }; Ok(( diff --git a/crates/l2/storage/src/store_db/sql.rs b/crates/l2/storage/src/store_db/sql.rs index 4916ff2559e..9c96d8d6f76 100644 --- a/crates/l2/storage/src/store_db/sql.rs +++ b/crates/l2/storage/src/store_db/sql.rs @@ -207,14 +207,14 @@ impl SQLStore { async fn store_blob_bundle_by_batch_number_in_tx( &self, batch_number: u64, - state_diff: Vec, + blobs: Vec, db_tx: Option<&Transaction>, ) -> Result<(), RollupStoreError> { let mut queries = vec![( "DELETE FROM blob_bundles WHERE batch = ?1", vec![batch_number].into_params()?, )]; - for (index, blob) in state_diff.iter().enumerate() { + for (index, blob) in blobs.iter().enumerate() { let index = u64::try_from(index) .map_err(|e| RollupStoreError::Custom(format!("conversion error: {e}")))?; queries.push(( diff --git a/crates/l2/tests/tests.rs b/crates/l2/tests/tests.rs index acbbfb72a9a..741f9e4938c 100644 --- a/crates/l2/tests/tests.rs +++ b/crates/l2/tests/tests.rs @@ -3,8 +3,10 @@ use 
anyhow::{Context, Result}; use bytes::Bytes; use ethrex_common::constants::GAS_PER_BLOB; -use ethrex_common::types::account_diff::{AccountStateDiff, get_accounts_diff_size}; -use ethrex_common::types::{SAFE_BYTES_PER_BLOB, TxType}; +use ethrex_common::types::{ + EIP1559_DEFAULT_SERIALIZED_LENGTH, EIP1559Transaction, SAFE_BYTES_PER_BLOB, Transaction, + TxKind, TxType, +}; use ethrex_common::utils::keccak; use ethrex_common::{Address, H160, H256, U256}; use ethrex_l2::monitor::widget::l2_to_l1_messages::{L2ToL1MessageKind, L2ToL1MessageStatus}; @@ -12,7 +14,6 @@ use ethrex_l2::monitor::widget::{L2ToL1MessagesTable, l2_to_l1_messages::L2ToL1M use ethrex_l2::sequencer::l1_watcher::PrivilegedTransactionData; use ethrex_l2_common::calldata::Value; use ethrex_l2_common::l1_messages::L1MessageProof; -use ethrex_l2_common::state_diff::SIMPLE_TX_STATE_DIFF_SIZE; use ethrex_l2_common::utils::get_address_from_secret_key; use ethrex_l2_rpc::clients::{ get_base_fee_vault_address, get_l1_blob_base_fee_per_gas, get_l1_fee_vault_address, @@ -26,8 +27,10 @@ use ethrex_l2_sdk::{ wait_for_transaction_receipt, }; use ethrex_l2_sdk::{ - build_generic_tx, get_last_verified_batch, send_generic_transaction, wait_for_message_proof, + L2_WITHDRAW_SIGNATURE, build_generic_tx, get_last_verified_batch, send_generic_transaction, + wait_for_message_proof, }; +use ethrex_rlp::encode::RLPEncode; use ethrex_rpc::{ clients::eth::{EthClient, Overrides}, types::{ @@ -39,7 +42,6 @@ use hex::FromHexError; use reqwest::Url; use secp256k1::SecretKey; use std::cmp::min; -use std::collections::{BTreeMap, HashMap}; use std::ops::{Add, AddAssign}; use std::{ fs::{File, read_to_string}, @@ -305,7 +307,6 @@ async fn test_upgrade(l1_client: EthClient, l2_client: EthClient) -> Result, ) -> Result<(Address, FeesDetails)> { println!("{test_name}: Deploying contract on L2"); @@ -1807,14 +1828,16 @@ async fn test_deploy( "{test_name}: Deploy transaction failed" ); - let contract_bytecode = l2_client - 
.get_code(contract_address, BlockIdentifier::Tag(BlockTag::Latest)) - .await?; + // Calculate transaction size + let deploy_tx = Transaction::EIP1559Transaction(EIP1559Transaction { + to: TxKind::Create, + data: init_code.to_vec().into(), + ..Default::default() + }); - let account_diff_size = - get_account_diff_size_for_deploy(&contract_bytecode, storage_after_deploy); + let transaction_size: u64 = deploy_tx.encode_to_vec().len().try_into().unwrap(); - let deploy_fees = get_fees_details_l2(&deploy_tx_receipt, l2_client, account_diff_size).await?; + let deploy_fees = get_fees_details_l2(&deploy_tx_receipt, l2_client, transaction_size).await?; let deployer_balance_after_deploy = l2_client .get_balance(deployer.address(), BlockIdentifier::Tag(BlockTag::Latest)) @@ -2328,107 +2351,3 @@ async fn wait_for_verified_proof( proof } - -// ====================================================================== -// Auxiliary functions to calculate account diff size for different tx -// ====================================================================== - -fn get_account_diff_size_for_deploy( - bytecode: &Bytes, - storage_after_deploy: BTreeMap, -) -> u64 { - let mut account_diffs = HashMap::new(); - // tx sender - account_diffs.insert(Address::random(), sender_account_diff()); - // Deployed contract account - account_diffs.insert( - Address::random(), - AccountStateDiff { - nonce_diff: 1, - bytecode: Some(bytecode.clone()), - storage: storage_after_deploy, - ..Default::default() - }, - ); - get_accounts_diff_size(&account_diffs).unwrap() -} - -fn get_account_diff_size_for_withdraw() -> u64 { - let mut account_diffs = HashMap::new(); - // tx sender - account_diffs.insert(Address::random(), sender_account_diff()); - // L2_TO_L1_MESSENGER - account_diffs.insert( - Address::random(), - AccountStateDiff { - storage: dummy_modified_storage_slots(1), - ..Default::default() - }, - ); - // zero address - account_diffs.insert( - Address::zero(), - AccountStateDiff { - new_balance: 
Some(U256::zero()), - ..Default::default() - }, - ); - get_accounts_diff_size(&account_diffs).unwrap() -} - -fn get_account_diff_size_for_erc20withdraw() -> u64 { - let mut account_diffs = HashMap::new(); - // tx sender - account_diffs.insert(Address::random(), sender_account_diff()); - // L2_TO_L1_MESSENGER - account_diffs.insert( - Address::random(), - AccountStateDiff { - storage: dummy_modified_storage_slots(1), - ..Default::default() - }, - ); - // ERC20 contract - account_diffs.insert( - Address::random(), - AccountStateDiff { - storage: dummy_modified_storage_slots(2), - ..Default::default() - }, - ); - get_accounts_diff_size(&account_diffs).unwrap() -} - -fn get_account_diff_size_for_erc20approve() -> u64 { - let mut account_diffs = HashMap::new(); - // tx sender - account_diffs.insert(Address::random(), sender_account_diff()); - - // ERC20 contract - account_diffs.insert( - Address::random(), - AccountStateDiff { - storage: dummy_modified_storage_slots(1), - ..Default::default() - }, - ); - - get_accounts_diff_size(&account_diffs).unwrap() -} - -// Account diff for the sender of the transaction -fn sender_account_diff() -> AccountStateDiff { - AccountStateDiff { - nonce_diff: 1, - new_balance: Some(U256::zero()), - ..Default::default() - } -} - -fn dummy_modified_storage_slots(modified_storage_slots: u64) -> BTreeMap { - let mut storage = BTreeMap::new(); - for _ in 0..modified_storage_slots { - storage.insert(H256::random(), U256::zero()); - } - storage -} diff --git a/crates/l2/utils/error.rs b/crates/l2/utils/error.rs index df12656fd8f..d2702af4994 100644 --- a/crates/l2/utils/error.rs +++ b/crates/l2/utils/error.rs @@ -1,5 +1,6 @@ use ethrex_blockchain::error::ChainError; -use ethrex_common::H256; +use ethrex_common::{H256, types::BlobsBundleError}; +use ethrex_l2_common::privileged_transactions::PrivilegedTransactionError; use ethrex_storage::error::StoreError; #[derive(Debug, thiserror::Error)] @@ -20,4 +21,16 @@ pub enum ProverInputError { pub enum 
UtilsError { #[error("Unable to parse withdrawal_event_selector: {0}")] WithdrawalSelectorError(String), + #[error("Failed to retrieve data: {0}")] + RetrievalError(String), + #[error("Inconsistent Storage: {0}")] + InconsistentStorage(String), + #[error("Conversion Error: {0}")] + ConversionError(String), + #[error("Failed due to a Store error: {0}")] + StoreError(#[from] ethrex_storage::error::StoreError), + #[error("Failed to produce the blob bundle")] + BlobBundleError(#[from] BlobsBundleError), + #[error("Failed to compute deposit logs hash: {0}")] + PrivilegedTransactionError(#[from] PrivilegedTransactionError), } diff --git a/crates/l2/utils/mod.rs b/crates/l2/utils/mod.rs index d65ea6be061..ac3be8a7e00 100644 --- a/crates/l2/utils/mod.rs +++ b/crates/l2/utils/mod.rs @@ -1,3 +1,4 @@ pub mod error; pub mod parse; +pub mod state_reconstruct; pub mod test_data_io; diff --git a/crates/l2/utils/state_reconstruct.rs b/crates/l2/utils/state_reconstruct.rs new file mode 100644 index 00000000000..76cb011854d --- /dev/null +++ b/crates/l2/utils/state_reconstruct.rs @@ -0,0 +1,114 @@ +/// Utility functions for state reconstruction. +/// Used by the based block fetcher and reconstruct command. 
+use ethereum_types::H256; +use ethrex_common::types::BlobsBundle; +use ethrex_common::{ + U256, + types::{Block, BlockNumber, PrivilegedL2Transaction, Transaction, batch::Batch}, +}; +use ethrex_l2_common::{ + l1_messages::{L1Message, get_block_l1_messages, get_l1_message_hash}, + privileged_transactions::compute_privileged_transactions_hash, +}; +use ethrex_storage::Store; + +use crate::utils::error::UtilsError; + +pub async fn get_batch( + store: &Store, + batch: &[Block], + batch_number: U256, + commit_tx: Option, + blobs_bundle: BlobsBundle, +) -> Result { + let privileged_transactions: Vec = batch + .iter() + .flat_map(|block| { + block.body.transactions.iter().filter_map(|tx| { + if let Transaction::PrivilegedL2Transaction(tx) = tx { + Some(tx.clone()) + } else { + None + } + }) + }) + .collect(); + let privileged_transaction_hashes = privileged_transactions + .iter() + .filter_map(|tx| tx.get_privileged_hash()) + .collect(); + + let privileged_transactions_hash = + compute_privileged_transactions_hash(privileged_transaction_hashes)?; + + let first_block = batch.first().ok_or(UtilsError::RetrievalError( + "Batch is empty. This shouldn't happen.".to_owned(), + ))?; + + let last_block = batch.last().ok_or(UtilsError::RetrievalError( + "Batch is empty. This shouldn't happen.".to_owned(), + ))?; + + let new_state_root = store + .state_trie(last_block.hash())? + .ok_or(UtilsError::InconsistentStorage( + "This block should be in the store".to_owned(), + ))? 
+ .hash_no_commit(); + + Ok(Batch { + number: batch_number.as_u64(), + first_block: first_block.header.number, + last_block: last_block.header.number, + state_root: new_state_root, + privileged_transactions_hash, + message_hashes: get_batch_message_hashes(store, batch).await?, + blobs_bundle, + commit_tx, + verify_tx: None, + }) +} + +async fn get_batch_message_hashes(store: &Store, batch: &[Block]) -> Result, UtilsError> { + let mut message_hashes = Vec::new(); + + for block in batch { + let block_messages = extract_block_messages(store, block.header.number).await?; + + for msg in &block_messages { + message_hashes.push(get_l1_message_hash(msg)); + } + } + + Ok(message_hashes) +} + +async fn extract_block_messages( + store: &Store, + block_number: BlockNumber, +) -> Result, UtilsError> { + let Some(block_body) = store.get_block_body(block_number).await? else { + return Err(UtilsError::InconsistentStorage(format!( + "Block {block_number} is supposed to be in store at this point" + ))); + }; + + let mut txs = vec![]; + let mut receipts = vec![]; + for (index, tx) in block_body.transactions.iter().enumerate() { + let receipt = store + .get_receipt( + block_number, + index.try_into().map_err(|_| { + UtilsError::ConversionError("Failed to convert index to u64".to_owned()) + })?, + ) + .await? 
+ .ok_or(UtilsError::RetrievalError( + "Transactions in a block should have a receipt".to_owned(), + ))?; + txs.push(tx.clone()); + receipts.push(receipt); + } + Ok(get_block_l1_messages(&receipts)) +} diff --git a/crates/networking/rpc/eth/transaction.rs b/crates/networking/rpc/eth/transaction.rs index de456083e05..87ea64fb372 100644 --- a/crates/networking/rpc/eth/transaction.rs +++ b/crates/networking/rpc/eth/transaction.rs @@ -18,7 +18,7 @@ use ethrex_common::{ use ethrex_rlp::encode::RLPEncode; use ethrex_storage::Store; -use ethrex_vm::ExecutionResult; +use ethrex_vm::{ExecutionResult, backends::levm::get_max_allowed_gas_limit}; use serde::Serialize; use serde_json::Value; @@ -438,6 +438,8 @@ impl RpcHandler for EstimateGasRequest { let storage = &context.storage; let blockchain = &context.blockchain; let block = self.block.clone().unwrap_or_default(); + let chain_config = storage.get_chain_config(); + debug!("Requested estimate on block: {}", block); let block_header = match block.resolve_block_header(storage).await? 
{ Some(header) => header, @@ -445,6 +447,8 @@ impl RpcHandler for EstimateGasRequest { _ => return Ok(Value::Null), }; + let current_fork = chain_config.fork(block_header.timestamp); + let transaction = match self.transaction.nonce { Some(_nonce) => self.transaction.clone(), None => { @@ -482,9 +486,10 @@ impl RpcHandler for EstimateGasRequest { } // Prepare binary search + let highest_gas_limit = get_max_allowed_gas_limit(block_header.gas_limit, current_fork); let mut highest_gas_limit = match transaction.gas { - Some(gas) => gas.min(block_header.gas_limit), - None => block_header.gas_limit, + Some(gas) => gas.min(highest_gas_limit), + None => highest_gas_limit, }; if transaction.gas_price != 0 { diff --git a/crates/vm/backends/levm/mod.rs b/crates/vm/backends/levm/mod.rs index 6c1669d75df..1a260e7e05a 100644 --- a/crates/vm/backends/levm/mod.rs +++ b/crates/vm/backends/levm/mod.rs @@ -9,17 +9,20 @@ use crate::system_contracts::{ use crate::{EvmError, ExecutionResult}; use bytes::Bytes; use ethrex_common::types::fee_config::FeeConfig; +use ethrex_common::types::{AuthorizationTuple, EIP7702Transaction}; use ethrex_common::{ Address, U256, types::{ - AccessList, AccountUpdate, AuthorizationTuple, Block, BlockHeader, EIP1559Transaction, - EIP7702Transaction, Fork, GWEI_TO_WEI, GenericTransaction, INITIAL_BASE_FEE, Receipt, - Transaction, TxKind, Withdrawal, requests::Requests, + AccessList, AccountUpdate, Block, BlockHeader, EIP1559Transaction, Fork, GWEI_TO_WEI, + GenericTransaction, INITIAL_BASE_FEE, Receipt, Transaction, TxKind, Withdrawal, + requests::Requests, }, }; use ethrex_levm::EVMConfig; use ethrex_levm::call_frame::Stack; -use ethrex_levm::constants::{STACK_LIMIT, SYS_CALL_GAS_LIMIT, TX_BASE_COST}; +use ethrex_levm::constants::{ + POST_OSAKA_GAS_LIMIT_CAP, STACK_LIMIT, SYS_CALL_GAS_LIMIT, TX_BASE_COST, +}; use ethrex_levm::db::gen_db::GeneralizedDatabase; use ethrex_levm::errors::{InternalError, TxValidationError}; use 
ethrex_levm::tracing::LevmCallTracer; @@ -685,7 +688,9 @@ fn env_from_generic( let config = EVMConfig::new_from_chain_config(&chain_config, header); Ok(Environment { origin: tx.from.0.into(), - gas_limit: tx.gas.unwrap_or(header.gas_limit), // Ensure tx doesn't fail due to gas limit + gas_limit: tx + .gas + .unwrap_or(get_max_allowed_gas_limit(header.gas_limit, config.fork)), // Ensure tx doesn't fail due to gas limit config, block_number: header.number.into(), coinbase: header.coinbase, @@ -746,6 +751,15 @@ fn vm_from_generic<'a>( ..Default::default() }), }; + let vm_type = adjust_disabled_l2_fees(&env, vm_type); VM::new(env, db, &tx, LevmCallTracer::disabled(), vm_type) } + +pub fn get_max_allowed_gas_limit(block_gas_limit: u64, fork: Fork) -> u64 { + if fork >= Fork::Osaka { + POST_OSAKA_GAS_LIMIT_CAP + } else { + block_gas_limit + } +} diff --git a/crates/vm/levm/src/hooks/hook.rs b/crates/vm/levm/src/hooks/hook.rs index 1116c97cd33..d79c4fb5a79 100644 --- a/crates/vm/levm/src/hooks/hook.rs +++ b/crates/vm/levm/src/hooks/hook.rs @@ -29,10 +29,7 @@ pub fn l1_hooks() -> Vec>> { pub fn l2_hooks(fee_config: FeeConfig) -> Vec>> { vec![ - Rc::new(RefCell::new(L2Hook { - fee_config, - pre_execution_backup: Default::default(), - })), + Rc::new(RefCell::new(L2Hook { fee_config })), Rc::new(RefCell::new(BackupHook::default())), ] } diff --git a/crates/vm/levm/src/hooks/l2_hook.rs b/crates/vm/levm/src/hooks/l2_hook.rs index e21af7a0604..6dd2af18787 100644 --- a/crates/vm/levm/src/hooks/l2_hook.rs +++ b/crates/vm/levm/src/hooks/l2_hook.rs @@ -1,5 +1,4 @@ use crate::{ - call_frame::CallFrameBackup, errors::{ContextResult, InternalError, TxValidationError}, hooks::{ DefaultHook, @@ -10,7 +9,6 @@ use crate::{ hook::Hook, }, opcodes::Opcode, - utils::get_account_diffs_in_tx, vm::VM, }; @@ -22,11 +20,11 @@ use ethrex_common::{ Code, { SAFE_BYTES_PER_BLOB, - account_diff::get_accounts_diff_size, fee_config::{FeeConfig, L1FeeConfig, OperatorFeeConfig}, }, }, }; +use 
ethrex_rlp::encode::RLPEncode; pub const COMMON_BRIDGE_L2_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -35,7 +33,6 @@ pub const COMMON_BRIDGE_L2_ADDRESS: Address = H160([ pub struct L2Hook { pub fee_config: FeeConfig, - pub pre_execution_backup: CallFrameBackup, } impl Hook for L2Hook { @@ -48,8 +45,6 @@ impl Hook for L2Hook { // Max fee per gas must be sufficient to cover base fee + operator fee validate_sufficient_max_fee_per_gas_l2(vm, &self.fee_config.operator_fee_config)?; - // Backup the callframe to calculate the tx state diff later - self.pre_execution_backup = vm.current_call_frame.call_frame_backup.clone(); return Ok(()); } @@ -151,12 +146,7 @@ impl Hook for L2Hook { let actual_gas_used = compute_actual_gas_used(vm, gas_refunded, ctx_result.gas_used)?; // Different from L1: - - let mut l1_gas = calculate_l1_fee_gas( - vm, - std::mem::take(&mut self.pre_execution_backup), - &self.fee_config.l1_fee_config, - )?; + let mut l1_gas = calculate_l1_fee_gas(vm, &self.fee_config.l1_fee_config)?; let mut total_gas = actual_gas_used .checked_add(l1_gas) @@ -305,7 +295,7 @@ fn pay_operator_fee( fn calculate_l1_fee( fee_config: &L1FeeConfig, - account_diffs_size: u64, + transaction_size: usize, ) -> Result { let l1_fee_per_blob: U256 = fee_config .l1_fee_per_blob_gas @@ -318,7 +308,7 @@ fn calculate_l1_fee( .ok_or(InternalError::DivisionByZero)?; let l1_fee = l1_fee_per_blob_byte - .checked_mul(U256::from(account_diffs_size)) + .checked_mul(U256::from(transaction_size)) .ok_or(InternalError::Overflow)?; Ok(l1_fee) @@ -326,7 +316,6 @@ fn calculate_l1_fee( fn calculate_l1_fee_gas( vm: &mut VM<'_>, - pre_execution_backup: CallFrameBackup, l1_fee_config: &Option, ) -> Result { let Some(fee_config) = l1_fee_config else { @@ -334,13 +323,9 @@ fn calculate_l1_fee_gas( return Ok(0); }; - let mut execution_backup = vm.current_call_frame.call_frame_backup.clone(); - 
execution_backup.extend(pre_execution_backup); - let account_diffs_in_tx = get_account_diffs_in_tx(vm.db, execution_backup)?; - let account_diffs_size = get_accounts_diff_size(&account_diffs_in_tx) - .map_err(|e| InternalError::Custom(format!("Failed to get account diffs size: {}", e)))?; + let tx_size = vm.tx.encode_to_vec().len(); - let l1_fee = calculate_l1_fee(fee_config, account_diffs_size)?; + let l1_fee = calculate_l1_fee(fee_config, tx_size)?; let mut l1_fee_gas = l1_fee .checked_div(vm.env.gas_price) .ok_or(InternalError::DivisionByZero)?; diff --git a/crates/vm/levm/src/utils.rs b/crates/vm/levm/src/utils.rs index 7b6e95739b2..7acbbc00479 100644 --- a/crates/vm/levm/src/utils.rs +++ b/crates/vm/levm/src/utils.rs @@ -4,7 +4,7 @@ use crate::{ call_frame::CallFrameBackup, constants::*, db::gen_db::GeneralizedDatabase, - errors::{DatabaseError, ExceptionalHalt, InternalError, TxValidationError, VMError}, + errors::{ExceptionalHalt, InternalError, TxValidationError, VMError}, gas_cost::{ self, ACCESS_LIST_ADDRESS_COST, ACCESS_LIST_STORAGE_KEY_COST, BLOB_GAS_PER_BLOB, COLD_ADDRESS_ACCESS_COST, CREATE_BASE_COST, STANDARD_TOKEN_COST, @@ -19,10 +19,7 @@ use bytes::Bytes; use ethrex_common::{ Address, H256, U256, evm::calculate_create_address, - types::{ - Account, Code, Fork, Transaction, account_diff::AccountStateDiff, fake_exponential, - tx_fields::*, - }, + types::{Account, Code, Fork, Transaction, fake_exponential, tx_fields::*}, utils::{keccak, u256_to_big_endian}, }; use ethrex_common::{types::TxKind, utils::u256_from_big_endian_const}; @@ -33,7 +30,7 @@ use secp256k1::{ Message, ecdsa::{RecoverableSignature, RecoveryId}, }; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; pub type Storage = HashMap; // ================== Address related functions ====================== @@ -167,99 +164,6 @@ pub fn restore_cache_state( Ok(()) } -/// Returns the state diffs introduced by the transaction by comparing the call frame backup -/// (which 
holds the state before executing the transaction) with the current state of the cache -/// (which contains all the writes performed by the transaction). -pub fn get_account_diffs_in_tx( - db: &GeneralizedDatabase, - transaction_backup: CallFrameBackup, -) -> Result, VMError> { - let mut modified_accounts = HashMap::new(); - - // First we add the account info - for (address, original_account) in transaction_backup.original_accounts_info.iter() { - let new_account = db - .current_accounts_state - .get(address) - .ok_or(DatabaseError::Custom("DB Cache".to_owned()))?; - - let nonce_diff: u16 = new_account - .info - .nonce - .checked_sub(original_account.info.nonce) - .ok_or(InternalError::TypeConversion)? - .try_into() - .map_err(|_| InternalError::TypeConversion)?; - - let new_balance = if new_account.info.balance != original_account.info.balance { - Some(new_account.info.balance) - } else { - None - }; - - let bytecode = if new_account.info.code_hash != original_account.info.code_hash { - // After execution the code should be in db.codes - let code = db - .codes - .get(&new_account.info.code_hash) - .ok_or_else(|| DatabaseError::Custom("Code DB Cache".to_owned()))?; - Some(code.clone()) - } else { - None - }; - - let account_state_diff = AccountStateDiff { - new_balance, - nonce_diff, - storage: BTreeMap::new(), // We add the storage later - bytecode: bytecode.map(|c| c.bytecode), - bytecode_hash: None, - }; - - modified_accounts.insert(*address, account_state_diff); - } - - // Then if there is any storage change, we add it to the account state diff - for (address, original_storage_slots) in - transaction_backup.original_account_storage_slots.iter() - { - let account_info = db - .current_accounts_state - .get(address) - .ok_or(DatabaseError::Custom("DB Cache".to_owned()))?; - - let mut added_storage = BTreeMap::new(); - for key in original_storage_slots.keys() { - added_storage.insert( - *key, - *account_info - .storage - .get(key) - 
.ok_or(DatabaseError::Custom("Account info Storage".to_owned()))?, - ); - } - if let Some(account_state_diff) = modified_accounts.get_mut(address) { - account_state_diff.storage = added_storage; - } else { - // If the account is not in the modified accounts, we create a new one - let account_state_diff = AccountStateDiff { - new_balance: None, - nonce_diff: 0, - storage: added_storage, - bytecode: None, - bytecode_hash: None, - }; - - // If account state diff is NOT empty - if account_state_diff != AccountStateDiff::default() { - modified_accounts.insert(*address, account_state_diff); - } - } - } - - Ok(modified_accounts) -} - // ================= Blob hash related functions ===================== pub fn get_base_fee_per_blob_gas( block_excess_blob_gas: Option, diff --git a/docs/CLI.md b/docs/CLI.md index 409d66f7357..471832f73ea 100644 --- a/docs/CLI.md +++ b/docs/CLI.md @@ -537,7 +537,7 @@ Admin server options: L2 options: --validium - If true, L2 will run on validium mode as opposed to the default rollup mode, meaning it will not publish state diffs to the L1. + If true, L2 will run on validium mode as opposed to the default rollup mode, meaning it will not publish blobs to the L1. 
[env: ETHREX_L2_VALIDIUM=] diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index d684a658acf..e84c710022a 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -47,6 +47,7 @@ - [Deploy a contract](./l2/interacting/deploy_contracts.md) - [Fundamentals](./l2/fundamentals/README.md) - [State diffs](./l2/fundamentals/state_diffs.md) + - [Block vs StateDiff](./l2/fundamentals/block_vs_state_diff_measurements.md) - [Deposits](./l2/fundamentals/deposits.md) - [Withdrawals](./l2/fundamentals/withdrawals.md) - [Smart contracts](./l2/fundamentals/contracts.md) diff --git a/docs/l2/architecture/overview.md b/docs/l2/architecture/overview.md index ee4f5a86353..72ccdd814c2 100644 --- a/docs/l2/architecture/overview.md +++ b/docs/l2/architecture/overview.md @@ -49,6 +49,11 @@ These two ideas will be used extensively throughout the rest of the documentatio ## Reconstructing state/Data Availability +> [!WARNING] +> The **state diff** mechanism is retained here for historical and conceptual reference. +> Ethrex now publishes **RLP-encoded blocks** (with fee configs) in blobs. +> The principles of verification and compression described below still apply conceptually to this new model. + While using a merkle root as a public input for the proof works well, there is still a need to have the state on L1. If the only thing that's published to it is the state root, then the sequencer could withhold data on the state of the chain. Because it is the one proposing and executing blocks, if it refuses to deliver certain data (like a merkle path to prove a withdrawal on L1), people may not have any place to get it from and get locked out of the chain or some of their funds. This is called the **Data Availability** problem. As discussed before, sending the entire state of the chain on every new L2 batch is impossible; state is too big. As a first next step, what we could do is: @@ -77,6 +82,9 @@ Because state diffs are compressed to save space on L1, this compression needs t ## EIP 4844 (a.k.a. 
Blobs) +> [!WARNING] +> The explanations below originally refer to *state diffs*, but the same blob-based mechanism now carries **RLP-encoded block data** and their associated **fee configs**. + While we could send state diffs through calldata, there is a (hopefully) cheaper way to do it: blobs. The Ethereum Cancun upgrade introduced a new type of transaction where users can submit a list of opaque blobs of data, each one of size at most 128 KB. The main purpose of this new type of transaction is precisely to be used by rollups for data availability; they are priced separately through a `blob_gas` market instead of the regular `gas` one and for all intents and purposes should be much cheaper than calldata. Using EIP 4844, our state diffs would now be sent through blobs. While this is cheaper, there's a new problem to address with it. The whole point of blobs is that they're cheaper because they are only kept around for approximately two weeks and ONLY in the beacon chain, i.e. the consensus side. The execution side (and thus the EVM when running contracts) does not have access to the contents of a blob. Instead, the only thing it has access to is a **KZG commitment** of it. @@ -108,6 +116,12 @@ Our proof of equivalence implementation follows Method 1 [here](https://notes.et - The commitment's hash is equal to the versioned hash for that blob. - The evaluation is correct. +## Transition to RLP-encoded Blocks + +The state diff approach has been deprecated. While it provided a more compact representation, it only guaranteed the availability of the modified state, not the original transactions themselves. To ensure that transactions are also publicly available, Ethrex now publishes **RLP-encoded blocks**, together with their corresponding **fee configurations**, directly in blobs (see [Transaction fees](../fundamentals/transaction_fees.md)). + +This new approach guarantees both transaction and state availability, at the cost of higher data size. 
According to our internal measurements ([`block_vs_state_diff_measurements.md`](../fundamentals/block_vs_state_diff_measurements.md)), sending block lists in blobs instead of state diffs decreases the number of transactions that can fit in a single blob by approximately **2× for ETH transfers** and **3× for ERC20 transfers**. + ## L1<->L2 communication To communicate between L1 and L2, we use two mechanisms called _Privileged transactions_, and _L1 messages_. diff --git a/docs/l2/fundamentals/block_vs_state_diff_measurements.md b/docs/l2/fundamentals/block_vs_state_diff_measurements.md new file mode 100644 index 00000000000..7f841a74bc5 --- /dev/null +++ b/docs/l2/fundamentals/block_vs_state_diff_measurements.md @@ -0,0 +1,467 @@ +# Comparative Analysis: Transaction Volume in Blobs Using State Diffs and Transaction Lists + +The following are results from measurements conducted to understand the efficiency of blob utilization in an ethrex L2 network through the simulation of different scenarios with varying transaction complexities (e.g., ETH transfers, ERC20 transfers, and other complex smart contract interactions) and data encoding strategies, with the final goal of estimating the approximate number of transactions that can be packed into a single blob using state diffs versus full transaction lists, thereby optimizing calldata costs and achieving greater scalability. 
+ +## Measurements (Amount of transactions per batch) + +### ETH Transfers + +| Blob Payload | Batch 2 | Batch 3 | Batch 4 | Batch 5 | Batch 6 | Batch 7 | Batch 8 | Batch 9 | Batch 10 | Batch 11 | +| ------------ | ------- | ------- | ------- | ------- | ------- | ------- | ------- | ------- | -------- | -------- | +| State Diff | 2373 | 2134 | 2367 | 2141 | 2191 | 2370 | 2309 | 2361 | 2375 | 2367 | +| Block List | 913 | 871 | 886 | 935 | 1019 | 994 | 1002 | 1011 | 1012 | 1015 | + +### ERC20 Transfers + +| Blob Payload | Batch 2 | Batch 3 | Batch 4 | Batch 5 | Batch 6 | Batch 7 | Batch 8 | Batch 9 | Batch 10 | Batch 11 | +| ------------ | ------- | ------- | ------- | ------- | ------- | ------- | ------- | ------- | -------- | -------- | +| State Diff | 1942 | 1897 | 1890 | 1900 | 1915 | 1873 | 1791 | 1773 | 1867 | 1858 | +| Block List | 655 | 661 | 638 | 638 | 645 | 644 | 615 | 530 | 532 | 532 | + +### Summary + +| Blob Payload | Avg. ETH Transfers per Batch | Avg. ERC20 Transfers per Batch | +| ------------ | ---------------------------- | ------------------------------ | +| State Diff | 2298 | 1870 | +| Block List | 965 | 609 | + +## Conclusion + +Sending block lists in blobs instead of state diffs decreases the number of transactions that can fit in a single blob by approximately 2x for ETH transfers and 3x for ERC20 transfers. + +## How these measurements were done + +### Prerequisites + +- Freshly cloned ethrex repository +- The spammer and measurer code provided in the appendix set up for running (you can create a new cargo project and copy the code there) + +### Steps + +#### 1. Run an L2 ethrex: + +For running the measurements, we need to run an ethrex L2 node.
For doing that, change your current directory to `ethrex/crates/l2` in your freshly cloned ethrex and run the following in a terminal: + +```shell +ETHREX_COMMITTER_COMMIT_TIME=120000 MEMPOOL_MAX_SIZE=1000000 make init-l2-dev +``` + +This will set up and run an ethrex L2 node in dev mode with a mempool size big enough to be able to handle the spammer transactions. And after this you should see the ethrex L2 monitor running. + +#### 2. Run the desired transactions spammer + +> [!IMPORTANT] +> Wait a few seconds after running the L2 node to make sure it's fully up and running before starting the spammer, and to ensure that the rich account used by the spammer has funds. + +In another terminal, change your current directory to the spammer code you want to run (either ETH or ERC20) and run: + +```shell +cargo run +``` + +It's ok not to see any logs or prints as output, since the spammer code doesn't print anything. + +If you go back to the terminal where the L2 node is running, you should start seeing the following: + +1. The mempool table growing in size as transactions are being sent to the L2 node. +2. In the L2 Blocks table, new blocks with `#Txs` greater than 0 being created as the spammer transactions are included in blocks. +3. Every 2 minutes (or the time you set in `ETHREX_COMMITTER_COMMIT_TIME`), new batches being created in the L2 Batches table. + +#### 3. Run the measurer + +> [!IMPORTANT] +> +> - Wait until enough batches are created before running the measurer. +> - Ignore the results of the first 2 or 3 batches, since they contain other transactions created during the L2 node initialization. + +In another terminal, change your current directory to the measurer code and run: + +```shell +cargo run +``` + +This will start printing the total number of transactions included in each batch until the last committed one. + +> [!NOTE] +> +> - The measurer will query batches starting from batch 1 and will continue indefinitely until it fails to find a batch (e.g.
because the L2 node hasn't created it yet), so it is ok to see an error at the end of the output once the measurer reaches a batch that hasn't been created yet. + +## Appendix + +- [ETH Transactions Spammer](#eth-transactions-spammer) + - [`main.rs`](#mainrs) + - [`Cargo.toml`](#cargotoml) +- [Measurer](#measurer) + - [`main.rs`](#mainrs-1) + - [`Cargo.toml`](#cargotoml-1) +- [ERC20 Transactions Spammer](#erc20-transactions-spammer) + - [`main.rs`](#mainrs-2) + - [`Cargo.toml`](#cargotoml-2) + +### ETH Transactions Spammer + +> [!NOTE] +> This is using ethrex v6.0.0 + +#### `main.rs` + +```rs +use ethrex_common::{ + Address, U256, + types::{EIP1559Transaction, Transaction, TxKind}, +}; +use ethrex_l2_rpc::signer::{LocalSigner, Signable, Signer}; +use ethrex_l2_sdk::send_generic_transaction; +use ethrex_rpc::EthClient; +use tokio::time::sleep; +use url::Url; + +#[tokio::main] +async fn main() { + let chain_id = 65536999; + let senders = vec![ + "7a738a3a8ee9cdbb5ee8dfc1fc5d97847eaba4d31fd94f89e57880f8901fa029", + "8cfe380955165dd01f4e33a3c68f4e08881f238fbbea71a2ab407f4a3759705b", + "5bb463c0e64039550de4f95b873397b36d76b2f1af62454bb02cf6024d1ea703", + "3c0924743b33b5f06b056bed8170924ca12b0d52671fb85de1bb391201709aaf", + "6aeeda1e7eda6d618de89496fce01fb6ec685c38f1c5fccaa129ec339d33ff87", + ] + .iter() + .map(|s| Signer::Local(LocalSigner::new(s.parse().expect("invalid private key")))) + .collect::<Vec<Signer>>(); + let eth_client: EthClient = + EthClient::new(Url::parse("http://localhost:1729").expect("Invalid URL")) + .expect("Failed to create EthClient"); + let mut nonce = 0; + loop { + for sender in senders.clone() { + let signed_tx = generate_signed_transaction(nonce, chain_id, &sender).await; + send_generic_transaction(&eth_client, signed_tx.into(), &sender) + .await + .expect("Failed to send transaction"); + sleep(std::time::Duration::from_millis(10)).await; + } + nonce += 1; + } +} + +async fn generate_signed_transaction(nonce: u64, chain_id: u64, signer: &Signer) ->
Transaction { + Transaction::EIP1559Transaction(EIP1559Transaction { + nonce, + value: U256::one(), + gas_limit: 250000, + max_fee_per_gas: u64::MAX, + max_priority_fee_per_gas: 10, + chain_id, + to: TxKind::Call(Address::random()), + ..Default::default() + }) + .sign(&signer) + .await + .expect("failed to sign transaction") +} +``` + +#### `Cargo.toml` + +```toml +[package] +name = "tx_spammer" +version = "0.1.0" +edition = "2024" + +[dependencies] +ethrex-sdk = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-common = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-l2-rpc = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-rpc = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } + +tokio = { version = "1", features = ["full"] } +url = "2" +hex = "0.4" +``` + +### Measurer + +A simple program that queries the L2 node for batches and blocks, counting the number of transactions in each block, and summing them up per batch. 
+ +#### `main.rs` + +```rs +use reqwest::Client; +use serde_json::{Value, json}; + +#[tokio::main] +async fn main() -> Result<(), Box<dyn std::error::Error>> { + let mut batch = 1; + + loop { + let (first, last) = fetch_batch(batch).await; + let mut txs = 0u64; + for number in first as u64..=last as u64 { + txs += fetch_block(number).await; + } + println!("Total transactions in batch {}: {}", batch, txs); + + batch += 1; + } +} + +async fn fetch_batch(number: u64) -> (i64, i64) { + // Create the JSON body equivalent to the --data in curl + let body = json!({ + "method": "ethrex_getBatchByNumber", + "params": [format!("0x{:x}", number), false], + "id": 1, + "jsonrpc": "2.0" + }); + + // Create an HTTP client + let client = Client::new(); + + // Send the POST request + let response = client + .post("http://localhost:1729") + .header("Content-Type", "application/json") + .json(&body) + .send() + .await + .expect("Failed to send request") + .json::<Value>() + .await + .unwrap(); + + let result = &response["result"]; + let first_block = &result["first_block"].as_i64().unwrap(); + let last_block = &result["last_block"].as_i64().unwrap(); + (*first_block, *last_block) +} + +async fn fetch_block(number: u64) -> u64 { + // Create the JSON body equivalent to the --data in curl + let body = json!({ + "method": "eth_getBlockByNumber", + "params": [format!("0x{:x}", number), false], + "id": 1, + "jsonrpc": "2.0" + }); + + // Create an HTTP client + let client = Client::new(); + + // Send the POST request + let response = client + .post("http://localhost:1729") + .header("Content-Type", "application/json") + .json(&body) + .send() + .await + .expect("Failed to send request") + .json::<Value>() + .await + .unwrap(); + + let result = &response["result"]; + let transactions = &result["transactions"]; + transactions.as_array().unwrap().len() as u64 +} +``` + +#### `Cargo.toml` + +```toml +[package] +name = "measurer" +version = "0.1.0" +edition = "2024" + +[dependencies] +reqwest = { version = "0.11",
features = ["json"] } +serde_json = "1.0" +tokio = { version = "1", features = ["full"] } +``` + +### ERC20 Transactions Spammer + +#### `main.rs` + +```rs +use ethrex_blockchain::constants::TX_GAS_COST; +use ethrex_common::{ + Address, U256, + types::{EIP1559Transaction, GenericTransaction, Transaction, TxKind, TxType}, +}; +use ethrex_l2_rpc::signer::{LocalSigner, Signable, Signer}; +use ethrex_l2_sdk::{ + build_generic_tx, calldata::encode_calldata, create_deploy, send_generic_transaction, + wait_for_transaction_receipt, +}; +use ethrex_rpc::{EthClient, clients::Overrides}; +use tokio::time::sleep; +use url::Url; + +// ERC20 compiled artifact generated from this tutorial: +// https://medium.com/@kaishinaw/erc20-using-hardhat-a-comprehensive-guide-3211efba98d4 +// If you want to modify the behaviour of the contract, edit the ERC20.sol file, +// and compile it with solc. +const ERC20: &str = include_str!("./TestToken.bin").trim_ascii(); + +#[tokio::main] +async fn main() { + let chain_id = 65536999; + let signer = Signer::Local(LocalSigner::new( + "39725efee3fb28614de3bacaffe4cc4bd8c436257e2c8bb887c4b5c4be45e76d" + .parse() + .expect("invalid private key"), + )); + let eth_client: EthClient = + EthClient::new(Url::parse("http://localhost:1729").expect("Invalid URL")) + .expect("Failed to create EthClient"); + let contract_address = erc20_deploy(eth_client.clone(), &signer) + .await + .expect("Failed to deploy ERC20 contract"); + + let senders = vec![ + "7a738a3a8ee9cdbb5ee8dfc1fc5d97847eaba4d31fd94f89e57880f8901fa029", + "8cfe380955165dd01f4e33a3c68f4e08881f238fbbea71a2ab407f4a3759705b", + "5bb463c0e64039550de4f95b873397b36d76b2f1af62454bb02cf6024d1ea703", + "3c0924743b33b5f06b056bed8170924ca12b0d52671fb85de1bb391201709aaf", + "6aeeda1e7eda6d618de89496fce01fb6ec685c38f1c5fccaa129ec339d33ff87", + ] + .iter() + .map(|s| Signer::Local(LocalSigner::new(s.parse().expect("invalid private key")))) + .collect::<Vec<Signer>>(); + claim_erc20_balances(contract_address,
eth_client.clone(), senders.clone()) + .await + .expect("Failed to claim ERC20 balances"); + let mut nonce = 1; + loop { + for sender in senders.clone() { + let signed_tx = + generate_erc20_transaction(nonce, chain_id, &sender, &eth_client, contract_address) + .await; + send_generic_transaction(&eth_client, signed_tx.into(), &sender) + .await + .expect("Failed to send transaction"); + println!( + "Sent transaction with nonce {} for address {}", + nonce, + sender.address() + ); + sleep(std::time::Duration::from_millis(10)).await; + } + nonce += 1; + } +} + +// Given an account vector and the erc20 contract address, claim balance for all accounts. +async fn claim_erc20_balances( + contract_address: Address, + client: EthClient, + accounts: Vec<Signer>, +) -> eyre::Result<()> { + for account in accounts { + let claim_balance_calldata = encode_calldata("freeMint()", &[]).unwrap(); + + let claim_tx = build_generic_tx( + &client, + TxType::EIP1559, + contract_address, + account.address(), + claim_balance_calldata.into(), + Default::default(), + ) + .await + .unwrap(); + let tx_hash = send_generic_transaction(&client, claim_tx, &account) + .await + .unwrap(); + wait_for_transaction_receipt(tx_hash, &client, 1000) + .await + .unwrap(); + } + + Ok(()) +} + +async fn deploy_contract( + client: EthClient, + deployer: &Signer, + contract: Vec<u8>, +) -> eyre::Result<Address>
{ + let (_, contract_address) = + create_deploy(&client, deployer, contract.into(), Overrides::default()).await?; + + eyre::Ok(contract_address) +} + +async fn erc20_deploy(client: EthClient, deployer: &Signer) -> eyre::Result<Address>
{ + let erc20_bytecode = hex::decode(ERC20).expect("Failed to decode ERC20 bytecode"); + deploy_contract(client, deployer, erc20_bytecode).await +} + +async fn generate_erc20_transaction( + nonce: u64, + chain_id: u64, + signer: &Signer, + client: &EthClient, + contract_address: Address, +) -> GenericTransaction { + let send_calldata = encode_calldata( + "transfer(address,uint256)", + &[ + ethrex_l2_common::calldata::Value::Address(Address::random()), + ethrex_l2_common::calldata::Value::Uint(U256::one()), + ], + ) + .unwrap(); + + let tx = build_generic_tx( + client, + TxType::EIP1559, + contract_address, + signer.address(), + send_calldata.into(), + Overrides { + chain_id: Some(chain_id), + value: None, + nonce: Some(nonce), + max_fee_per_gas: Some(i64::MAX as u64), + max_priority_fee_per_gas: Some(10_u64), + gas_limit: Some(TX_GAS_COST * 100), + ..Default::default() + }, + ) + .await + .unwrap(); + + tx +} +``` + +#### `Cargo.toml` + +```toml +[package] +name = "tx_spammer" +version = "0.1.0" +edition = "2024" + +[dependencies] +ethrex-sdk = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-common = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-l2-rpc = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-rpc = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +tokio = { version = "1", features = ["full"] } +ethrex-l2-common = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +ethrex-blockchain = { git = "https://github.com/lambdaclass/ethrex.git", tag = "v6.0.0" } +url = "2" +hex = "0.4" +eyre = "0.6" +``` + +#### `TestToken.bin` + +``` 
+608060405234801561000f575f5ffd5b506040518060400160405280600881526020017f46756e546f6b656e0000000000000000000000000000000000000000000000008152506040518060400160405280600381526020017f46554e0000000000000000000000000000000000000000000000000000000000815250816003908161008b9190610598565b50806004908161009b9190610598565b5050506100b83369d3c21bcecceda10000006100bd60201b60201c565b61077c565b5f73ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff160361012d575f6040517fec442f0500000000000000000000000000000000000000000000000000000000815260040161012491906106a6565b60405180910390fd5b61013e5f838361014260201b60201c565b5050565b5f73ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603610192578060025f82825461018691906106ec565b92505081905550610260565b5f5f5f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205490508181101561021b578381836040517fe450d38c0000000000000000000000000000000000000000000000000000000081526004016102129392919061072e565b60405180910390fd5b8181035f5f8673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2081905550505b5f73ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16036102a7578060025f82825403925050819055506102f1565b805f5f8473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f82825401925050819055505b8173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161034e9190610763565b60405180910390a3505050565b5f81519050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b5f60028204905060018216806103d657607f821691505b6020821081036103e9576103e86103925
65b5b50919050565b5f819050815f5260205f209050919050565b5f6020601f8301049050919050565b5f82821b905092915050565b5f6008830261044b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82610410565b6104558683610410565b95508019841693508086168417925050509392505050565b5f819050919050565b5f819050919050565b5f61049961049461048f8461046d565b610476565b61046d565b9050919050565b5f819050919050565b6104b28361047f565b6104c66104be826104a0565b84845461041c565b825550505050565b5f5f905090565b6104dd6104ce565b6104e88184846104a9565b505050565b5b8181101561050b576105005f826104d5565b6001810190506104ee565b5050565b601f82111561055057610521816103ef565b61052a84610401565b81016020851015610539578190505b61054d61054585610401565b8301826104ed565b50505b505050565b5f82821c905092915050565b5f6105705f1984600802610555565b1980831691505092915050565b5f6105888383610561565b9150826002028217905092915050565b6105a18261035b565b67ffffffffffffffff8111156105ba576105b9610365565b5b6105c482546103bf565b6105cf82828561050f565b5f60209050601f831160018114610600575f84156105ee578287015190505b6105f8858261057d565b86555061065f565b601f19841661060e866103ef565b5f5b8281101561063557848901518255600182019150602085019450602081019050610610565b86831015610652578489015161064e601f891682610561565b8355505b6001600288020188555050505b505050505050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f61069082610667565b9050919050565b6106a081610686565b82525050565b5f6020820190506106b95f830184610697565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f6106f68261046d565b91506107018361046d565b9250828201905080821115610719576107186106bf565b5b92915050565b6107288161046d565b82525050565b5f6060820190506107415f830186610697565b61074e602083018561071f565b61075b604083018461071f565b949350505050565b5f6020820190506107765f83018461071f565b92915050565b610e8c806107895f395ff3fe608060405234801561000f575f5ffd5b506004361061009c575f3560e01c80635b70ea9f116100645780635b70ea9f1461015a57806370a082311461016457806395d89b41146
10194578063a9059cbb146101b2578063dd62ed3e146101e25761009c565b806306fdde03146100a0578063095ea7b3146100be57806318160ddd146100ee57806323b872dd1461010c578063313ce5671461013c575b5f5ffd5b6100a8610212565b6040516100b59190610b05565b60405180910390f35b6100d860048036038101906100d39190610bb6565b6102a2565b6040516100e59190610c0e565b60405180910390f35b6100f66102c4565b6040516101039190610c36565b60405180910390f35b61012660048036038101906101219190610c4f565b6102cd565b6040516101339190610c0e565b60405180910390f35b6101446102fb565b6040516101519190610cba565b60405180910390f35b610162610303565b005b61017e60048036038101906101799190610cd3565b610319565b60405161018b9190610c36565b60405180910390f35b61019c61035e565b6040516101a99190610b05565b60405180910390f35b6101cc60048036038101906101c79190610bb6565b6103ee565b6040516101d99190610c0e565b60405180910390f35b6101fc60048036038101906101f79190610cfe565b610410565b6040516102099190610c36565b60405180910390f35b60606003805461022190610d69565b80601f016020809104026020016040519081016040528092919081815260200182805461024d90610d69565b80156102985780601f1061026f57610100808354040283529160200191610298565b820191905f5260205f20905b81548152906001019060200180831161027b57829003601f168201915b5050505050905090565b5f5f6102ac610492565b90506102b9818585610499565b600191505092915050565b5f600254905090565b5f5f6102d7610492565b90506102e48582856104ab565b6102ef85858561053e565b60019150509392505050565b5f6012905090565b6103173369d3c21bcecceda100000061062e565b565b5f5f5f8373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f20549050919050565b60606004805461036d90610d69565b80601f016020809104026020016040519081016040528092919081815260200182805461039990610d69565b80156103e45780601f106103bb576101008083540402835291602001916103e4565b820191905f5260205f20905b8154815290600101906020018083116103c757829003601f168201915b5050505050905090565b5f5f6103f8610492565b905061040581858561053e565b600191505092915050565b5f60015f8473ffffffffffffffffffffffffffffffffffffffff1673fff
fffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f8373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2054905092915050565b5f33905090565b6104a683838360016106ad565b505050565b5f6104b68484610410565b90507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8110156105385781811015610529578281836040517ffb8f41b200000000000000000000000000000000000000000000000000000000815260040161052093929190610da8565b60405180910390fd5b61053784848484035f6106ad565b5b50505050565b5f73ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16036105ae575f6040517f96c6fd1e0000000000000000000000000000000000000000000000000000000081526004016105a59190610ddd565b60405180910390fd5b5f73ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff160361061e575f6040517fec442f050000000000000000000000000000000000000000000000000000000081526004016106159190610ddd565b60405180910390fd5b61062983838361087c565b505050565b5f73ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff160361069e575f6040517fec442f050000000000000000000000000000000000000000000000000000000081526004016106959190610ddd565b60405180910390fd5b6106a95f838361087c565b5050565b5f73ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff160361071d575f6040517fe602df050000000000000000000000000000000000000000000000000000000081526004016107149190610ddd565b60405180910390fd5b5f73ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff160361078d575f6040517f94280d620000000000000000000000000000000000000000000000000000000081526004016107849190610ddd565b60405180910390fd5b8160015f8673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f20819055508015610876578273fffffff
fffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9258460405161086d9190610c36565b60405180910390a35b50505050565b5f73ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16036108cc578060025f8282546108c09190610e23565b9250508190555061099a565b5f5f5f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2054905081811015610955578381836040517fe450d38c00000000000000000000000000000000000000000000000000000000815260040161094c93929190610da8565b60405180910390fd5b8181035f5f8673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2081905550505b5f73ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16036109e1578060025f8282540392505081905550610a2b565b805f5f8473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f82825401925050819055505b8173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef83604051610a889190610c36565b60405180910390a3505050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f610ad782610a95565b610ae18185610a9f565b9350610af1818560208601610aaf565b610afa81610abd565b840191505092915050565b5f6020820190508181035f830152610b1d8184610acd565b905092915050565b5f5ffd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610b5282610b29565b9050919050565b610b6281610b48565b8114610b6c575f5ffd5b50565b5f81359050610b7d81610b59565b92915050565b5f819050919050565b610b9581610b83565b8114610b9f575f5ffd5b50565b5f81359050610bb081610b8c565b92915050565b5f5f60408385031215610bcc57610bcb610b25565b5b5f610bd985828601610b6f565b9250506020610bea85828601610ba2565b9150509250929050565b5f81151590509190505
65b610c0881610bf4565b82525050565b5f602082019050610c215f830184610bff565b92915050565b610c3081610b83565b82525050565b5f602082019050610c495f830184610c27565b92915050565b5f5f5f60608486031215610c6657610c65610b25565b5b5f610c7386828701610b6f565b9350506020610c8486828701610b6f565b9250506040610c9586828701610ba2565b9150509250925092565b5f60ff82169050919050565b610cb481610c9f565b82525050565b5f602082019050610ccd5f830184610cab565b92915050565b5f60208284031215610ce857610ce7610b25565b5b5f610cf584828501610b6f565b91505092915050565b5f5f60408385031215610d1457610d13610b25565b5b5f610d2185828601610b6f565b9250506020610d3285828601610b6f565b9150509250929050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b5f6002820490506001821680610d8057607f821691505b602082108103610d9357610d92610d3c565b5b50919050565b610da281610b48565b82525050565b5f606082019050610dbb5f830186610d99565b610dc86020830185610c27565b610dd56040830184610c27565b949350505050565b5f602082019050610df05f830184610d99565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f610e2d82610b83565b9150610e3883610b83565b9250828201905080821115610e5057610e4f610df6565b5b9291505056fea2646970667358221220c2ace90351a6254148d1d6fc391d67d42f65e41f9290478674caf67a0ec34ec964736f6c634300081b0033 +``` diff --git a/docs/l2/fundamentals/state_diffs.md b/docs/l2/fundamentals/state_diffs.md index 7c894d8ede3..c554b6832e8 100644 --- a/docs/l2/fundamentals/state_diffs.md +++ b/docs/l2/fundamentals/state_diffs.md @@ -1,5 +1,9 @@ # State diffs +> [!WARNING] +> Data availability through `state diffs` has been deprecated in #5135. +> See the `Transition to RLP encoded blocks` section [here](../architecture/overview.md) for more details. + This architecture was inspired by [MatterLabs' ZKsync pubdata architecture](https://github.com/matter-labs/zksync-era/blob/main/docs/src/specs/contracts/settlement_contracts/data_availability/pubdata.md). 
To provide data availability for our blockchain, we need to publish enough information on every commit transaction to be able to reconstruct the entire state of the L2 from the beginning by querying the L1. @@ -74,4 +78,4 @@ The sequencer will then make a commitment to this encoded state diff (explained - Through the blob, the encoded state diff. > [!NOTE] -> As the blob is encoded as 4096 BLS12-381 field elements, every 32-bytes chunk cannot be greater than the subgroup `r` size: `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001`. _i.e._, the most significant byte must be less than `0x73`. To avoid conflicts, we insert a `0x00` byte before every 31-bytes chunk to ensure this condition is met. +> As the blob is encoded as 4096 BLS12-381 field elements, every 32-bytes chunk cannot be greater than the subgroup `r` size: `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001`. _i.e._, the most significant byte must be less than `0x73`. To avoid conflicts, we insert a `0x00` byte before every 31-bytes chunk to ensure this condition is met. \ No newline at end of file diff --git a/docs/l2/fundamentals/transaction_fees.md b/docs/l2/fundamentals/transaction_fees.md index 29ad1f69ba8..e18a5ccca5c 100644 --- a/docs/l2/fundamentals/transaction_fees.md +++ b/docs/l2/fundamentals/transaction_fees.md @@ -83,13 +83,13 @@ This behavior ensures that transaction senders **never pay more than `max_fee_pe ## L1 Fees L1 fees represent the cost of posting data from the L2 to the L1. -Each transaction is charged based on the amount of **L1 Blob space** it occupies (the size of the transaction’s **stateDiff**). +Each transaction is charged based on the amount of **L1 Blob space** it occupies (the size of the transaction when RLP-encoded). -After each transaction is executed, the sequencer calculates the `stateDiff` generated by that transaction. 
-The L1 fee for that transaction is computed as: +Each time a transaction is executed, the sequencer calculates its RLP-encoded size. +Then, the L1 fee for that transaction is computed as: ``` -l1_fee = blob_base_fee_per_byte * tx_state_diff_size +l1_fee = blob_base_fee_per_byte * tx_encoded_size ``` An additional amount of gas (`l1_gas`) is added to the transaction execution so that: @@ -111,7 +111,7 @@ The `L1Watcher` periodically fetches the `BlobBaseFee` from L1 (at a configured blob_base_fee_per_byte = (l1_fee_per_blob_gas * GAS_PER_BLOB) / SAFE_BYTES_PER_BLOB ``` -See [State Diffs](./state_diffs.md) for more information about how `stateDiffs` works. +See the `Data availability` section [here](../architecture/overview.md) for more information about how data availability works. L1 fee is deactivated by default. To activate it, configure the **L1 fee vault address**: diff --git a/fixtures/blobs/1-1.blob b/fixtures/blobs/1-1.blob index 755a382677e..25f9fe9d071 100644 Binary files a/fixtures/blobs/1-1.blob and b/fixtures/blobs/1-1.blob differ diff --git a/fixtures/blobs/2-1.blob b/fixtures/blobs/2-1.blob index 84e1f00cf55..6c8831f7d4a 100644 Binary files a/fixtures/blobs/2-1.blob and b/fixtures/blobs/2-1.blob differ diff --git a/fixtures/blobs/3-1.blob b/fixtures/blobs/3-1.blob index 35d8f324b00..ef597bd60ae 100644 Binary files a/fixtures/blobs/3-1.blob and b/fixtures/blobs/3-1.blob differ diff --git a/fixtures/blobs/4-1.blob b/fixtures/blobs/4-1.blob index a1f7230c8c3..29d3c81d36c 100644 Binary files a/fixtures/blobs/4-1.blob and b/fixtures/blobs/4-1.blob differ diff --git a/fixtures/blobs/5-1.blob b/fixtures/blobs/5-1.blob index 99c37a5945d..e874a286551 100644 Binary files a/fixtures/blobs/5-1.blob and b/fixtures/blobs/5-1.blob differ diff --git a/fixtures/blobs/6-1.blob b/fixtures/blobs/6-1.blob new file mode 100644 index 00000000000..c2a0c19e352 Binary files /dev/null and b/fixtures/blobs/6-1.blob differ