Skip to content

Commit de9a0b8

Browse files
authored
fix(l1): revert "fix hive ReOrg tests by requesting headers from new to old" (#4784)
Reverts #4676. Related to #4577. This reverts commit f552509, which was causing problems. The commented-out tests are from #4673.
1 parent e72a9fd commit de9a0b8

File tree

3 files changed

+28
-132
lines changed

3 files changed

+28
-132
lines changed

.github/workflows/pr-main_l1.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ jobs:
203203
ethrex_flags: ""
204204
- name: "Engine withdrawal tests"
205205
simulation: ethereum/engine
206-
test_pattern: "engine-withdrawals"
206+
test_pattern: "engine-withdrawals/Corrupted Block Hash Payload|Empty Withdrawals|engine-withdrawals test loader|GetPayloadBodies|GetPayloadV2 Block Value|Max Initcode Size|Sync after 2 blocks - Withdrawals on Genesis|Withdraw many accounts|Withdraw to a single account|Withdraw to two accounts|Withdraw zero amount|Withdraw many accounts|Withdrawals Fork on Block 1 - 1 Block Re-Org|Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload|Withdrawals Fork on Block 2|Withdrawals Fork on Block 3|Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload|Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org [^S]|Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org [^S]"
207207
- name: "Sync full"
208208
simulation: ethereum/sync
209209
test_pattern: ""

crates/networking/p2p/sync.rs

Lines changed: 9 additions & 121 deletions
Original file line numberDiff line numberDiff line change
@@ -57,12 +57,6 @@ const BYTECODE_CHUNK_SIZE: usize = 50_000;
5757
/// that are unlikely to be re-orged.
5858
const MISSING_SLOTS_PERCENTAGE: f64 = 0.8;
5959

60-
/// Searching for pending blocks to include during syncing can potentially slow down block processing if we have a large chain of pending blocks.
61-
/// For example, if we are syncing from genesis, it might take a while until this chain connects, but the searching for pending blocks loop will
62-
/// happen each time we process a batch of blocks, and can potentially take long, that's why we just limit this check to 32 blocks.
63-
/// Eventually we will implement some in-memory structure to make this check easy.
64-
const PENDING_BLOCKS_RETRIEVAL_LIMIT: usize = 32;
65-
6660
#[cfg(feature = "sync-test")]
6761
lazy_static::lazy_static! {
6862
static ref EXECUTE_BATCH_SIZE: usize = std::env::var("EXECUTE_BATCH_SIZE").map(|var| var.parse().expect("Execute batch size environmental variable is not a number")).unwrap_or(EXECUTE_BATCH_SIZE_DEFAULT);
@@ -343,16 +337,6 @@ impl Syncer {
343337
current_head, sync_head
344338
);
345339

346-
// Try syncing backwards from the sync_head to find a common ancestor
347-
if self
348-
.synced_new_to_old(&mut block_sync_state, sync_head, store)
349-
.await?
350-
{
351-
return Ok(());
352-
}
353-
// synced_new_to_old returns true in case syncing was finished from NewToOld or if the requested headers were None
354-
// if sync_finished is false that means we are more than 1024 blocks behind so, for now, we go back to syncing as it follows.
355-
// TODO: Have full syncing always be from NewToOld, issue: https://github.com/lambdaclass/ethrex/issues/4717
356340
loop {
357341
debug!("Sync Log 1: In Full Sync");
358342
debug!(
@@ -364,7 +348,7 @@ impl Syncer {
364348
block_sync_state.current_blocks.len()
365349
);
366350

367-
debug!("Requesting Block Headers from OldToNew from current_head {current_head}");
351+
debug!("Requesting Block Headers from {current_head}");
368352

369353
let Some(mut block_headers) = self
370354
.peers
@@ -387,7 +371,6 @@ impl Syncer {
387371
Some(header) => (header.hash(), header.number),
388372
None => continue,
389373
};
390-
391374
// TODO(#2126): This is just a temporary solution to avoid a bug where the sync would get stuck
392375
// on a loop when the target head is not found, i.e. on a reorg with a side-chain.
393376
if first_block_hash == last_block_hash
@@ -443,88 +426,9 @@ impl Syncer {
443426
break;
444427
};
445428
}
446-
447429
Ok(())
448430
}
449431

450-
/// Tries to perform syncing going backwards from the sync_head with one batch of requested headers.
451-
/// This is to cover the case where we are on a sidechain and the peer doesn't have our current_head
452-
/// so when requesting headers from our current_head on we get None and we never get to finish syncing.
453-
/// For more context go to the PR https://github.com/lambdaclass/ethrex/pull/4676
454-
///
455-
/// # Returns
456-
///
457-
/// Returns an error if the sync fails at any given step and aborts all active processes
458-
/// otherwise returns true whether syncing was finished or in case the request of headers returned None,
459-
/// otherwise returns false, this means we couldn't find a common ancestor within the requested headers,
460-
/// which in turn means the chain is more than 1024 blocks behind.
461-
async fn synced_new_to_old(
462-
&mut self,
463-
block_sync_state: &mut FullBlockSyncState,
464-
sync_head: H256,
465-
store: Store,
466-
) -> Result<bool, SyncError> {
467-
debug!("Sync Log 1: In Full Sync");
468-
debug!(
469-
"Sync Log 3: State current headers len {}",
470-
block_sync_state.current_headers.len()
471-
);
472-
debug!(
473-
"Sync Log 4: State current blocks len {}",
474-
block_sync_state.current_blocks.len()
475-
);
476-
477-
debug!("Requesting Block Headers from NewToOld from sync_head {sync_head}");
478-
479-
// Get oldest pending block to use in the request for headers
480-
let mut requested_header = sync_head;
481-
while let Some(block) = store.get_pending_block(requested_header).await? {
482-
requested_header = block.header.parent_hash;
483-
}
484-
485-
let Some(mut block_headers) = self
486-
.peers
487-
.request_block_headers_from_hash(requested_header, BlockRequestOrder::NewToOld)
488-
.await?
489-
else {
490-
// sync_head or sync_head parent was not found
491-
warn!("Sync failed to find target block header, aborting");
492-
debug!("Sync Log 8: Sync failed to find target block header, aborting");
493-
return Ok(true);
494-
};
495-
496-
debug!("Sync Log 9: Received {} block headers", block_headers.len());
497-
498-
let mut found_common_ancestor = false;
499-
for i in 0..block_headers.len() {
500-
if store
501-
.get_block_by_hash(block_headers[i].hash())
502-
.await?
503-
.is_some()
504-
{
505-
block_headers.drain(i..);
506-
found_common_ancestor = true;
507-
break;
508-
}
509-
}
510-
511-
if found_common_ancestor {
512-
block_headers.reverse();
513-
block_sync_state
514-
.process_incoming_headers(
515-
block_headers,
516-
sync_head,
517-
true, // sync_head_found is true because of the NewToOld headers request
518-
self.blockchain.clone(),
519-
self.peers.clone(),
520-
self.cancel_token.clone(),
521-
)
522-
.await?;
523-
return Ok(true);
524-
}
525-
Ok(false)
526-
}
527-
528432
/// Executes the given blocks and stores them
529433
/// If sync_head_found is true, they will be executed one by one
530434
/// If sync_head_found is false, they will be executed in a single batch
@@ -718,32 +622,16 @@ impl FullBlockSyncState {
718622
self.current_blocks.extend(blocks);
719623
// }
720624

721-
// We check if we have pending blocks we didn't request that are needed for syncing
722-
// Then we add it in current_blocks for execution.
723-
let mut pending_block_to_sync = vec![];
724-
let mut last_header_to_sync = sync_head;
725-
let mut pending_blocks_retieved = 0;
726-
while let Some(block) = self.store.get_pending_block(last_header_to_sync).await? {
727-
let block_parent = block.header.parent_hash;
728-
if self
729-
.current_blocks
730-
.last()
731-
.is_some_and(|block| block.hash() == block_parent)
732-
{
733-
pending_block_to_sync.push(block);
734-
sync_head_found = true;
735-
pending_block_to_sync.reverse();
736-
self.current_blocks.extend(pending_block_to_sync);
737-
break;
738-
}
739-
pending_block_to_sync.push(block);
740-
last_header_to_sync = block_parent;
741-
if pending_blocks_retieved > PENDING_BLOCKS_RETRIEVAL_LIMIT {
742-
break;
625+
// If we have the sync_head as a pending block from a new_payload request and its parent_hash matches the hash of the latest received header
626+
// we set the sync_head as found. Then we add it in current_blocks for execution.
627+
if let Some(block) = self.store.get_pending_block(sync_head).await? {
628+
if let Some(last_block) = self.current_blocks.last() {
629+
if last_block.hash() == block.header.parent_hash {
630+
self.current_blocks.push(block);
631+
sync_head_found = true;
632+
}
743633
}
744-
pending_blocks_retieved += 1;
745634
}
746-
747635
// Execute full blocks
748636
// while self.current_blocks.len() >= *EXECUTE_BATCH_SIZE
749637
// || (!self.current_blocks.is_empty() && sync_head_found)

tooling/reorgs/src/main.rs

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -33,17 +33,19 @@ async fn main() {
3333
info!("");
3434

3535
run_test(&cmd_path, no_reorgs_full_sync_smoke_test).await;
36-
run_test(&cmd_path, test_reorg_back_to_base).await;
37-
// This test is flaky 50% of the time, check that it runs correctly 30 times in a row
38-
// TODO(#4775): make it deterministic
39-
for _ in 0..30 {
40-
run_test(&cmd_path, test_chain_split).await;
41-
}
42-
run_test(&cmd_path, test_one_block_reorg_and_back).await;
43-
run_test(&cmd_path, test_reorg_back_to_base_with_common_ancestor).await;
44-
run_test(&cmd_path, test_storage_slots_reorg).await;
4536

46-
run_test(&cmd_path, test_many_blocks_reorg).await;
37+
// TODO: uncomment once #4676 is fixed
38+
// run_test(&cmd_path, test_reorg_back_to_base).await;
39+
// // This test is flaky 50% of the time, check that it runs correctly 30 times in a row
40+
// // TODO(#4775): make it deterministic
41+
// for _ in 0..30 {
42+
// run_test(&cmd_path, test_chain_split).await;
43+
// }
44+
// run_test(&cmd_path, test_one_block_reorg_and_back).await;
45+
// run_test(&cmd_path, test_reorg_back_to_base_with_common_ancestor).await;
46+
// run_test(&cmd_path, test_storage_slots_reorg).await;
47+
48+
// run_test(&cmd_path, test_many_blocks_reorg).await;
4749
}
4850

4951
async fn get_ethrex_version(cmd_path: &Path) -> String {
@@ -102,6 +104,7 @@ async fn no_reorgs_full_sync_smoke_test(simulator: Arc<Mutex<Simulator>>) {
102104
node1.update_forkchoice(&base_chain).await;
103105
}
104106

107+
#[expect(unused)]
105108
async fn test_reorg_back_to_base(simulator: Arc<Mutex<Simulator>>) {
106109
let mut simulator = simulator.lock().await;
107110

@@ -121,6 +124,7 @@ async fn test_reorg_back_to_base(simulator: Arc<Mutex<Simulator>>) {
121124
node0.update_forkchoice(&base_chain).await;
122125
}
123126

127+
#[expect(unused)]
124128
async fn test_reorg_back_to_base_with_common_ancestor(simulator: Arc<Mutex<Simulator>>) {
125129
let mut simulator = simulator.lock().await;
126130

@@ -145,6 +149,7 @@ async fn test_reorg_back_to_base_with_common_ancestor(simulator: Arc<Mutex<Simul
145149
node0.update_forkchoice(&base_chain).await;
146150
}
147151

152+
#[expect(unused)]
148153
async fn test_chain_split(simulator: Arc<Mutex<Simulator>>) {
149154
let mut simulator = simulator.lock().await;
150155

@@ -167,6 +172,7 @@ async fn test_chain_split(simulator: Arc<Mutex<Simulator>>) {
167172
node0.update_forkchoice(&base_chain).await;
168173
}
169174

175+
#[expect(unused)]
170176
async fn test_one_block_reorg_and_back(simulator: Arc<Mutex<Simulator>>) {
171177
let mut simulator = simulator.lock().await;
172178
let signer: Signer = LocalSigner::new(
@@ -237,6 +243,7 @@ async fn test_one_block_reorg_and_back(simulator: Arc<Mutex<Simulator>>) {
237243
assert_eq!(new_balance, initial_balance);
238244
}
239245

246+
#[expect(unused)]
240247
async fn test_many_blocks_reorg(simulator: Arc<Mutex<Simulator>>) {
241248
let mut simulator = simulator.lock().await;
242249
let signer: Signer = LocalSigner::new(
@@ -308,6 +315,7 @@ async fn test_many_blocks_reorg(simulator: Arc<Mutex<Simulator>>) {
308315
assert_eq!(new_balance, initial_balance + transfer_amount);
309316
}
310317

318+
#[expect(unused)]
311319
async fn test_storage_slots_reorg(simulator: Arc<Mutex<Simulator>>) {
312320
let mut simulator = simulator.lock().await;
313321
// Initcode for deploying a contract that receives two `bytes32` parameters and sets `storage[param0] = param1`

0 commit comments

Comments
 (0)