From 2da17d65b5f1f53ce0c0614bde28842569a92f45 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 26 Feb 2026 18:05:57 +0200 Subject: [PATCH 01/20] less rounds per epoch --- cmd/node/config/config.toml | 14 +++++++------- cmd/node/config/enableRounds.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 8b7c7f43df0..840637a346e 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -43,9 +43,9 @@ # ChainParametersByEpoch defines chain operation configurable values that can be modified based on epochs ChainParametersByEpoch = [ - { EnableEpoch = 0, RoundDuration = 6000, RoundsPerEpoch = 200, MinRoundsBetweenEpochs = 20, ShardConsensusGroupSize = 7, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false, Offset = 0 }, - { EnableEpoch = 1, RoundDuration = 6000, RoundsPerEpoch = 200, MinRoundsBetweenEpochs = 20, ShardConsensusGroupSize = 10, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false, Offset = 0 }, - { EnableEpoch = 2, RoundDuration = 600, RoundsPerEpoch = 2000, MinRoundsBetweenEpochs = 50, ShardConsensusGroupSize = 10, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false, Offset = 2 } + { EnableEpoch = 0, RoundDuration = 6000, RoundsPerEpoch = 100, MinRoundsBetweenEpochs = 20, ShardConsensusGroupSize = 7, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false, Offset = 0 }, + { EnableEpoch = 1, RoundDuration = 6000, RoundsPerEpoch = 100, MinRoundsBetweenEpochs = 20, ShardConsensusGroupSize = 10, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false, Offset = 0 }, + { EnableEpoch = 2, RoundDuration = 600, 
RoundsPerEpoch = 1000, MinRoundsBetweenEpochs = 50, ShardConsensusGroupSize = 10, ShardMinNumNodes = 10, MetachainConsensusGroupSize = 10, MetachainMinNumNodes = 10, Hysteresis = 0.2, Adaptivity = false, Offset = 2 } ] # EpochChangeGracePeriodEnableEpoch represents the configuration of different grace periods for epoch change with their activation epochs @@ -81,7 +81,7 @@ NumHeadersToRequestInAdvance = 10 }, { - EnableRound = 440, + EnableRound = 260, MaxRoundsWithoutNewBlockReceived = 100, MaxRoundsWithoutCommittedBlock = 100, RoundModulusTriggerWhenSyncIsStuck = 200, @@ -102,7 +102,7 @@ EpochStartConfigsByRound = [ { EnableRound = 0, MaxRoundsWithoutCommittedStartInEpochBlock = 50 }, - { EnableRound = 440, MaxRoundsWithoutCommittedStartInEpochBlock = 500 }, + { EnableRound = 260, MaxRoundsWithoutCommittedStartInEpochBlock = 500 }, ] ConsensusConfigsByEpoch = [ @@ -121,7 +121,7 @@ { StartEpoch = 1, StartRound = 0, Version = "2" }, # TODO The value of StartEpoch parameter for version 3 should be the same with the SupernovaEnableEpoch flag from enableEpoch.toml file and StartRound the same with the SupernovaEnableRound flag from enableRounds.toml - { StartEpoch = 2, StartRound = 440, Version = "3" }, + { StartEpoch = 2, StartRound = 260, Version = "3" }, ] [StoragePruning] @@ -713,7 +713,7 @@ MaxDeviationTimeInMilliseconds = 25 [[Antiflood.ConfigsByRound]] - Round = 440 + Round = 260 NumConcurrentResolverJobs = 50 # TODO: analyse for meta on bootstrap there was a constant used with value 10 NumConcurrentResolvingTrieNodesJobs = 3 diff --git a/cmd/node/config/enableRounds.toml b/cmd/node/config/enableRounds.toml index d8676ffe3d8..33d440b1f9d 100644 --- a/cmd/node/config/enableRounds.toml +++ b/cmd/node/config/enableRounds.toml @@ -14,4 +14,4 @@ [RoundActivations.SupernovaEnableRound] Options = [] - Round = "440" + Round = "260" From 95ba5fe99bfa5fc3735ae107d9178dcee008224e Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 24 Mar 2026 15:23:27 +0200 Subject: [PATCH 
02/20] trigger trie prunning from async execution flow --- integrationTests/mock/blockProcessorMock.go | 8 +++ process/asyncExecution/headersExecutor.go | 2 + process/asyncExecution/interface.go | 1 + process/block/baseProcess.go | 49 +++++++++++++++++++ process/block/export_test.go | 4 +- process/block/interface.go | 1 + process/block/metablock.go | 15 +++--- process/block/shardblock.go | 14 ++++-- process/block/shardblock_test.go | 4 +- process/common.go | 29 +++++++++++ process/interface.go | 1 + testscommon/blockProcessorStub.go | 9 ++++ .../processMocks/blockProcessorStub.go | 8 +++ 13 files changed, 133 insertions(+), 12 deletions(-) diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 34b8a06e02d..80355b428f1 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -43,6 +43,7 @@ type BlockProcessorMock struct { ) error OnExecutedBlockCalled func(header data.HeaderHandler, rootHash []byte) error ProposedDirectSentTransactionsToBroadcastCalled func(proposedBody data.BodyHandler) map[string][][]byte + PruneTrieAsyncHeaderCalled func(header data.HeaderHandler) } // ProcessBlock mocks processing a block @@ -231,6 +232,13 @@ func (bpm *BlockProcessorMock) RemoveHeaderFromPool(headerHash []byte) { } } +// PruneTrieAsyncHeader - +func (bpm *BlockProcessorMock) PruneTrieAsyncHeader(header data.HeaderHandler) { + if bpm.PruneTrieAsyncHeaderCalled != nil { + bpm.PruneTrieAsyncHeaderCalled(header) + } +} + // VerifyBlockProposal - func (bpm *BlockProcessorMock) VerifyBlockProposal( headerHandler data.HeaderHandler, diff --git a/process/asyncExecution/headersExecutor.go b/process/asyncExecution/headersExecutor.go index 7318915f49d..bc596828b20 100644 --- a/process/asyncExecution/headersExecutor.go +++ b/process/asyncExecution/headersExecutor.go @@ -364,6 +364,8 @@ func (he *headersExecutor) process(pair cache.HeaderBodyPair) error { return nil } + 
he.blockProcessor.PruneTrieAsyncHeader(he.blockChain.GetCurrentBlockHeader()) + he.blockChain.SetFinalBlockInfo( executionResult.GetHeaderNonce(), executionResult.GetHeaderHash(), diff --git a/process/asyncExecution/interface.go b/process/asyncExecution/interface.go index 819cadb530c..17e16456861 100644 --- a/process/asyncExecution/interface.go +++ b/process/asyncExecution/interface.go @@ -25,5 +25,6 @@ type BlockProcessor interface { ProcessBlockProposal(header data.HeaderHandler, headerHash []byte, body data.BodyHandler) (data.BaseExecutionResultHandler, error) CommitBlockProposalState(headerHandler data.HeaderHandler) error RevertBlockProposalState() + PruneTrieAsyncHeader(header data.HeaderHandler) IsInterfaceNil() bool } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 81b0e1ad9d1..0ecf15a9644 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -152,6 +152,9 @@ type baseProcessor struct { aotSelector process.AOTTransactionSelector maxProposalNonceGap uint64 closingNodeStarted *atomic.Bool + + lastPrunedHeaderNonce uint64 + mutLastPrunedHeader sync.RWMutex } type bootStorerDataArgs struct { @@ -4032,3 +4035,49 @@ func (bp *baseProcessor) saveEpochStartEconomicsMetrics(epochStartMetaBlock data bp.appStatusHandler.SetStringValue(common.MetricTotalFees, epochStartMetaBlock.GetAccumulatedFeesInEpoch().String()) bp.appStatusHandler.SetStringValue(common.MetricDevRewardsInEpoch, epochStartMetaBlock.GetDevFeesInEpoch().String()) } + +// PruneTrieAsyncHeader will trigger trie pruning for header from async execution flow +func (bp *baseProcessor) PruneTrieAsyncHeader( + header data.HeaderHandler, +) { + bp.mutLastPrunedHeader.Lock() + defer bp.mutLastPrunedHeader.Unlock() + + if bp.lastPrunedHeaderNonce == 0 { + // last pruned header nonce not set, trigger prune trie for the provided header + bp.blockProcessor.pruneTrieHeaderV3(header) + bp.lastPrunedHeaderNonce = header.GetNonce() + return + } + + if 
header.GetNonce() <= bp.lastPrunedHeaderNonce { + return + } + + // prune trie for intermediate headers + for nonce := bp.lastPrunedHeaderNonce; nonce < header.GetNonce(); nonce++ { + // headers pool is cleaned on consensus flow based on last execution result + // included on the committed header (plus some delta), so intermediate header + // should be available in pool, since trie prunning is triggered from + // execution flow; if there are no included blocks from execution flow + // (and not prunning triggerd) headers will not be removed from pool + intermHeader, _, err := process.GetHeaderWithNonce( + nonce, + header.GetShardID(), + bp.dataPool.Headers(), + bp.marshalizer, + bp.store, + bp.uint64Converter, + ) + if err != nil { + log.Warn("failed to get intermediate header for prunning", "error", err) + continue + } + + bp.blockProcessor.pruneTrieHeaderV3(intermHeader) + } + + // prune trie for the provided header + bp.blockProcessor.pruneTrieHeaderV3(header) + bp.lastPrunedHeaderNonce = header.GetNonce() +} diff --git a/process/block/export_test.go b/process/block/export_test.go index df76ca3b359..10f4f01a459 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -135,8 +135,8 @@ func (sp *shardProcessor) UpdateStateStorage(finalHeaders []data.HeaderHandler, } // PruneTrieHeaderV3 - -func (sp *shardProcessor) PruneTrieHeaderV3(executionResultsHandlers []data.BaseExecutionResultHandler) { - sp.pruneTrieHeaderV3(executionResultsHandlers) +func (sp *shardProcessor) PruneTrieHeaderV3(header data.HeaderHandler) { + sp.pruneTrieHeaderV3(header) } // NewShardProcessorEmptyWith3shards - diff --git a/process/block/interface.go b/process/block/interface.go index 895ea893db1..ac7dc952416 100644 --- a/process/block/interface.go +++ b/process/block/interface.go @@ -12,6 +12,7 @@ import ( type blockProcessor interface { removeStartOfEpochBlockDataFromPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error + 
pruneTrieHeaderV3(header data.HeaderHandler) } type gasConsumedProvider interface { diff --git a/process/block/metablock.go b/process/block/metablock.go index 55b22f59eca..a5fe4091f47 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1634,7 +1634,7 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock mp.accountsDB[state.PeerAccountsState], ) } else { - mp.pruneTriesHeaderV3(metaBlock, prevMetaBlock) + mp.pruneTrieHeaderV3(metaBlock) } outportFinalizedHeaderHash := metaBlockHash @@ -1646,9 +1646,8 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock mp.blockChain.SetFinalBlockInfo(metaBlock.GetNonce(), metaBlockHash, rootHash) } -func (mp *metaProcessor) pruneTriesHeaderV3( - metaBlock data.MetaHeaderHandler, - prevMetaBlock data.MetaHeaderHandler, +func (mp *metaProcessor) pruneTrieHeaderV3( + metaBlock data.HeaderHandler, ) { accountsDb := mp.accountsDB[state.UserAccountsState] peerAccountsDb := mp.accountsDB[state.PeerAccountsState] @@ -1666,7 +1665,7 @@ func (mp *metaProcessor) pruneTriesHeaderV3( "currentExecResType", fmt.Sprintf("%T", execResults[i])) continue } - prevExecRes, err := mp.getPreviousExecutionResult(i, execResults, prevMetaBlock, prevMetaBlockHash) + prevExecRes, err := mp.getPreviousExecutionResult(i, execResults, prevMetaBlockHash) if err != nil { log.Warn("failed to get previous execution result for pruning", "err", err, @@ -1708,7 +1707,6 @@ func (mp *metaProcessor) pruneTriesHeaderV3( func (mp *metaProcessor) getPreviousExecutionResult( index int, executionResultsHandlers []data.BaseExecutionResultHandler, - prevMetaBlock data.MetaHeaderHandler, prevMetaBlockHash []byte, ) (data.BaseMetaExecutionResultHandler, error) { if index > 0 { @@ -1719,6 +1717,11 @@ func (mp *metaProcessor) getPreviousExecutionResult( return metaExecRes, nil } + prevMetaBlock, err := process.GetMetaHeader(prevMetaBlockHash, mp.dataPool.Headers(), mp.marshalizer, mp.store) + if err 
!= nil { + return nil, err + } + if prevMetaBlock.IsHeaderV3() { lastExecRes := prevMetaBlock.GetLastExecutionResultHandler() if check.IfNil(lastExecRes) { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 95ab872561c..f905a0a453c 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1247,7 +1247,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade if !currentHeader.IsHeaderV3() { sp.pruneTrieLegacy(headers) } else { - sp.pruneTrieHeaderV3(currentHeader.GetExecutionResultsHandlers()) + sp.pruneTrieHeaderV3(currentHeader) if currentHeader.IsStartOfEpochBlock() { sp.nodesCoordinator.ShuffleOutForEpoch(currentHeader.GetEpoch()) @@ -1265,7 +1265,11 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade sp.setFinalBlockInfo(currentHeader, currentHeaderHash, scheduledHeaderRootHash) } -func (sp *shardProcessor) pruneTrieHeaderV3(executionResultsHandlers []data.BaseExecutionResultHandler) { +func (sp *shardProcessor) pruneTrieHeaderV3( + header data.HeaderHandler, +) { + executionResultsHandlers := header.GetExecutionResultsHandlers() + accountsDb := sp.accountsDB[state.UserAccountsState] if !accountsDb.IsPruningEnabled() { return @@ -1303,11 +1307,15 @@ func (sp *shardProcessor) pruneTrieHeaderV3(executionResultsHandlers []data.Base } } -func (sp *shardProcessor) getPreviousExecutionResult(index int, executionResultsHandlers []data.BaseExecutionResultHandler) (data.BaseExecutionResultHandler, error) { +func (sp *shardProcessor) getPreviousExecutionResult( + index int, + executionResultsHandlers []data.BaseExecutionResultHandler, +) (data.BaseExecutionResultHandler, error) { if index > 0 { return executionResultsHandlers[index-1], nil } + // TODO: analyse based on current header to be committed, not last committed header prevHeaderHash := sp.getCurrentBlockHeader().GetPrevHash() prevHeader, err := process.GetShardHeader(prevHeaderHash, sp.dataPool.Headers(), 
sp.marshalizer, sp.store) if err != nil { diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index dca5c377789..11425cae07b 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -7143,8 +7143,10 @@ func pruneTrieHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootHash }, }, } + header1 := &block.HeaderV3{} + header1.SetExecutionResultsHandlers(executionResultsHandlers) - sp.PruneTrieHeaderV3(executionResultsHandlers) + sp.PruneTrieHeaderV3(header1) assert.Equal(t, 2, pruneCalled) assert.Equal(t, 2, cancelPruneCalled) diff --git a/process/common.go b/process/common.go index dddbde0e2ec..294ac56dc8b 100644 --- a/process/common.go +++ b/process/common.go @@ -775,6 +775,35 @@ func GetSortedStorageUpdates(account *vmcommon.OutputAccount) []*vmcommon.Storag return storageUpdates } +// GetHeaderWithNonce tries to get the header by nonce from pool first and if not found, searches for it through storer +func GetHeaderWithNonce( + nonce uint64, + shardId uint32, + headersCacher dataRetriever.HeadersPool, + marshalizer marshal.Marshalizer, + storageService dataRetriever.StorageService, + uint64Converter typeConverters.Uint64ByteSliceConverter, +) (data.HeaderHandler, []byte, error) { + if shardId == core.MetachainShardId { + return GetMetaHeaderWithNonce( + nonce, + headersCacher, + marshalizer, + storageService, + uint64Converter, + ) + } + + return GetShardHeaderWithNonce( + nonce, + shardId, + headersCacher, + marshalizer, + storageService, + uint64Converter, + ) +} + // GetHeader tries to get the header from pool first and if not found, searches for it through storer func GetHeader( headerHash []byte, diff --git a/process/interface.go b/process/interface.go index 0b168bb6529..dce8c80e2ad 100644 --- a/process/interface.go +++ b/process/interface.go @@ -319,6 +319,7 @@ type BlockProcessor interface { ) error OnExecutedBlock(header data.HeaderHandler, rootHash []byte) error 
ProposedDirectSentTransactionsToBroadcast(proposedBody data.BodyHandler) map[string][][]byte + PruneTrieAsyncHeader(header data.HeaderHandler) Close() error IsInterfaceNil() bool } diff --git a/testscommon/blockProcessorStub.go b/testscommon/blockProcessorStub.go index 6ae430fbea9..aedd02f7118 100644 --- a/testscommon/blockProcessorStub.go +++ b/testscommon/blockProcessorStub.go @@ -48,6 +48,7 @@ type BlockProcessorStub struct { OnExecutedBlockCalled func(header data.HeaderHandler, rootHash []byte) error RemoveHeaderFromPoolCalled func(headerHash []byte) ProposedDirectSentTransactionsToBroadcastCalled func(proposedBody data.BodyHandler) map[string][][]byte + PruneTrieAsyncHeaderCalled func(header data.HeaderHandler) } // SetNumProcessedObj - @@ -293,6 +294,14 @@ func (bps *BlockProcessorStub) ProposedDirectSentTransactionsToBroadcast(propose return nil } +// PruneTrieAsyncHeader - +func (bps *BlockProcessorStub) PruneTrieAsyncHeader(header data.HeaderHandler) { + if bps.PruneTrieAsyncHeaderCalled != nil { + bps.PruneTrieAsyncHeaderCalled(header) + } + +} + // IsInterfaceNil returns true if there is no value under the interface func (bps *BlockProcessorStub) IsInterfaceNil() bool { return bps == nil diff --git a/testscommon/processMocks/blockProcessorStub.go b/testscommon/processMocks/blockProcessorStub.go index 63810b5a430..aad70a3a8bb 100644 --- a/testscommon/processMocks/blockProcessorStub.go +++ b/testscommon/processMocks/blockProcessorStub.go @@ -7,6 +7,7 @@ type BlockProcessorStub struct { ProcessBlockProposalCalled func(handler data.HeaderHandler, headerHash []byte, body data.BodyHandler) (data.BaseExecutionResultHandler, error) CommitBlockProposalStateCalled func(headerHandler data.HeaderHandler) error RevertBlockProposalStateCalled func() + PruneTrieAsyncHeaderCalled func(header data.HeaderHandler) } // ProcessBlockProposal - @@ -34,6 +35,13 @@ func (bp *BlockProcessorStub) RevertBlockProposalState() { } } +// PruneTrieAsyncHeader - +func (bp 
*BlockProcessorStub) PruneTrieAsyncHeader(header data.HeaderHandler) { + if bp.PruneTrieAsyncHeaderCalled != nil { + bp.PruneTrieAsyncHeaderCalled(header) + } +} + // IsInterfaceNil - func (bp *BlockProcessorStub) IsInterfaceNil() bool { return bp == nil From 1be030252348a1704da5b662e60f4e5645cfb621 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 24 Mar 2026 15:28:05 +0200 Subject: [PATCH 03/20] fix intermediate nonce index --- process/block/baseProcess.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 0ecf15a9644..f68d94a38e1 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -4054,8 +4054,8 @@ func (bp *baseProcessor) PruneTrieAsyncHeader( return } - // prune trie for intermediate headers - for nonce := bp.lastPrunedHeaderNonce; nonce < header.GetNonce(); nonce++ { + // prune trie for intermediate headers if needed + for nonce := bp.lastPrunedHeaderNonce + 1; nonce < header.GetNonce(); nonce++ { // headers pool is cleaned on consensus flow based on last execution result // included on the committed header (plus some delta), so intermediate header // should be available in pool, since trie prunning is triggered from From 322b33c7d142c49bc491302e5a4e9d9a444031c6 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 24 Mar 2026 16:14:47 +0200 Subject: [PATCH 04/20] add unit tests --- process/block/baseProcess.go | 2 +- process/block/baseProcess_test.go | 200 ++++++++++++++++++++++++++++++ process/block/export_test.go | 16 +++ process/block/shardblock.go | 2 +- 4 files changed, 218 insertions(+), 2 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index f68d94a38e1..65606de51be 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -4054,7 +4054,7 @@ func (bp *baseProcessor) PruneTrieAsyncHeader( return } - // prune trie for intermediate headers if needed + // prune trie for intermediate headers 
for nonce := bp.lastPrunedHeaderNonce + 1; nonce < header.GetNonce(); nonce++ { // headers pool is cleaned on consensus flow based on last execution result // included on the committed header (plus some delta), so intermediate header diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index bba9a14e690..740ba4e8877 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -5904,3 +5904,203 @@ func TestBaseProcessor_WaitForExecutionResultsVerification(t *testing.T) { require.Equal(t, int32(1), callCount.Load()) }) } + +func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { + t.Parallel() + + t.Run("last pruned header not set, should trigger provided header", func(t *testing.T) { + t.Parallel() + + cancelPruneCalled := false + pruneTrieCalled := false + + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { + return true + }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalled = true + }, + PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { + pruneTrieCalled = true + }, + } + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + require.Equal(t, uint64(0), bp.GetLastPrunedNonce()) + + rootHash1 := []byte("rootHash1") + + executionResultsHandlers := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: rootHash1, + }, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: []byte("some other root hash"), + }, + }, + } + header1 := &block.HeaderV3{ + Nonce: 10, + } + header1.SetExecutionResultsHandlers(executionResultsHandlers) + bp.PruneTrieAsyncHeader(header1) + + require.True(t, cancelPruneCalled) + require.True(t, pruneTrieCalled) + + 
require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) + }) + + t.Run("header nonce lower than last pruned header, should not trigger", func(t *testing.T) { + t.Parallel() + + cancelPruneCalled := false + pruneTrieCalled := false + + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { + return true + }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalled = true + }, + PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { + pruneTrieCalled = true + }, + } + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + bp.SetLastPrunedNonce(10) + + header2 := &block.HeaderV3{ + Nonce: 9, + } + bp.PruneTrieAsyncHeader(header2) + require.False(t, cancelPruneCalled) + require.False(t, pruneTrieCalled) + + require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) + }) + + t.Run("should trigger multiple times for intermediate headers", func(t *testing.T) { + t.Parallel() + + cancelPruneCalled := 0 + pruneTrieCalled := 0 + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { + return true + }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalled++ + }, + PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { + pruneTrieCalled++ + }, + } + + rootHash1 := []byte("rootHash1") + executionResultsHandlers := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: rootHash1, + }, + }, + 
&block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: []byte("some other root hash"), + }, + }, + } + header1 := &block.HeaderV3{ + Nonce: 8, + LastExecutionResult: &block.ExecutionResultInfo{}, + } + header1.SetExecutionResultsHandlers(executionResultsHandlers) + + rootHash2 := []byte("rootHash2") + executionResultsHandlers = []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: rootHash2, + }, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: []byte("some other root hash6"), + }, + }, + } + header2 := &block.HeaderV3{ + Nonce: 9, + } + header2.SetExecutionResultsHandlers(executionResultsHandlers) + + headerCalls := 0 + headerHashCalls := 0 + headersPool := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if headerCalls == 0 { + headerCalls++ + return []data.HeaderHandler{header2}, [][]byte{[]byte("hash1")}, nil + } + + return []data.HeaderHandler{}, [][]byte{}, nil + }, + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + if headerHashCalls == 0 { + headerHashCalls++ + return header1, nil + } + return header2, nil + }, + } + dataPool := initDataPool() + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { + return headersPool + } + dataComponents.DataPool = dataPool + + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + bp.SetLastPrunedNonce(8) + + rootHash3 := []byte("rootHash3") + + executionResultsHandlers = []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: rootHash3, + }, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: []byte("some other root hash2"), + }, + }, + } + header3 := &block.HeaderV3{ + Nonce: 10, + } + 
header1.SetExecutionResultsHandlers(executionResultsHandlers) + bp.PruneTrieAsyncHeader(header3) + + require.Equal(t, 2, cancelPruneCalled) + require.Equal(t, 2, pruneTrieCalled) + + require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) + }) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index 10f4f01a459..37ca53c5acb 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -1188,3 +1188,19 @@ func (bp *baseProcessor) WaitForExecutionResultsVerification( ) error { return bp.waitForExecutionResultsVerification(header, haveTime) } + +// SetLastPrunedNonce - +func (bp *baseProcessor) SetLastPrunedNonce(nonce uint64) { + bp.mutLastPrunedHeader.Lock() + bp.lastPrunedHeaderNonce = nonce + bp.mutLastPrunedHeader.Unlock() +} + +// GetLastPrunedNonce - +func (bp *baseProcessor) GetLastPrunedNonce() uint64 { + bp.mutLastPrunedHeader.RLock() + lastPrunedNonce := bp.lastPrunedHeaderNonce + bp.mutLastPrunedHeader.RUnlock() + + return lastPrunedNonce +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index f905a0a453c..724e99020be 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1247,7 +1247,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade if !currentHeader.IsHeaderV3() { sp.pruneTrieLegacy(headers) } else { - sp.pruneTrieHeaderV3(currentHeader) + // for header v3, trie prunning is triggered in async mode from headers executor if currentHeader.IsStartOfEpochBlock() { sp.nodesCoordinator.ShuffleOutForEpoch(currentHeader.GetEpoch()) From 247357fb85fb8854f7969e67c66120239effdc31 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 24 Mar 2026 16:22:17 +0200 Subject: [PATCH 05/20] tests fix --- process/block/baseProcess_test.go | 8 ++++---- testscommon/blockProcessorStub.go | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 740ba4e8877..6f969a155f2 100644 
--- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -5948,7 +5948,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { header1 := &block.HeaderV3{ Nonce: 10, } - header1.SetExecutionResultsHandlers(executionResultsHandlers) + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) bp.PruneTrieAsyncHeader(header1) require.True(t, cancelPruneCalled) @@ -6028,7 +6028,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { Nonce: 8, LastExecutionResult: &block.ExecutionResultInfo{}, } - header1.SetExecutionResultsHandlers(executionResultsHandlers) + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) rootHash2 := []byte("rootHash2") executionResultsHandlers = []data.BaseExecutionResultHandler{ @@ -6046,7 +6046,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { header2 := &block.HeaderV3{ Nonce: 9, } - header2.SetExecutionResultsHandlers(executionResultsHandlers) + _ = header2.SetExecutionResultsHandlers(executionResultsHandlers) headerCalls := 0 headerHashCalls := 0 @@ -6095,7 +6095,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { header3 := &block.HeaderV3{ Nonce: 10, } - header1.SetExecutionResultsHandlers(executionResultsHandlers) + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) bp.PruneTrieAsyncHeader(header3) require.Equal(t, 2, cancelPruneCalled) diff --git a/testscommon/blockProcessorStub.go b/testscommon/blockProcessorStub.go index aedd02f7118..d8ca065de28 100644 --- a/testscommon/blockProcessorStub.go +++ b/testscommon/blockProcessorStub.go @@ -299,7 +299,6 @@ func (bps *BlockProcessorStub) PruneTrieAsyncHeader(header data.HeaderHandler) { if bps.PruneTrieAsyncHeaderCalled != nil { bps.PruneTrieAsyncHeaderCalled(header) } - } // IsInterfaceNil returns true if there is no value under the interface From e77ce87c9f945e5a7eb7da45f4e8670f41c613af Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 24 Mar 2026 16:28:01 +0200 Subject: [PATCH 06/20] 
tests fix --- process/block/shardblock_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 11425cae07b..3b1d081460c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -7144,7 +7144,7 @@ func pruneTrieHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootHash }, } header1 := &block.HeaderV3{} - header1.SetExecutionResultsHandlers(executionResultsHandlers) + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) sp.PruneTrieHeaderV3(header1) From 1714b9332e0779798961acc25b4a097b1c8e5577 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 25 Mar 2026 17:38:07 +0200 Subject: [PATCH 07/20] remove prune trie for metablock v2 flow --- process/block/metablock.go | 4 +- process/block/metablock_test.go | 68 +++++++++++++++++---------------- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index a5fe4091f47..b5512de5db7 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1633,10 +1633,10 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock prevMetaBlock.GetValidatorStatsRootHash(), mp.accountsDB[state.PeerAccountsState], ) - } else { - mp.pruneTrieHeaderV3(metaBlock) } + // for header v3, trie prunning is triggered in async mode from headers executor + outportFinalizedHeaderHash := metaBlockHash if !common.IsFlagEnabledAfterEpochsStartBlock(metaBlock, mp.enableEpochsHandler, common.AndromedaFlag) { outportFinalizedHeaderHash = metaBlock.GetPrevHash() diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 5be250723d9..97df7385557 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -4061,6 +4061,41 @@ func TestMetaProcessor_CrossChecksBlockHeightsMetrics(t *testing.T) { requireInstance.Equal(uint64(39), 
savedMetrics["erd_cross_check_block_height_2"]) } +func TestMetaProcessor_PruneTrieAsyncHeader(t *testing.T) { + t.Parallel() + + t.Run("prune trie for headerV3, prev header is headerV2", func(t *testing.T) { + t.Parallel() + + rootHash1 := []byte("state root hash 1") + validatorStatsRootHash1 := []byte("validator stats root hash 1") + + prevHeader := &block.MetaBlock{ + RootHash: rootHash1, + ValidatorStatsRootHash: validatorStatsRootHash1, + } + pruneTrieForHeaderV3Test(t, prevHeader, rootHash1, validatorStatsRootHash1) + }) + + t.Run("prune trie for headerV3, prev header is headerV3", func(t *testing.T) { + t.Parallel() + + rootHash1 := []byte("state root hash 1") + validatorStatsRootHash1 := []byte("validator stats root hash 1") + prevHeader := &block.MetaBlockV3{ + LastExecutionResult: &block.MetaExecutionResultInfo{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + RootHash: rootHash1, + }, + ValidatorStatsRootHash: validatorStatsRootHash1, + }, + }, + } + pruneTrieForHeaderV3Test(t, prevHeader, rootHash1, validatorStatsRootHash1) + }) +} + func TestMetaProcessor_UpdateState(t *testing.T) { t.Parallel() @@ -4127,36 +4162,6 @@ func TestMetaProcessor_UpdateState(t *testing.T) { assert.True(t, cancelPruneCalledForUserAccounts) assert.True(t, cancelPruneCalledForPeerAccounts) }) - t.Run("prune trie for headerV3, prev header is headerV2", func(t *testing.T) { - t.Parallel() - - rootHash1 := []byte("state root hash 1") - validatorStatsRootHash1 := []byte("validator stats root hash 1") - - prevHeader := &block.MetaBlock{ - RootHash: rootHash1, - ValidatorStatsRootHash: validatorStatsRootHash1, - } - pruneTrieForHeaderV3Test(t, prevHeader, rootHash1, validatorStatsRootHash1) - - }) - t.Run("prune trie for headerV3, prev header is headerV3", func(t *testing.T) { - t.Parallel() - - rootHash1 := []byte("state root hash 1") - validatorStatsRootHash1 := []byte("validator stats root hash 1") - prevHeader := 
&block.MetaBlockV3{ - LastExecutionResult: &block.MetaExecutionResultInfo{ - ExecutionResult: &block.BaseMetaExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: rootHash1, - }, - ValidatorStatsRootHash: validatorStatsRootHash1, - }, - }, - } - pruneTrieForHeaderV3Test(t, prevHeader, rootHash1, validatorStatsRootHash1) - }) } func pruneTrieForHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootHash1 []byte, validatorStatsRootHash1 []byte) { @@ -4238,7 +4243,6 @@ func pruneTrieForHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootH mp, _ := processBlock.NewMetaProcessor(arguments) - metaBlockHash := []byte("meta block hash") metaBlock := &block.MetaBlockV3{ PrevHash: []byte("hash"), ExecutionResults: []*block.MetaExecutionResult{ @@ -4269,7 +4273,7 @@ func pruneTrieForHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootH }, } - mp.UpdateState(metaBlock, metaBlockHash) + mp.PruneTrieAsyncHeader(metaBlock) assert.Equal(t, 2, pruneCalledForUserAccounts) assert.Equal(t, 2, pruneCalledForPeerAccounts) From 6ac73756b16b424aac8935e4201c79e59e931d6a Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 26 Mar 2026 11:05:09 +0200 Subject: [PATCH 08/20] handle intermediate blocks based on hashes instead of nonces --- process/asyncExecution/headersExecutor.go | 2 +- process/asyncExecution/interface.go | 2 +- process/block/baseProcess.go | 59 +++++++++++------ process/block/baseProcess_test.go | 79 +++++++++++++++++------ process/block/export_test.go | 14 ++-- process/block/metablock_test.go | 15 ++++- process/block/shardblock.go | 5 +- process/interface.go | 2 +- 8 files changed, 120 insertions(+), 58 deletions(-) diff --git a/process/asyncExecution/headersExecutor.go b/process/asyncExecution/headersExecutor.go index bc596828b20..088c10907f8 100644 --- a/process/asyncExecution/headersExecutor.go +++ b/process/asyncExecution/headersExecutor.go @@ -364,7 +364,7 @@ func (he *headersExecutor) process(pair cache.HeaderBodyPair) 
error { return nil } - he.blockProcessor.PruneTrieAsyncHeader(he.blockChain.GetCurrentBlockHeader()) + he.blockProcessor.PruneTrieAsyncHeader() he.blockChain.SetFinalBlockInfo( executionResult.GetHeaderNonce(), diff --git a/process/asyncExecution/interface.go b/process/asyncExecution/interface.go index 17e16456861..23012a6d742 100644 --- a/process/asyncExecution/interface.go +++ b/process/asyncExecution/interface.go @@ -25,6 +25,6 @@ type BlockProcessor interface { ProcessBlockProposal(header data.HeaderHandler, headerHash []byte, body data.BodyHandler) (data.BaseExecutionResultHandler, error) CommitBlockProposalState(headerHandler data.HeaderHandler) error RevertBlockProposalState() - PruneTrieAsyncHeader(header data.HeaderHandler) + PruneTrieAsyncHeader() IsInterfaceNil() bool } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 65606de51be..0a151d9668e 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -153,8 +153,8 @@ type baseProcessor struct { maxProposalNonceGap uint64 closingNodeStarted *atomic.Bool - lastPrunedHeaderNonce uint64 - mutLastPrunedHeader sync.RWMutex + lastPrunedHeaderHash []byte + mutLastPrunedHeader sync.RWMutex } type bootStorerDataArgs struct { @@ -4037,47 +4037,64 @@ func (bp *baseProcessor) saveEpochStartEconomicsMetrics(epochStartMetaBlock data } // PruneTrieAsyncHeader will trigger trie pruning for header from async execution flow -func (bp *baseProcessor) PruneTrieAsyncHeader( - header data.HeaderHandler, -) { +func (bp *baseProcessor) PruneTrieAsyncHeader() { bp.mutLastPrunedHeader.Lock() defer bp.mutLastPrunedHeader.Unlock() - if bp.lastPrunedHeaderNonce == 0 { - // last pruned header nonce not set, trigger prune trie for the provided header + header := bp.blockChain.GetCurrentBlockHeader() + headerHash := bp.blockChain.GetCurrentBlockHeaderHash() + + if len(bp.lastPrunedHeaderHash) == 0 { + // last pruned header hash not set, trigger prune trie for the provided header 
bp.blockProcessor.pruneTrieHeaderV3(header) - bp.lastPrunedHeaderNonce = header.GetNonce() + bp.lastPrunedHeaderHash = headerHash return } - if header.GetNonce() <= bp.lastPrunedHeaderNonce { + if bytes.Equal(headerHash, bp.lastPrunedHeaderHash) { return } - // prune trie for intermediate headers - for nonce := bp.lastPrunedHeaderNonce + 1; nonce < header.GetNonce(); nonce++ { + bp.pruneTrieForHeadersUnprotected(headerHash, header) + + bp.lastPrunedHeaderHash = headerHash +} + +func (bp *baseProcessor) pruneTrieForHeadersUnprotected( + headerHash []byte, + header data.HeaderHandler, +) { + headersToPrune := make([]data.HeaderHandler, 0) + + lastPrunedHeaderHash := bp.lastPrunedHeaderHash + walkerHash := headerHash + + for !bytes.Equal(walkerHash, lastPrunedHeaderHash) { // headers pool is cleaned on consensus flow based on last execution result - // included on the committed header (plus some delta), so intermediate header + // included on the committed header (plus some delta), so intermediate headers // should be available in pool, since trie prunning is triggered from // execution flow; if there are no included blocks from execution flow // (and not prunning triggerd) headers will not be removed from pool - intermHeader, _, err := process.GetHeaderWithNonce( - nonce, - header.GetShardID(), + header, err := process.GetHeader( + walkerHash, bp.dataPool.Headers(), - bp.marshalizer, bp.store, - bp.uint64Converter, + bp.marshalizer, + header.GetShardID(), ) if err != nil { + // TODO: handle pruning eviction list cleanup log.Warn("failed to get intermediate header for prunning", "error", err) continue } - bp.blockProcessor.pruneTrieHeaderV3(intermHeader) + headersToPrune = append(headersToPrune, header) + + walkerHash = header.GetPrevHash() } - // prune trie for the provided header - bp.blockProcessor.pruneTrieHeaderV3(header) - bp.lastPrunedHeaderNonce = header.GetNonce() + for i := len(headersToPrune) - 1; i >= 0; i-- { + header := headersToPrune[i] + 
bp.blockProcessor.pruneTrieHeaderV3(header) + } } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 6f969a155f2..38ee209d105 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -5914,7 +5914,9 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { cancelPruneCalled := false pruneTrieCalled := false - arguments := CreateMockArguments(createComponentHolderMocks()) + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ IsPruningEnabledCalled: func() bool { return true @@ -5926,10 +5928,6 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { pruneTrieCalled = true }, } - bp, err := blproc.NewShardProcessor(arguments) - require.Nil(t, err) - - require.Equal(t, uint64(0), bp.GetLastPrunedNonce()) rootHash1 := []byte("rootHash1") @@ -5948,13 +5946,28 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { header1 := &block.HeaderV3{ Nonce: 10, } + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return header1 + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return []byte("metaHash") + } + dataComponents.BlockChain = blkc + + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + require.Nil(t, bp.GetLastPrunedHash()) + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) - bp.PruneTrieAsyncHeader(header1) + bp.PruneTrieAsyncHeader() require.True(t, cancelPruneCalled) require.True(t, pruneTrieCalled) - require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) + require.Equal(t, []byte{}, bp.GetLastPrunedHash()) }) t.Run("header nonce lower than last pruned header, should not trigger", func(t *testing.T) { @@ -5963,7 +5976,9 @@ func 
TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { cancelPruneCalled := false pruneTrieCalled := false - arguments := CreateMockArguments(createComponentHolderMocks()) + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ IsPruningEnabledCalled: func() bool { return true @@ -5975,19 +5990,30 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { pruneTrieCalled = true }, } - bp, err := blproc.NewShardProcessor(arguments) - require.Nil(t, err) - - bp.SetLastPrunedNonce(10) header2 := &block.HeaderV3{ Nonce: 9, } - bp.PruneTrieAsyncHeader(header2) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return header2 + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return []byte("metaHash") + } + dataComponents.BlockChain = blkc + + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + bp.SetLastPrunedHash([]byte{}) + + bp.PruneTrieAsyncHeader() require.False(t, cancelPruneCalled) require.False(t, pruneTrieCalled) - require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) + require.Equal(t, []byte{}, bp.GetLastPrunedHash()) }) t.Run("should trigger multiple times for intermediate headers", func(t *testing.T) { @@ -5997,6 +6023,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { pruneTrieCalled := 0 coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ @@ -6073,11 +6100,6 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { } dataComponents.DataPool = dataPool - bp, err := blproc.NewShardProcessor(arguments) - 
require.Nil(t, err) - - bp.SetLastPrunedNonce(8) - rootHash3 := []byte("rootHash3") executionResultsHandlers = []data.BaseExecutionResultHandler{ @@ -6095,12 +6117,27 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { header3 := &block.HeaderV3{ Nonce: 10, } + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return header3 + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return []byte("metaHash") + } + dataComponents.BlockChain = blkc + + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + bp.SetLastPrunedHash([]byte{}) + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) - bp.PruneTrieAsyncHeader(header3) + bp.PruneTrieAsyncHeader() require.Equal(t, 2, cancelPruneCalled) require.Equal(t, 2, pruneTrieCalled) - require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) + require.Equal(t, []byte{}, bp.GetLastPrunedHash()) }) } diff --git a/process/block/export_test.go b/process/block/export_test.go index 37ca53c5acb..78a5a389f03 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -1189,18 +1189,18 @@ func (bp *baseProcessor) WaitForExecutionResultsVerification( return bp.waitForExecutionResultsVerification(header, haveTime) } -// SetLastPrunedNonce - -func (bp *baseProcessor) SetLastPrunedNonce(nonce uint64) { +// SetLastPrunedHash - +func (bp *baseProcessor) SetLastPrunedHash(hash []byte) { bp.mutLastPrunedHeader.Lock() - bp.lastPrunedHeaderNonce = nonce + bp.lastPrunedHeaderHash = hash bp.mutLastPrunedHeader.Unlock() } -// GetLastPrunedNonce - -func (bp *baseProcessor) GetLastPrunedNonce() uint64 { +// GetLastPrunedHash - +func (bp *baseProcessor) GetLastPrunedHash() []byte { bp.mutLastPrunedHeader.RLock() - lastPrunedNonce := bp.lastPrunedHeaderNonce + lastPrunedHeaderHash := bp.lastPrunedHeaderHash bp.mutLastPrunedHeader.RUnlock() - return lastPrunedNonce + return lastPrunedHeaderHash } diff --git 
a/process/block/metablock_test.go b/process/block/metablock_test.go index 97df7385557..fd53a5f204f 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -4241,8 +4241,6 @@ func pruneTrieForHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootH }, } - mp, _ := processBlock.NewMetaProcessor(arguments) - metaBlock := &block.MetaBlockV3{ PrevHash: []byte("hash"), ExecutionResults: []*block.MetaExecutionResult{ @@ -4273,7 +4271,18 @@ func pruneTrieForHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootH }, } - mp.PruneTrieAsyncHeader(metaBlock) + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return metaBlock + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return []byte("metaHash") + } + dataComponents.BlockChain = blkc + + mp, _ := processBlock.NewMetaProcessor(arguments) + + mp.PruneTrieAsyncHeader() assert.Equal(t, 2, pruneCalledForUserAccounts) assert.Equal(t, 2, pruneCalledForPeerAccounts) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 724e99020be..7c5695a40bf 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1277,7 +1277,7 @@ func (sp *shardProcessor) pruneTrieHeaderV3( for i := range executionResultsHandlers { currentExecRes := executionResultsHandlers[i] - prevExecRes, err := sp.getPreviousExecutionResult(i, executionResultsHandlers) + prevExecRes, err := sp.getPreviousExecutionResult(i, executionResultsHandlers, header.GetPrevHash()) if err != nil { log.Warn("failed to get previous execution result for pruning", "err", err, @@ -1310,13 +1310,12 @@ func (sp *shardProcessor) pruneTrieHeaderV3( func (sp *shardProcessor) getPreviousExecutionResult( index int, executionResultsHandlers []data.BaseExecutionResultHandler, + prevHeaderHash []byte, ) (data.BaseExecutionResultHandler, error) { if index > 0 { return executionResultsHandlers[index-1], nil } - // TODO: analyse based on current header 
to be committed, not last committed header - prevHeaderHash := sp.getCurrentBlockHeader().GetPrevHash() prevHeader, err := process.GetShardHeader(prevHeaderHash, sp.dataPool.Headers(), sp.marshalizer, sp.store) if err != nil { return nil, err diff --git a/process/interface.go b/process/interface.go index dce8c80e2ad..d402a7cbf3b 100644 --- a/process/interface.go +++ b/process/interface.go @@ -319,7 +319,7 @@ type BlockProcessor interface { ) error OnExecutedBlock(header data.HeaderHandler, rootHash []byte) error ProposedDirectSentTransactionsToBroadcast(proposedBody data.BodyHandler) map[string][][]byte - PruneTrieAsyncHeader(header data.HeaderHandler) + PruneTrieAsyncHeader() Close() error IsInterfaceNil() bool } From 6d21b8ed2e1d32a9da9d14e7cfa9f7cc1a06c12e Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 26 Mar 2026 11:09:23 +0200 Subject: [PATCH 09/20] add Reset() for spm and ewl --- state/interface.go | 2 ++ .../disabled/disabledStoragePruningManager.go | 3 ++ .../memoryEvictionWaitingList.go | 13 +++++++ .../storagePruningManager.go | 6 ++++ .../storagePruningManager_test.go | 34 +++++++++++++++++++ testscommon/state/evictionWaitingListMock.go | 7 ++++ .../state/storagePruningManagerStub.go | 7 ++++ 7 files changed, 72 insertions(+) diff --git a/state/interface.go b/state/interface.go index 2e9893f1cc6..93d09ff9d0b 100644 --- a/state/interface.go +++ b/state/interface.go @@ -178,6 +178,7 @@ type DBRemoveCacher interface { Put([]byte, common.ModifiedHashes) error Evict([]byte) (common.ModifiedHashes, error) ShouldKeepHash(hash string, identifier TriePruningIdentifier) (bool, error) + Reset() IsInterfaceNil() bool Close() error } @@ -194,6 +195,7 @@ type StoragePruningManager interface { MarkForEviction([]byte, []byte, common.ModifiedHashes, common.ModifiedHashes) error PruneTrie(rootHash []byte, identifier TriePruningIdentifier, tsm common.StorageManager, handler PruningHandler) CancelPrune(rootHash []byte, identifier TriePruningIdentifier, tsm 
common.StorageManager) + Reset() Close() error IsInterfaceNil() bool } diff --git a/state/storagePruningManager/disabled/disabledStoragePruningManager.go b/state/storagePruningManager/disabled/disabledStoragePruningManager.go index 6de7e2b0845..2e10c84a7f3 100644 --- a/state/storagePruningManager/disabled/disabledStoragePruningManager.go +++ b/state/storagePruningManager/disabled/disabledStoragePruningManager.go @@ -26,6 +26,9 @@ func (i *disabledStoragePruningManager) PruneTrie(_ []byte, _ state.TriePruningI func (i *disabledStoragePruningManager) CancelPrune(_ []byte, _ state.TriePruningIdentifier, _ common.StorageManager) { } +// Reset does nothing for this implementation +func (i *disabledStoragePruningManager) Reset() {} + // Close does nothing for this implementation func (i *disabledStoragePruningManager) Close() error { return nil diff --git a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go index 52aa401c5ba..df91bfcc525 100644 --- a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go +++ b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go @@ -205,6 +205,19 @@ func (mewl *memoryEvictionWaitingList) ShouldKeepHash(hash string, identifier st return false, nil } +// Reset will reinitialize the eviction waiting list, by emptying the cache and reversed cache. It will not change the sizes of the caches. 
+func (mewl *memoryEvictionWaitingList) Reset() { + mewl.opMutex.Lock() + + for key := range mewl.cache { + log.Debug("trie nodes eviction waiting list reset", "rootHash", []byte(key)) + } + + mewl.cache = make(map[string]*rootHashData) + mewl.reversedCache = make(map[string]*hashInfo) + mewl.opMutex.Unlock() +} + // Close returns nil func (mewl *memoryEvictionWaitingList) Close() error { return nil diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 757d04cc9ed..958de885b2f 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -234,6 +234,12 @@ func (spm *storagePruningManager) removeFromDb( return nil } +func (spm *storagePruningManager) Reset() { + log.Debug("storage pruning manager reset") + _ = spm.pruningBuffer.RemoveAll() + spm.dbEvictionWaitingList.Reset() +} + // Close will handle the closing of the underlying components func (spm *storagePruningManager) Close() error { return spm.dbEvictionWaitingList.Close() diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index 006a88e4d70..e8c1ba25328 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -260,3 +260,37 @@ func TestStoragePruningManager_MarkForEviction_removeDuplicatedKeys(t *testing.T _, ok = map2["hash4"] assert.False(t, ok) } + +func TestStoreagePruningManager_Reset(t *testing.T) { + t.Parallel() + + args := storage.GetStorageManagerArgs() + trieStorage, _ := trie.NewTrieStorageManager(args) + ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 100, + HashesSize: 10000, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) + spm, _ := NewStoragePruningManager(ewl, 1000) + + err := spm.MarkForEviction([]byte("rootHash"), []byte("newRootHash"), 
map[string]struct{}{"hash1": {}, "hash2": {}}, map[string]struct{}{"hash3": {}, "hash4": {}}) + assert.Nil(t, err) + err = spm.markForEviction([]byte("rootHash2"), map[string]struct{}{"hash5": {}, "hash6": {}}, state.NewRoot) + assert.Nil(t, err) + + trieStorage.EnterPruningBufferingMode() + spm.PruneTrie([]byte("rootHash"), state.OldRoot, trieStorage, state.NewPruningHandler(state.EnableDataRemoval)) + spm.CancelPrune([]byte("newRootHash"), state.NewRoot, trieStorage) + trieStorage.ExitPruningBufferingMode() + + assert.Equal(t, 2, spm.pruningBuffer.Len()) + + spm.Reset() + assert.Equal(t, 0, spm.pruningBuffer.Len()) + + // rootHash2 will should not be added to the pruning buffer because ewl was also reset when spm.Reset() was called + trieStorage.EnterPruningBufferingMode() + spm.PruneTrie([]byte("rootHash2"), state.NewRoot, trieStorage, state.NewPruningHandler(state.EnableDataRemoval)) + trieStorage.ExitPruningBufferingMode() + assert.Equal(t, 0, spm.pruningBuffer.Len()) +} diff --git a/testscommon/state/evictionWaitingListMock.go b/testscommon/state/evictionWaitingListMock.go index c071440d7b1..84d0e31e9e7 100644 --- a/testscommon/state/evictionWaitingListMock.go +++ b/testscommon/state/evictionWaitingListMock.go @@ -81,6 +81,13 @@ func (ewl *EvictionWaitingListMock) ShouldKeepHash(hash string, identifier state return false, nil } +// Reset will reinitialize the cache +func (ewl *EvictionWaitingListMock) Reset() { + ewl.OpMutex.Lock() + ewl.Cache = make(map[string]common.ModifiedHashes) + ewl.OpMutex.Unlock() +} + // Close - func (ewl *EvictionWaitingListMock) Close() error { return nil diff --git a/testscommon/state/storagePruningManagerStub.go b/testscommon/state/storagePruningManagerStub.go index 92c697c5224..20105a5b1b4 100644 --- a/testscommon/state/storagePruningManagerStub.go +++ b/testscommon/state/storagePruningManagerStub.go @@ -10,6 +10,7 @@ type StoragePruningManagerStub struct { MarkForEvictionCalled func(bytes []byte, bytes2 []byte, hashes 
common.ModifiedHashes, hashes2 common.ModifiedHashes) error PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, tsm common.StorageManager, handler state.PruningHandler) CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier, tsm common.StorageManager) + ResetCalled func() CloseCalled func() error } @@ -36,6 +37,12 @@ func (stub *StoragePruningManagerStub) CancelPrune(rootHash []byte, identifier s } } +func (stub *StoragePruningManagerStub) Reset() { + if stub.ResetCalled != nil { + stub.ResetCalled() + } +} + // Close - func (stub *StoragePruningManagerStub) Close() error { if stub.CloseCalled != nil { From 587f961f2e0c586e4238805459965287d86e1c85 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 26 Mar 2026 11:10:46 +0200 Subject: [PATCH 10/20] add missing comment --- testscommon/state/storagePruningManagerStub.go | 1 + 1 file changed, 1 insertion(+) diff --git a/testscommon/state/storagePruningManagerStub.go b/testscommon/state/storagePruningManagerStub.go index 20105a5b1b4..5b91046909c 100644 --- a/testscommon/state/storagePruningManagerStub.go +++ b/testscommon/state/storagePruningManagerStub.go @@ -37,6 +37,7 @@ func (stub *StoragePruningManagerStub) CancelPrune(rootHash []byte, identifier s } } +// Reset - func (stub *StoragePruningManagerStub) Reset() { if stub.ResetCalled != nil { stub.ResetCalled() From 75a6db210f3b6003bd77cb22c77bc1733cca21c6 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 26 Mar 2026 11:23:07 +0200 Subject: [PATCH 11/20] export pruning reset on accountsDB --- .../bootstrap/disabled/disabledAccountsAdapter.go | 4 ++++ .../transactionEvaluator/simulationAccountsDB.go | 4 ++++ state/accountsDB.go | 8 ++++++++ state/accountsDBApi.go | 4 ++++ state/accountsDBApiWithHistory.go | 4 ++++ state/interface.go | 1 + .../memoryEvictionWaitingList_test.go | 15 +++++++++++++++ .../storagePruningManager.go | 6 ++++-- .../storagePruningManager_test.go | 4 ++-- 
testscommon/state/accountsAdapterStub.go | 8 ++++++++ 10 files changed, 54 insertions(+), 4 deletions(-) diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index ce928b21fca..d6ace804ff9 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -99,6 +99,10 @@ func (a *accountsAdapter) RecreateTrieIfNeeded(_ common.RootHashHolder) error { func (a *accountsAdapter) CancelPrune(_ []byte, _ state.TriePruningIdentifier) { } +// ResetPruning - +func (a *accountsAdapter) ResetPruning() { +} + // SnapshotState - func (a *accountsAdapter) SnapshotState(_ []byte, _ uint32) { } diff --git a/process/transactionEvaluator/simulationAccountsDB.go b/process/transactionEvaluator/simulationAccountsDB.go index edad278c798..835a64252e5 100644 --- a/process/transactionEvaluator/simulationAccountsDB.go +++ b/process/transactionEvaluator/simulationAccountsDB.go @@ -145,6 +145,10 @@ func (r *simulationAccountsDB) PruneTrie(_ []byte, _ state.TriePruningIdentifier func (r *simulationAccountsDB) CancelPrune(_ []byte, _ state.TriePruningIdentifier) { } +// ResetPruning won't do anything as write operations are disabled on this component +func (r *simulationAccountsDB) ResetPruning() { +} + // SnapshotState won't do anything as write operations are disabled on this component func (r *simulationAccountsDB) SnapshotState(_ []byte, _ uint32) { } diff --git a/state/accountsDB.go b/state/accountsDB.go index b412bac97d6..52dcb16be87 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -1230,6 +1230,14 @@ func (adb *AccountsDB) CancelPrune(rootHash []byte, identifier TriePruningIdenti adb.storagePruningManager.CancelPrune(rootHash, identifier, adb.mainTrie.GetStorageManager()) } +// ResetPruning will reset all collected data needed for pruning +func (adb *AccountsDB) ResetPruning() { + adb.mutOp.Lock() + defer adb.mutOp.Unlock() + + 
adb.storagePruningManager.Reset() +} + // SnapshotState triggers the snapshotting process of the state trie func (adb *AccountsDB) SnapshotState(rootHash []byte, epoch uint32) { adb.snapshotsManger.SnapshotState(rootHash, epoch, adb.getMainTrie().GetStorageManager()) diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index 76ee0d506d8..915a9e9bea5 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -224,6 +224,10 @@ func (accountsDB *accountsDBApi) PruneTrie(_ []byte, _ TriePruningIdentifier, _ func (accountsDB *accountsDBApi) CancelPrune(_ []byte, _ TriePruningIdentifier) { } +// ResetPruning is a not permitted operation in this implementation and thus, does nothing +func (accountsDB *accountsDBApi) ResetPruning() { +} + // SnapshotState is a not permitted operation in this implementation and thus, does nothing func (accountsDB *accountsDBApi) SnapshotState(_ []byte, _ uint32) { } diff --git a/state/accountsDBApiWithHistory.go b/state/accountsDBApiWithHistory.go index e39a24ea7c7..048fbd44bbe 100644 --- a/state/accountsDBApiWithHistory.go +++ b/state/accountsDBApiWithHistory.go @@ -112,6 +112,10 @@ func (accountsDB *accountsDBApiWithHistory) PruneTrie(_ []byte, _ TriePruningIde func (accountsDB *accountsDBApiWithHistory) CancelPrune(_ []byte, _ TriePruningIdentifier) { } +// ResetPruning is a not permitted operation in this implementation and thus, does nothing +func (accountsDB *accountsDBApiWithHistory) ResetPruning() { +} + // SnapshotState is a not permitted operation in this implementation and thus, does nothing func (accountsDB *accountsDBApiWithHistory) SnapshotState(_ []byte, _ uint32) { } diff --git a/state/interface.go b/state/interface.go index 93d09ff9d0b..254c68ddd44 100644 --- a/state/interface.go +++ b/state/interface.go @@ -84,6 +84,7 @@ type AccountsAdapter interface { RecreateTrieIfNeeded(options common.RootHashHolder) error PruneTrie(rootHash []byte, identifier TriePruningIdentifier, handler PruningHandler) 
CancelPrune(rootHash []byte, identifier TriePruningIdentifier) + ResetPruning() SnapshotState(rootHash []byte, epoch uint32) IsPruningEnabled() bool GetAllLeaves(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error diff --git a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go index 21099502f93..69ef10606d3 100644 --- a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go +++ b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go @@ -359,3 +359,18 @@ func TestMemoryEvictionWaitingList_RemoveFromInversedCache(t *testing.T) { assert.Nil(t, info) assert.False(t, exists) } + +func TestMemoryEvictionWaitingList_Reset(t *testing.T) { + t.Parallel() + + mewl, _ := NewMemoryEvictionWaitingList(getDefaultArgsForMemoryEvictionWaitingList()) + + _ = mewl.Put([]byte("root1"), common.ModifiedHashes{"hash1": {}, "hash2": {}}) + _ = mewl.Put([]byte("root2"), common.ModifiedHashes{"hash3": {}, "hash4": {}}) + + assert.Equal(t, 2, len(mewl.cache)) + assert.Equal(t, 4, len(mewl.reversedCache)) + mewl.Reset() + assert.Equal(t, 0, len(mewl.cache)) + assert.Equal(t, 0, len(mewl.reversedCache)) +} diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 958de885b2f..2478316a02a 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -235,8 +235,10 @@ func (spm *storagePruningManager) removeFromDb( } func (spm *storagePruningManager) Reset() { - log.Debug("storage pruning manager reset") - _ = spm.pruningBuffer.RemoveAll() + bufferedHashes := spm.pruningBuffer.RemoveAll() + for _, hash := range bufferedHashes { + log.Trace("trie storage manager reset", "hash", hash) + } spm.dbEvictionWaitingList.Reset() } 
diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index e8c1ba25328..2f656e6b556 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -261,7 +261,7 @@ func TestStoragePruningManager_MarkForEviction_removeDuplicatedKeys(t *testing.T assert.False(t, ok) } -func TestStoreagePruningManager_Reset(t *testing.T) { +func TestStoragePruningManager_Reset(t *testing.T) { t.Parallel() args := storage.GetStorageManagerArgs() @@ -288,7 +288,7 @@ func TestStoreagePruningManager_Reset(t *testing.T) { spm.Reset() assert.Equal(t, 0, spm.pruningBuffer.Len()) - // rootHash2 will should not be added to the pruning buffer because ewl was also reset when spm.Reset() was called + // rootHash2 should not be added to the pruning buffer because ewl was also reset when spm.Reset() was called trieStorage.EnterPruningBufferingMode() spm.PruneTrie([]byte("rootHash2"), state.NewRoot, trieStorage, state.NewPruningHandler(state.EnableDataRemoval)) trieStorage.ExitPruningBufferingMode() diff --git a/testscommon/state/accountsAdapterStub.go b/testscommon/state/accountsAdapterStub.go index aa3d41e1355..04608b50045 100644 --- a/testscommon/state/accountsAdapterStub.go +++ b/testscommon/state/accountsAdapterStub.go @@ -29,6 +29,7 @@ type AccountsStub struct { RecreateTrieIfNeededCalled func(options common.RootHashHolder) error PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier) + ResetPruningCalled func() SnapshotStateCalled func(rootHash []byte, epoch uint32) IsPruningEnabledCalled func() bool GetAllLeavesCalled func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error @@ -208,6 +209,13 @@ func (as *AccountsStub) CancelPrune(rootHash 
[]byte, identifier state.TriePrunin } } +// ResetPruning - +func (as *AccountsStub) ResetPruning() { + if as.ResetPruningCalled != nil { + as.ResetPruningCalled() + } +} + // SnapshotState - func (as *AccountsStub) SnapshotState(rootHash []byte, epoch uint32) { if as.SnapshotStateCalled != nil { From ae2aad999d261f81526245546a0eac9c301932d0 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 26 Mar 2026 11:39:28 +0200 Subject: [PATCH 12/20] fix unit tests --- integrationTests/mock/blockProcessorMock.go | 6 +- process/block/baseProcess_test.go | 73 ++++++++++--------- process/block/shardblock_test.go | 6 +- testscommon/blockProcessorStub.go | 6 +- .../processMocks/blockProcessorStub.go | 6 +- 5 files changed, 52 insertions(+), 45 deletions(-) diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 80355b428f1..a985485e629 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -43,7 +43,7 @@ type BlockProcessorMock struct { ) error OnExecutedBlockCalled func(header data.HeaderHandler, rootHash []byte) error ProposedDirectSentTransactionsToBroadcastCalled func(proposedBody data.BodyHandler) map[string][][]byte - PruneTrieAsyncHeaderCalled func(header data.HeaderHandler) + PruneTrieAsyncHeaderCalled func() } // ProcessBlock mocks processing a block @@ -233,9 +233,9 @@ func (bpm *BlockProcessorMock) RemoveHeaderFromPool(headerHash []byte) { } // PruneTrieAsyncHeader - -func (bpm *BlockProcessorMock) PruneTrieAsyncHeader(header data.HeaderHandler) { +func (bpm *BlockProcessorMock) PruneTrieAsyncHeader() { if bpm.PruneTrieAsyncHeaderCalled != nil { - bpm.PruneTrieAsyncHeaderCalled(header) + bpm.PruneTrieAsyncHeaderCalled() } } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 38ee209d105..3f0ed6c7fe9 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -5943,6 +5943,8 @@ func 
TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, }, } + + header1Hash := []byte("headerHash1") header1 := &block.HeaderV3{ Nonce: 10, } @@ -5952,7 +5954,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { return header1 } blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return []byte("metaHash") + return header1Hash } dataComponents.BlockChain = blkc @@ -5967,7 +5969,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { require.True(t, cancelPruneCalled) require.True(t, pruneTrieCalled) - require.Equal(t, []byte{}, bp.GetLastPrunedHash()) + require.Equal(t, header1Hash, bp.GetLastPrunedHash()) }) t.Run("header nonce lower than last pruned header, should not trigger", func(t *testing.T) { @@ -5991,6 +5993,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, } + header2Hash := []byte("headerHash2") header2 := &block.HeaderV3{ Nonce: 9, } @@ -6000,7 +6003,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { return header2 } blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return []byte("metaHash") + return header2Hash } dataComponents.BlockChain = blkc @@ -6013,7 +6016,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { require.False(t, cancelPruneCalled) require.False(t, pruneTrieCalled) - require.Equal(t, []byte{}, bp.GetLastPrunedHash()) + require.Equal(t, header2Hash, bp.GetLastPrunedHash()) }) t.Run("should trigger multiple times for intermediate headers", func(t *testing.T) { @@ -6051,6 +6054,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, }, } + headerHash1 := []byte("headerHash1") header1 := &block.HeaderV3{ Nonce: 8, LastExecutionResult: &block.ExecutionResultInfo{}, @@ -6070,36 +6074,13 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, }, } + headerHash2 := []byte("headerHash2") header2 := &block.HeaderV3{ - Nonce: 9, + Nonce: 9, + PrevHash: headerHash1, } _ = header2.SetExecutionResultsHandlers(executionResultsHandlers) - headerCalls 
:= 0 - headerHashCalls := 0 - headersPool := &mock.HeadersCacherStub{ - GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { - if headerCalls == 0 { - headerCalls++ - return []data.HeaderHandler{header2}, [][]byte{[]byte("hash1")}, nil - } - - return []data.HeaderHandler{}, [][]byte{}, nil - }, - GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { - if headerHashCalls == 0 { - headerHashCalls++ - return header1, nil - } - return header2, nil - }, - } - dataPool := initDataPool() - dataPool.HeadersCalled = func() dataRetriever.HeadersPool { - return headersPool - } - dataComponents.DataPool = dataPool - rootHash3 := []byte("rootHash3") executionResultsHandlers = []data.BaseExecutionResultHandler{ @@ -6114,23 +6095,47 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, }, } + + headerHash3 := []byte("headerHash3") header3 := &block.HeaderV3{ - Nonce: 10, + Nonce: 10, + PrevHash: headerHash2, + } + + headersPool := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + if bytes.Equal(hash, headerHash1) { + return header1, nil + } + if bytes.Equal(hash, headerHash2) { + return header2, nil + } + if bytes.Equal(hash, headerHash3) { + return header3, nil + } + + return nil, errors.New("header not found") + }, } + dataPool := initDataPool() + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { + return headersPool + } + dataComponents.DataPool = dataPool blkc := createTestBlockchain() blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { return header3 } blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return []byte("metaHash") + return headerHash3 } dataComponents.BlockChain = blkc bp, err := blproc.NewShardProcessor(arguments) require.Nil(t, err) - bp.SetLastPrunedHash([]byte{}) + bp.SetLastPrunedHash(headerHash1) _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) bp.PruneTrieAsyncHeader() @@ 
-6138,6 +6143,6 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { require.Equal(t, 2, cancelPruneCalled) require.Equal(t, 2, pruneTrieCalled) - require.Equal(t, []byte{}, bp.GetLastPrunedHash()) + require.Equal(t, headerHash3, bp.GetLastPrunedHash()) }) } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 3b1d081460c..4ef50232c9c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -7097,7 +7097,7 @@ func pruneTrieHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootHash } } dataComponents.DataPool = dataPool - _ = dataComponents.BlockChain.SetCurrentBlockHeader(&block.Header{PrevHash: prevHeaderHash}) + arguments := CreateMockArguments(coreComponents, dataComponents, boostrapComponents, statusComponents) arguments.AccountsDB = map[state.AccountsDbIdentifier]state.AccountsAdapter{ state.UserAccountsState: &stateMock.AccountsStub{ @@ -7143,7 +7143,9 @@ func pruneTrieHeaderV3Test(t *testing.T, prevHeader data.HeaderHandler, rootHash }, }, } - header1 := &block.HeaderV3{} + header1 := &block.HeaderV3{ + PrevHash: prevHeaderHash, + } _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) sp.PruneTrieHeaderV3(header1) diff --git a/testscommon/blockProcessorStub.go b/testscommon/blockProcessorStub.go index d8ca065de28..2b06d3015b7 100644 --- a/testscommon/blockProcessorStub.go +++ b/testscommon/blockProcessorStub.go @@ -48,7 +48,7 @@ type BlockProcessorStub struct { OnExecutedBlockCalled func(header data.HeaderHandler, rootHash []byte) error RemoveHeaderFromPoolCalled func(headerHash []byte) ProposedDirectSentTransactionsToBroadcastCalled func(proposedBody data.BodyHandler) map[string][][]byte - PruneTrieAsyncHeaderCalled func(header data.HeaderHandler) + PruneTrieAsyncHeaderCalled func() } // SetNumProcessedObj - @@ -295,9 +295,9 @@ func (bps *BlockProcessorStub) ProposedDirectSentTransactionsToBroadcast(propose } // PruneTrieAsyncHeader - -func (bps 
*BlockProcessorStub) PruneTrieAsyncHeader(header data.HeaderHandler) { +func (bps *BlockProcessorStub) PruneTrieAsyncHeader() { if bps.PruneTrieAsyncHeaderCalled != nil { - bps.PruneTrieAsyncHeaderCalled(header) + bps.PruneTrieAsyncHeaderCalled() } } diff --git a/testscommon/processMocks/blockProcessorStub.go b/testscommon/processMocks/blockProcessorStub.go index aad70a3a8bb..ddf34cdf24b 100644 --- a/testscommon/processMocks/blockProcessorStub.go +++ b/testscommon/processMocks/blockProcessorStub.go @@ -7,7 +7,7 @@ type BlockProcessorStub struct { ProcessBlockProposalCalled func(handler data.HeaderHandler, headerHash []byte, body data.BodyHandler) (data.BaseExecutionResultHandler, error) CommitBlockProposalStateCalled func(headerHandler data.HeaderHandler) error RevertBlockProposalStateCalled func() - PruneTrieAsyncHeaderCalled func(header data.HeaderHandler) + PruneTrieAsyncHeaderCalled func() } // ProcessBlockProposal - @@ -36,9 +36,9 @@ func (bp *BlockProcessorStub) RevertBlockProposalState() { } // PruneTrieAsyncHeader - -func (bp *BlockProcessorStub) PruneTrieAsyncHeader(header data.HeaderHandler) { +func (bp *BlockProcessorStub) PruneTrieAsyncHeader() { if bp.PruneTrieAsyncHeaderCalled != nil { - bp.PruneTrieAsyncHeaderCalled(header) + bp.PruneTrieAsyncHeaderCalled() } } From b7fa2eeec7135a58c3ca98b63e5326917ce471c7 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 26 Mar 2026 12:44:44 +0200 Subject: [PATCH 13/20] added more unit tests --- process/block/baseProcess.go | 17 +- process/block/baseProcess_test.go | 483 ++++++++++++++++++++++++------ process/block/export_test.go | 7 + 3 files changed, 408 insertions(+), 99 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 0a151d9668e..f1af2d38b9b 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -153,8 +153,9 @@ type baseProcessor struct { maxProposalNonceGap uint64 closingNodeStarted *atomic.Bool - lastPrunedHeaderHash []byte - 
mutLastPrunedHeader sync.RWMutex + lastPrunedHeaderHash []byte + lastPrunedHeaderNonce uint64 + mutLastPrunedHeader sync.RWMutex } type bootStorerDataArgs struct { @@ -4048,26 +4049,34 @@ func (bp *baseProcessor) PruneTrieAsyncHeader() { // last pruned header hash not set, trigger prune trie for the provided header bp.blockProcessor.pruneTrieHeaderV3(header) bp.lastPrunedHeaderHash = headerHash + bp.lastPrunedHeaderNonce = header.GetNonce() return } - if bytes.Equal(headerHash, bp.lastPrunedHeaderHash) { + // extra check by nonce + if header.GetNonce() <= bp.lastPrunedHeaderNonce { return } bp.pruneTrieForHeadersUnprotected(headerHash, header) bp.lastPrunedHeaderHash = headerHash + bp.lastPrunedHeaderNonce = header.GetNonce() } func (bp *baseProcessor) pruneTrieForHeadersUnprotected( headerHash []byte, header data.HeaderHandler, ) { + if bytes.Equal(headerHash, bp.lastPrunedHeaderHash) { + return + } + headersToPrune := make([]data.HeaderHandler, 0) + headersToPrune = append(headersToPrune, header) lastPrunedHeaderHash := bp.lastPrunedHeaderHash - walkerHash := headerHash + walkerHash := header.GetPrevHash() for !bytes.Equal(walkerHash, lastPrunedHeaderHash) { // headers pool is cleaned on consensus flow based on last execution result diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 3f0ed6c7fe9..722fa3e1cb2 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -5908,6 +5908,203 @@ func TestBaseProcessor_WaitForExecutionResultsVerification(t *testing.T) { func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { t.Parallel() + // header 1 + + headerHash1 := []byte("headerHash1") + rootHash10 := []byte("rootHash10") + rootHash11 := []byte("rootHash11") + + baseExecRes10 := &block.BaseExecutionResult{RootHash: rootHash10} + baseExecRes11 := &block.BaseExecutionResult{RootHash: rootHash11} + executionResultsHandlers := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + 
BaseExecutionResult: baseExecRes10, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes11, + }, + } + header1 := &block.HeaderV3{ + Nonce: 8, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes11, + }, + } + _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) + + // header 2 + + headerHash2 := []byte("headerHash2") + rootHash20 := []byte("rootHash20") + rootHash21 := []byte("rootHash21") + rootHash22 := []byte("rootHash22") + rootHash23 := []byte("rootHash23") + + baseExecRes20 := &block.BaseExecutionResult{RootHash: rootHash20} + baseExecRes21 := &block.BaseExecutionResult{RootHash: rootHash21} + baseExecRes22 := &block.BaseExecutionResult{RootHash: rootHash22} + baseExecRes23 := &block.BaseExecutionResult{RootHash: rootHash23} + executionResultsHandlers2 := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes20, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes21, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes22, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes23, + }, + } + + header2 := &block.HeaderV3{ + Nonce: 9, + PrevHash: headerHash1, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes23, + }, + } + _ = header2.SetExecutionResultsHandlers(executionResultsHandlers2) + + // header 3 + + headerHash3 := []byte("headerHash3") + rootHash30 := []byte("rootHash30") + rootHash31 := []byte("rootHash31") + + baseExecRes30 := &block.BaseExecutionResult{RootHash: rootHash30} + baseExecRes31 := &block.BaseExecutionResult{RootHash: rootHash31} + executionResultsHandlers3 := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes30, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes31, + }, + } + + header3 := &block.HeaderV3{ + Nonce: 10, + PrevHash: headerHash2, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: 
baseExecRes31, + }, + } + _ = header3.SetExecutionResultsHandlers(executionResultsHandlers3) + + // header 4 + + headerHash4 := []byte("headerHash4") + header4 := &block.HeaderV3{ + Nonce: 11, + PrevHash: headerHash3, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes31, + }, + } + + // header 5 + + headerHash5 := []byte("headerHash5") + header5 := &block.HeaderV3{ + Nonce: 12, + PrevHash: headerHash4, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes31, + }, + } + + // header 6 + + headerHash6 := []byte("headerHash6") + rootHash60 := []byte("rootHash60") + rootHash61 := []byte("rootHash61") + + baseExecRes60 := &block.BaseExecutionResult{RootHash: rootHash60} + baseExecRes61 := &block.BaseExecutionResult{RootHash: rootHash61} + executionResultsHandlers6 := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes60, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes61, + }, + } + + header6 := &block.HeaderV3{ + Nonce: 13, + PrevHash: headerHash5, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes61, + }, + } + _ = header6.SetExecutionResultsHandlers(executionResultsHandlers6) + + // header 7 + + headerHash7 := []byte("headerHash7") + rootHash70 := []byte("rootHash70") + + baseExecRes70 := &block.BaseExecutionResult{RootHash: rootHash70} + baseExecRes71 := &block.BaseExecutionResult{RootHash: rootHash70} // no roothash change + executionResultsHandlers7 := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes70, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes71, + }, + } + + header7 := &block.HeaderV3{ + Nonce: 14, + PrevHash: headerHash6, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes71, + }, + } + _ = header7.SetExecutionResultsHandlers(executionResultsHandlers7) + + // header 8 + + headerHash8 := []byte("headerHash8") + + 
baseExecRes80 := &block.BaseExecutionResult{RootHash: rootHash70} // no roothash change + header8 := &block.HeaderV3{ + Nonce: 15, + PrevHash: headerHash7, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes80, + }, + } + + // header 9 + + headerHash9 := []byte("headerHash9") + rootHash91 := []byte("rootHash91") + + baseExecRes90 := &block.BaseExecutionResult{RootHash: rootHash70} // no roothash change + baseExecRes91 := &block.BaseExecutionResult{RootHash: rootHash91} // roothash changed + executionResultsHandlers9 := []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes90, + }, + &block.ExecutionResult{ + BaseExecutionResult: baseExecRes91, + }, + } + + header9 := &block.HeaderV3{ + Nonce: 16, + PrevHash: headerHash8, + LastExecutionResult: &block.ExecutionResultInfo{ + ExecutionResult: baseExecRes91, + }, + } + _ = header9.SetExecutionResultsHandlers(executionResultsHandlers9) + t.Run("last pruned header not set, should trigger provided header", func(t *testing.T) { t.Parallel() @@ -5929,32 +6126,12 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, } - rootHash1 := []byte("rootHash1") - - executionResultsHandlers := []data.BaseExecutionResultHandler{ - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: rootHash1, - }, - }, - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: []byte("some other root hash"), - }, - }, - } - - header1Hash := []byte("headerHash1") - header1 := &block.HeaderV3{ - Nonce: 10, - } - blkc := createTestBlockchain() blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { return header1 } blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return header1Hash + return headerHash1 } dataComponents.BlockChain = blkc @@ -5969,7 +6146,7 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { require.True(t, cancelPruneCalled) require.True(t, pruneTrieCalled) - 
require.Equal(t, header1Hash, bp.GetLastPrunedHash()) + require.Equal(t, headerHash1, bp.GetLastPrunedHash()) }) t.Run("header nonce lower than last pruned header, should not trigger", func(t *testing.T) { @@ -5993,37 +6170,33 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { }, } - header2Hash := []byte("headerHash2") - header2 := &block.HeaderV3{ - Nonce: 9, - } - blkc := createTestBlockchain() blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { return header2 } blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return header2Hash + return headerHash2 } dataComponents.BlockChain = blkc bp, err := blproc.NewShardProcessor(arguments) require.Nil(t, err) - bp.SetLastPrunedHash([]byte{}) + bp.SetLastPrunedNonce(10) + bp.SetLastPrunedHash(headerHash3) bp.PruneTrieAsyncHeader() require.False(t, cancelPruneCalled) require.False(t, pruneTrieCalled) - require.Equal(t, header2Hash, bp.GetLastPrunedHash()) + require.Equal(t, headerHash3, bp.GetLastPrunedHash()) }) - t.Run("should trigger multiple times for intermediate headers", func(t *testing.T) { + t.Run("intermediate headers with included execution results", func(t *testing.T) { t.Parallel() - cancelPruneCalled := 0 - pruneTrieCalled := 0 + cancelPruneRootHashes := make([][]byte, 0) + pruneTrieRootHashes := make([][]byte, 0) coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() @@ -6034,84 +6207,191 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { return true }, CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { - cancelPruneCalled++ + cancelPruneRootHashes = append(cancelPruneRootHashes, rootHash) }, PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { - pruneTrieCalled++ + pruneTrieRootHashes = append(pruneTrieRootHashes, rootHash) }, } - rootHash1 := []byte("rootHash1") - executionResultsHandlers := []data.BaseExecutionResultHandler{ - 
&block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: rootHash1, - }, - }, - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: []byte("some other root hash"), - }, + headersPool := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + if bytes.Equal(hash, headerHash1) { + return header1, nil + } + if bytes.Equal(hash, headerHash2) { + return header2, nil + } + if bytes.Equal(hash, headerHash3) { + return header3, nil + } + + return nil, errors.New("header not found") }, } - headerHash1 := []byte("headerHash1") - header1 := &block.HeaderV3{ - Nonce: 8, - LastExecutionResult: &block.ExecutionResultInfo{}, + dataPool := initDataPool() + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { + return headersPool } - _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) + dataComponents.DataPool = dataPool - rootHash2 := []byte("rootHash2") - executionResultsHandlers = []data.BaseExecutionResultHandler{ - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: rootHash2, - }, - }, - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: []byte("some other root hash6"), - }, - }, + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return header3 } - headerHash2 := []byte("headerHash2") - header2 := &block.HeaderV3{ - Nonce: 9, - PrevHash: headerHash1, + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return headerHash3 } - _ = header2.SetExecutionResultsHandlers(executionResultsHandlers) + dataComponents.BlockChain = blkc - rootHash3 := []byte("rootHash3") + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) - executionResultsHandlers = []data.BaseExecutionResultHandler{ - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: rootHash3, - }, + 
bp.SetLastPrunedHash(headerHash1) + + bp.PruneTrieAsyncHeader() + + require.Equal(t, 6, len(cancelPruneRootHashes)) + require.Equal(t, rootHash11, cancelPruneRootHashes[0]) + require.Equal(t, rootHash20, cancelPruneRootHashes[1]) + require.Equal(t, rootHash21, cancelPruneRootHashes[2]) + require.Equal(t, rootHash22, cancelPruneRootHashes[3]) + require.Equal(t, rootHash23, cancelPruneRootHashes[4]) + require.Equal(t, rootHash30, cancelPruneRootHashes[5]) + + require.Equal(t, 6, len(pruneTrieRootHashes)) + require.Equal(t, rootHash11, pruneTrieRootHashes[0]) + require.Equal(t, rootHash20, pruneTrieRootHashes[1]) + require.Equal(t, rootHash21, pruneTrieRootHashes[2]) + require.Equal(t, rootHash22, pruneTrieRootHashes[3]) + require.Equal(t, rootHash23, pruneTrieRootHashes[4]) + require.Equal(t, rootHash30, pruneTrieRootHashes[5]) + + require.Equal(t, headerHash3, bp.GetLastPrunedHash()) + + // another call for the same current header should not trigger prune + bp.PruneTrieAsyncHeader() + + require.Equal(t, 6, len(cancelPruneRootHashes)) + require.Equal(t, 6, len(pruneTrieRootHashes)) + require.Equal(t, headerHash3, bp.GetLastPrunedHash()) + }) + + t.Run("intermediate headers without included execution results", func(t *testing.T) { + t.Parallel() + + cancelPruneRootHashes := make([][]byte, 0) + pruneTrieRootHashes := make([][]byte, 0) + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { + return true }, - &block.ExecutionResult{ - BaseExecutionResult: &block.BaseExecutionResult{ - RootHash: []byte("some other root hash2"), - }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneRootHashes = append(cancelPruneRootHashes, rootHash) + }, + 
PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { + pruneTrieRootHashes = append(pruneTrieRootHashes, rootHash) }, } - headerHash3 := []byte("headerHash3") - header3 := &block.HeaderV3{ - Nonce: 10, - PrevHash: headerHash2, + headersPool := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + if bytes.Equal(hash, headerHash3) { + return header3, nil + } + if bytes.Equal(hash, headerHash4) { + return header4, nil + } + if bytes.Equal(hash, headerHash5) { + return header5, nil + } + if bytes.Equal(hash, headerHash6) { + return header6, nil + } + + return nil, errors.New("header not found") + }, + } + dataPool := initDataPool() + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { + return headersPool + } + dataComponents.DataPool = dataPool + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return header6 + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return headerHash6 + } + dataComponents.BlockChain = blkc + + bp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + bp.SetLastPrunedHash(headerHash3) + + bp.PruneTrieAsyncHeader() + + require.Equal(t, 2, len(cancelPruneRootHashes)) + require.Equal(t, rootHash31, cancelPruneRootHashes[0]) + require.Equal(t, rootHash60, cancelPruneRootHashes[1]) + + require.Equal(t, 2, len(pruneTrieRootHashes)) + require.Equal(t, rootHash31, pruneTrieRootHashes[0]) + require.Equal(t, rootHash60, pruneTrieRootHashes[1]) + + require.Equal(t, headerHash6, bp.GetLastPrunedHash()) + + // another call for the same current header should not trigger prune + bp.PruneTrieAsyncHeader() + + require.Equal(t, 2, len(cancelPruneRootHashes)) + require.Equal(t, 2, len(pruneTrieRootHashes)) + require.Equal(t, headerHash6, bp.GetLastPrunedHash()) + }) + + t.Run("intermediate headers with included execution results, no roothash change", func(t *testing.T) { + 
t.Parallel() + + cancelPruneRootHashes := make([][]byte, 0) + pruneTrieRootHashes := make([][]byte, 0) + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { + return true + }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneRootHashes = append(cancelPruneRootHashes, rootHash) + }, + PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { + pruneTrieRootHashes = append(pruneTrieRootHashes, rootHash) + }, } headersPool := &mock.HeadersCacherStub{ GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { - if bytes.Equal(hash, headerHash1) { - return header1, nil + if bytes.Equal(hash, headerHash6) { + return header6, nil } - if bytes.Equal(hash, headerHash2) { - return header2, nil + if bytes.Equal(hash, headerHash7) { + return header7, nil } - if bytes.Equal(hash, headerHash3) { - return header3, nil + if bytes.Equal(hash, headerHash8) { + return header8, nil + } + if bytes.Equal(hash, headerHash9) { + return header9, nil } return nil, errors.New("header not found") @@ -6125,24 +6405,37 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { blkc := createTestBlockchain() blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return header3 + return header9 } blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return headerHash3 + return headerHash9 } dataComponents.BlockChain = blkc bp, err := blproc.NewShardProcessor(arguments) require.Nil(t, err) - bp.SetLastPrunedHash(headerHash1) + bp.SetLastPrunedHash(headerHash5) - _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) bp.PruneTrieAsyncHeader() - require.Equal(t, 2, cancelPruneCalled) - 
require.Equal(t, 2, pruneTrieCalled) + require.Equal(t, 3, len(cancelPruneRootHashes)) + require.Equal(t, rootHash60, cancelPruneRootHashes[0]) + require.Equal(t, rootHash61, cancelPruneRootHashes[1]) + require.Equal(t, rootHash70, cancelPruneRootHashes[2]) - require.Equal(t, headerHash3, bp.GetLastPrunedHash()) + require.Equal(t, 3, len(pruneTrieRootHashes)) + require.Equal(t, rootHash60, pruneTrieRootHashes[0]) + require.Equal(t, rootHash61, pruneTrieRootHashes[1]) + require.Equal(t, rootHash70, pruneTrieRootHashes[2]) + + require.Equal(t, headerHash9, bp.GetLastPrunedHash()) + + // another call for the same current header should not trigger prune + bp.PruneTrieAsyncHeader() + + require.Equal(t, 3, len(cancelPruneRootHashes)) + require.Equal(t, 3, len(pruneTrieRootHashes)) + require.Equal(t, headerHash9, bp.GetLastPrunedHash()) }) } diff --git a/process/block/export_test.go b/process/block/export_test.go index 78a5a389f03..b85134899c1 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -1189,6 +1189,13 @@ func (bp *baseProcessor) WaitForExecutionResultsVerification( return bp.waitForExecutionResultsVerification(header, haveTime) } +// SetLastPrunedNonce - +func (bp *baseProcessor) SetLastPrunedNonce(nonce uint64) { + bp.mutLastPrunedHeader.Lock() + bp.lastPrunedHeaderNonce = nonce + bp.mutLastPrunedHeader.Unlock() +} + // SetLastPrunedHash - func (bp *baseProcessor) SetLastPrunedHash(hash []byte) { bp.mutLastPrunedHeader.Lock() From 78cab6671dca9a4dfcd31eb96cfe32f750de4be1 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 26 Mar 2026 12:50:59 +0200 Subject: [PATCH 14/20] remove unused common func --- process/common.go | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/process/common.go b/process/common.go index 294ac56dc8b..dddbde0e2ec 100644 --- a/process/common.go +++ b/process/common.go @@ -775,35 +775,6 @@ func GetSortedStorageUpdates(account *vmcommon.OutputAccount) []*vmcommon.Storag return 
storageUpdates } -// GetHeaderWithNonce tries to get the header by nonce from pool first and if not found, searches for it through storer -func GetHeaderWithNonce( - nonce uint64, - shardId uint32, - headersCacher dataRetriever.HeadersPool, - marshalizer marshal.Marshalizer, - storageService dataRetriever.StorageService, - uint64Converter typeConverters.Uint64ByteSliceConverter, -) (data.HeaderHandler, []byte, error) { - if shardId == core.MetachainShardId { - return GetMetaHeaderWithNonce( - nonce, - headersCacher, - marshalizer, - storageService, - uint64Converter, - ) - } - - return GetShardHeaderWithNonce( - nonce, - shardId, - headersCacher, - marshalizer, - storageService, - uint64Converter, - ) -} - // GetHeader tries to get the header from pool first and if not found, searches for it through storer func GetHeader( headerHash []byte, From a1a33e3cbe2ae5eee86b4dd7f267364d9abf7e43 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 26 Mar 2026 13:05:59 +0200 Subject: [PATCH 15/20] integrate pruning context reset --- process/block/baseProcess.go | 22 ++++++++++++++-------- process/block/interface.go | 1 + process/block/metablock.go | 12 ++++++++++++ process/block/shardblock.go | 11 ++++++++++- 4 files changed, 37 insertions(+), 9 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index f1af2d38b9b..13e7bb483ba 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -4058,7 +4058,12 @@ func (bp *baseProcessor) PruneTrieAsyncHeader() { return } - bp.pruneTrieForHeadersUnprotected(headerHash, header) + err := bp.pruneTrieForHeadersUnprotected(headerHash, header) + if err != nil { + // there was an error while fetching intermediate headers + // reset pruning context + bp.blockProcessor.resetPruning() + } bp.lastPrunedHeaderHash = headerHash bp.lastPrunedHeaderNonce = header.GetNonce() @@ -4067,9 +4072,9 @@ func (bp *baseProcessor) PruneTrieAsyncHeader() { func (bp *baseProcessor) pruneTrieForHeadersUnprotected( 
headerHash []byte, header data.HeaderHandler, -) { +) error { if bytes.Equal(headerHash, bp.lastPrunedHeaderHash) { - return + return nil } headersToPrune := make([]data.HeaderHandler, 0) @@ -4081,9 +4086,9 @@ func (bp *baseProcessor) pruneTrieForHeadersUnprotected( for !bytes.Equal(walkerHash, lastPrunedHeaderHash) { // headers pool is cleaned on consensus flow based on last execution result // included on the committed header (plus some delta), so intermediate headers - // should be available in pool, since trie prunning is triggered from + // should be available in pool, since trie pruning is triggered from // execution flow; if there are no included blocks from execution flow - // (and not prunning triggerd) headers will not be removed from pool + // (and not pruning triggered) headers will not be removed from pool header, err := process.GetHeader( walkerHash, bp.dataPool.Headers(), @@ -4092,9 +4097,8 @@ func (bp *baseProcessor) pruneTrieForHeadersUnprotected( header.GetShardID(), ) if err != nil { - // TODO: handle pruning eviction list cleanup - log.Warn("failed to get intermediate header for prunning", "error", err) - continue + log.Warn("failed to get intermediate header for pruning", "error", err) + return err } headersToPrune = append(headersToPrune, header) @@ -4106,4 +4110,6 @@ func (bp *baseProcessor) pruneTrieForHeadersUnprotected( header := headersToPrune[i] bp.blockProcessor.pruneTrieHeaderV3(header) } + + return nil } diff --git a/process/block/interface.go b/process/block/interface.go index ac7dc952416..d38324cc453 100644 --- a/process/block/interface.go +++ b/process/block/interface.go @@ -13,6 +13,7 @@ import ( type blockProcessor interface { removeStartOfEpochBlockDataFromPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error pruneTrieHeaderV3(header data.HeaderHandler) + resetPruning() } type gasConsumedProvider interface { diff --git a/process/block/metablock.go b/process/block/metablock.go index b5512de5db7..1d1f68a701b 
100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1704,6 +1704,18 @@ func (mp *metaProcessor) pruneTrieHeaderV3( } } +func (mp *metaProcessor) resetPruning() { + accountsDb := mp.accountsDB[state.UserAccountsState] + if accountsDb.IsPruningEnabled() { + accountsDb.ResetPruning() + } + + peerAccountsDb := mp.accountsDB[state.PeerAccountsState] + if peerAccountsDb.IsPruningEnabled() { + peerAccountsDb.ResetPruning() + } +} + func (mp *metaProcessor) getPreviousExecutionResult( index int, executionResultsHandlers []data.BaseExecutionResultHandler, diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 7c5695a40bf..d920a48d08a 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1247,7 +1247,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade if !currentHeader.IsHeaderV3() { sp.pruneTrieLegacy(headers) } else { - // for header v3, trie prunning is triggered in async mode from headers executor + // for header v3, trie pruning is triggered in async mode from headers executor if currentHeader.IsStartOfEpochBlock() { sp.nodesCoordinator.ShuffleOutForEpoch(currentHeader.GetEpoch()) @@ -1307,6 +1307,15 @@ func (sp *shardProcessor) pruneTrieHeaderV3( } } +func (sp *shardProcessor) resetPruning() { + accountsDb := sp.accountsDB[state.UserAccountsState] + if !accountsDb.IsPruningEnabled() { + return + } + + accountsDb.ResetPruning() +} + func (sp *shardProcessor) getPreviousExecutionResult( index int, executionResultsHandlers []data.BaseExecutionResultHandler, From c63af730bea91d7694e2c9133143ea4ba536c017 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 26 Mar 2026 13:06:52 +0200 Subject: [PATCH 16/20] fix typo --- process/block/metablock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index 1d1f68a701b..a73ae72bf97 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ 
-1635,7 +1635,7 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock ) } - // for header v3, trie prunning is triggered in async mode from headers executor + // for header v3, trie pruning is triggered in async mode from headers executor outportFinalizedHeaderHash := metaBlockHash if !common.IsFlagEnabledAfterEpochsStartBlock(metaBlock, mp.enableEpochsHandler, common.AndromedaFlag) { From c46a0e8b3111438512c7fb7233b586c22496a906 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Mar 2026 17:49:01 +0200 Subject: [PATCH 17/20] clean ewl on failed consensus --- .../disabled/disabledAccountsAdapter.go | 8 +- .../executionManager/executionManager.go | 6 + .../executionTrack/executionResultsTracker.go | 66 +++ .../executionResultsTracker_test.go | 477 ++++++++++++++++++ process/block/baseProcess.go | 82 ++- process/block/baseProcess_test.go | 247 +++++++++ process/block/export_test.go | 30 ++ process/block/interface.go | 2 + process/block/metablock.go | 60 +++ process/block/metablockProposal.go | 2 + process/block/metablock_test.go | 212 ++++++++ process/block/shardblock.go | 25 + process/block/shardblockProposal.go | 2 + process/block/shardblock_test.go | 201 ++++++++ process/interface.go | 3 + .../simulationAccountsDB.go | 8 +- state/accountsDB.go | 7 + state/accountsDBApi.go | 5 + state/accountsDBApiWithHistory.go | 5 + state/interface.go | 3 + .../disabled/disabledStoragePruningManager.go | 5 + .../memoryEvictionWaitingList.go | 10 +- .../memoryEvictionWaitingList_test.go | 22 +- .../storagePruningManager.go | 11 +- .../storagePruningManager_test.go | 73 +++ .../executionResultsTrackerStub.go | 11 + .../processMocks/executionManagerMock.go | 10 + .../processMocks/executionTrackerStub.go | 12 + testscommon/state/accountsAdapterStub.go | 12 +- testscommon/state/evictionWaitingListMock.go | 7 + .../state/storagePruningManagerStub.go | 19 +- 31 files changed, 1631 insertions(+), 12 deletions(-) diff --git 
a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index d6ace804ff9..39547a07531 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -3,9 +3,10 @@ package disabled import ( "context" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) type accountsAdapter struct { @@ -146,6 +147,11 @@ func (a *accountsAdapter) Close() error { return nil } +// GetEvictionWaitingListSize returns 0 for the disabled accounts adapter +func (a *accountsAdapter) GetEvictionWaitingListSize() int { + return 0 +} + // IsInterfaceNil - func (a *accountsAdapter) IsInterfaceNil() bool { return a == nil diff --git a/process/asyncExecution/executionManager/executionManager.go b/process/asyncExecution/executionManager/executionManager.go index 4995645b8a2..4287d693fb1 100644 --- a/process/asyncExecution/executionManager/executionManager.go +++ b/process/asyncExecution/executionManager/executionManager.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/asyncExecution/cache" "github.com/multiversx/mx-chain-go/process/asyncExecution/disabled" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" ) @@ -228,6 +229,11 @@ func (em *executionManager) RemovePendingExecutionResultsFromNonce(nonce uint64) return em.executionResultsTracker.RemoveFromNonce(nonce) } +// PopDismissedResults returns all batches of dismissed execution results and clears the internal queue +func (em *executionManager) PopDismissedResults() []executionTrack.DismissedBatch { + return em.executionResultsTracker.PopDismissedResults() +} + // ResetAndResumeExecution 
resets the managed components to the last notarized result and resumes execution func (em *executionManager) ResetAndResumeExecution(lastNotarizedResult data.BaseExecutionResultHandler) error { if check.IfNil(lastNotarizedResult) { diff --git a/process/asyncExecution/executionTrack/executionResultsTracker.go b/process/asyncExecution/executionTrack/executionResultsTracker.go index ff99f59d91d..bc1e7e7d06b 100644 --- a/process/asyncExecution/executionTrack/executionResultsTracker.go +++ b/process/asyncExecution/executionTrack/executionResultsTracker.go @@ -14,6 +14,16 @@ import ( var log = logger.GetOrCreate("process/asyncExecution/executionTrack") +const maxDismissedBatches = 100 + +// DismissedBatch holds a group of dismissed execution results alongside the anchor +// result that preceded them. The anchor's root hash is the "previous root hash" +// for the first dismissed result's EWL OldRoot entry. +type DismissedBatch struct { + AnchorResult data.BaseExecutionResultHandler + Results []data.BaseExecutionResultHandler +} + type executionResultsTracker struct { lastNotarizedResult data.BaseExecutionResultHandler mutex sync.RWMutex @@ -21,6 +31,7 @@ type executionResultsTracker struct { nonceHash *nonceHash lastExecutedResultHash []byte consensusCommittedHashes map[uint64][]byte // tracks which hash was committed by consensus for each nonce + dismissedBatches []DismissedBatch } // NewExecutionResultsTracker will create a new instance of *executionResultsTracker @@ -211,6 +222,18 @@ func (ert *executionResultsTracker) cleanConfirmedExecutionResults(headerExecuti if !areEqual { ert.lastExecutedResultHash = lastMatchingHash + // Compute anchor for dismissed batch + var anchor data.BaseExecutionResultHandler + if idx > 0 { + anchor = pendingExecutionResult[idx-1] + } else { + anchor = ert.lastNotarizedResult + } + ert.addDismissedBatch(DismissedBatch{ + AnchorResult: anchor, + Results: pendingExecutionResult[idx:], + }) + // different execution result should clean 
everything starting from this execution result and return CleanResultMismatch ert.cleanExecutionResults(pendingExecutionResult[idx:]) @@ -314,6 +337,14 @@ func (ert *executionResultsTracker) Clean(lastNotarizedResult data.BaseExecution ert.mutex.Lock() defer ert.mutex.Unlock() + pending, _ := ert.getPendingExecutionResults() + if len(pending) > 0 { + ert.addDismissedBatch(DismissedBatch{ + AnchorResult: ert.lastNotarizedResult, + Results: pending, + }) + } + ert.executionResultsByHash = make(map[string]data.BaseExecutionResultHandler) ert.nonceHash = newNonceHash() @@ -342,6 +373,21 @@ func (ert *executionResultsTracker) removePendingFromNonceUnprotected(nonce uint return nil } + // Compute anchor: last pending result before the dismissal point + var anchor data.BaseExecutionResultHandler + for _, result := range pendingExecutionResult { + if result.GetHeaderNonce() < nonce { + anchor = result + } + } + if anchor == nil { + anchor = ert.lastNotarizedResult + } + ert.addDismissedBatch(DismissedBatch{ + AnchorResult: anchor, + Results: resultsToRemove, + }) + // Remove from executionResultsByHash and nonceHash, but preserve consensusCommittedHashes // to continue blocking stale results from being added for these nonces ert.removeExecutionResultsFromMaps(resultsToRemove) @@ -364,6 +410,26 @@ func (ert *executionResultsTracker) removePendingFromNonceUnprotected(nonce uint return nil } +func (ert *executionResultsTracker) addDismissedBatch(batch DismissedBatch) { + if len(ert.dismissedBatches) >= maxDismissedBatches { + log.Warn("dismissed batches queue is full, dropping oldest batch", + "maxDismissedBatches", maxDismissedBatches, + "droppedResults", len(ert.dismissedBatches[0].Results), + ) + ert.dismissedBatches = ert.dismissedBatches[1:] + } + ert.dismissedBatches = append(ert.dismissedBatches, batch) +} + +// PopDismissedResults returns all batches of dismissed execution results and clears the internal queue +func (ert *executionResultsTracker) PopDismissedResults() 
[]DismissedBatch { + ert.mutex.Lock() + defer ert.mutex.Unlock() + batches := ert.dismissedBatches + ert.dismissedBatches = nil + return batches +} + // IsInterfaceNil returns true if there is no value under the interface func (ert *executionResultsTracker) IsInterfaceNil() bool { return ert == nil diff --git a/process/asyncExecution/executionTrack/executionResultsTracker_test.go b/process/asyncExecution/executionTrack/executionResultsTracker_test.go index 35f5c285638..9de89760a5d 100644 --- a/process/asyncExecution/executionTrack/executionResultsTracker_test.go +++ b/process/asyncExecution/executionTrack/executionResultsTracker_test.go @@ -2,6 +2,7 @@ package executionTrack import ( "errors" + "fmt" "testing" "github.com/multiversx/mx-chain-core-go/data/block" @@ -581,3 +582,479 @@ func TestExecutionResultsTracker_Clean(t *testing.T) { require.Equal(t, newLast, lastExec) }) } + +func TestExecutionResultsTracker_PopDismissedResults_EmptyByDefault(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + batches := tracker.PopDismissedResults() + require.Nil(t, batches) +} + +func TestExecutionResultsTracker_PopDismissedResults_OnCleanOnConsensusReached(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + exec2 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash2"), + HeaderNonce: 12, + RootHash: []byte("rootHash2"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + _, _ = tracker.AddExecutionResult(exec2) + + // Consensus commits nonce 11 with a different 
hash - dismisses nonce 11 and higher + header := &block.HeaderV3{ + Nonce: 11, + } + tracker.CleanOnConsensusReached([]byte("different_hash"), header) + + batches := tracker.PopDismissedResults() + require.Len(t, batches, 1) + require.Len(t, batches[0].Results, 2) + // Anchor should be lastNotarized (no pending results before nonce 11) + require.Equal(t, lastNotarized.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Equal(t, exec1.GetRootHash(), batches[0].Results[0].GetRootHash()) + require.Equal(t, exec2.GetRootHash(), batches[0].Results[1].GetRootHash()) + + // Second pop should return nil (queue drained) + require.Nil(t, tracker.PopDismissedResults()) +} + +func TestExecutionResultsTracker_PopDismissedResults_AnchorIsLastPendingBeforeDismissal(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + exec2 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash2"), + HeaderNonce: 12, + RootHash: []byte("rootHash2"), + }, + } + exec3 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash3"), + HeaderNonce: 13, + RootHash: []byte("rootHash3"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + _, _ = tracker.AddExecutionResult(exec2) + _, _ = tracker.AddExecutionResult(exec3) + + // RemoveFromNonce(12) should dismiss exec2 and exec3, keeping exec1 + _ = tracker.RemoveFromNonce(12) + + batches := tracker.PopDismissedResults() + require.Len(t, batches, 1) + require.Len(t, batches[0].Results, 2) + // Anchor should be 
exec1 (last pending result before nonce 12) + require.Equal(t, exec1.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Equal(t, exec2.GetRootHash(), batches[0].Results[0].GetRootHash()) + require.Equal(t, exec3.GetRootHash(), batches[0].Results[1].GetRootHash()) +} + +func TestExecutionResultsTracker_PopDismissedResults_OnCleanConfirmedMismatch(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + exec2 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash2"), + HeaderNonce: 12, + RootHash: []byte("rootHash2"), + }, + } + exec3 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash3"), + HeaderNonce: 13, + RootHash: []byte("rootHash3"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + _, _ = tracker.AddExecutionResult(exec2) + _, _ = tracker.AddExecutionResult(exec3) + + // Header confirms exec1 matches, but exec2 mismatches at idx=1 + header := &block.HeaderV3{ + ExecutionResults: []*block.ExecutionResult{ + exec1, + { + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("different"), + HeaderNonce: 12, + RootHash: []byte("differentRoot"), + }, + }, + }, + } + err := tracker.CleanConfirmedExecutionResults(header) + require.ErrorIs(t, err, ErrExecutionResultMismatch) + + batches := tracker.PopDismissedResults() + require.Len(t, batches, 1) + require.Len(t, batches[0].Results, 2) // exec2 and exec3 dismissed + // Anchor should be exec1 (last matching result, at idx=0 the preceding pending 
result) + require.Equal(t, exec1.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Equal(t, exec2.GetRootHash(), batches[0].Results[0].GetRootHash()) + require.Equal(t, exec3.GetRootHash(), batches[0].Results[1].GetRootHash()) +} + +func TestExecutionResultsTracker_PopDismissedResults_OnCleanConfirmedMismatchAtFirstIndex(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + + // Header has mismatch at idx=0 + header := &block.HeaderV3{ + ExecutionResults: []*block.ExecutionResult{ + { + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("different"), + HeaderNonce: 11, + RootHash: []byte("differentRoot"), + }, + }, + }, + } + err := tracker.CleanConfirmedExecutionResults(header) + require.ErrorIs(t, err, ErrExecutionResultMismatch) + + batches := tracker.PopDismissedResults() + require.Len(t, batches, 1) + require.Len(t, batches[0].Results, 1) + // Anchor should be lastNotarized (mismatch at idx=0) + require.Equal(t, lastNotarized.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Equal(t, exec1.GetRootHash(), batches[0].Results[0].GetRootHash()) +} + +func TestExecutionResultsTracker_PopDismissedResults_OnClean(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := 
&block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + exec2 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash2"), + HeaderNonce: 12, + RootHash: []byte("rootHash2"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + _, _ = tracker.AddExecutionResult(exec2) + + newNotarized := &block.BaseExecutionResult{ + HeaderHash: []byte("new"), + HeaderNonce: 50, + RootHash: []byte("newRoot"), + } + tracker.Clean(newNotarized) + + batches := tracker.PopDismissedResults() + require.Len(t, batches, 1) + require.Len(t, batches[0].Results, 2) + // Anchor should be the OLD lastNotarized (before Clean overwrote it) + require.Equal(t, lastNotarized.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Equal(t, exec1.GetRootHash(), batches[0].Results[0].GetRootHash()) + require.Equal(t, exec2.GetRootHash(), batches[0].Results[1].GetRootHash()) +} + +func TestExecutionResultsTracker_PopDismissedResults_ConfirmedNotDismissed(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + + // Confirm exec1 successfully - should NOT appear in dismissed + header := &block.HeaderV3{ + ExecutionResults: []*block.ExecutionResult{exec1}, + } + err := tracker.CleanConfirmedExecutionResults(header) + require.NoError(t, err) + + batches := tracker.PopDismissedResults() + require.Nil(t, batches) +} + +func 
TestExecutionResultsTracker_PopDismissedResults_MultipleBatches(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + exec2 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash2"), + HeaderNonce: 12, + RootHash: []byte("rootHash2"), + }, + } + exec3 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash3"), + HeaderNonce: 13, + RootHash: []byte("rootHash3"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + _, _ = tracker.AddExecutionResult(exec2) + _, _ = tracker.AddExecutionResult(exec3) + + // First dismissal: remove nonce 13 and higher - dismisses exec3 + _ = tracker.RemoveFromNonce(13) + + // Second dismissal: remove nonce 12 and higher - dismisses exec2 + _ = tracker.RemoveFromNonce(12) + + // Should get 2 separate batches with different anchors + batches := tracker.PopDismissedResults() + require.Len(t, batches, 2) + + // Batch 1: exec3 dismissed, anchor = exec2 (last pending before nonce 13) + require.Equal(t, exec2.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Len(t, batches[0].Results, 1) + require.Equal(t, exec3.GetRootHash(), batches[0].Results[0].GetRootHash()) + + // Batch 2: exec2 dismissed, anchor = exec1 (last pending before nonce 12) + require.Equal(t, exec1.GetRootHash(), batches[1].AnchorResult.GetRootHash()) + require.Len(t, batches[1].Results, 1) + require.Equal(t, exec2.GetRootHash(), batches[1].Results[0].GetRootHash()) +} + +func 
TestExecutionResultsTracker_PopDismissedResults_IndependentSources(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + exec1 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash1"), + HeaderNonce: 11, + RootHash: []byte("rootHash1"), + }, + } + exec2 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash2"), + HeaderNonce: 12, + RootHash: []byte("rootHash2"), + }, + } + exec3 := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash3"), + HeaderNonce: 13, + RootHash: []byte("rootHash3"), + }, + } + _, _ = tracker.AddExecutionResult(exec1) + _, _ = tracker.AddExecutionResult(exec2) + _, _ = tracker.AddExecutionResult(exec3) + + // Source 1: CleanOnConsensusReached dismisses nonce 12+ (consensus committed different hash for 12) + header := &block.HeaderV3{Nonce: 12} + tracker.CleanOnConsensusReached([]byte("different_hash"), header) + + // Source 2: RemoveFromNonce dismisses the remaining exec1 + _ = tracker.RemoveFromNonce(11) + + batches := tracker.PopDismissedResults() + require.Len(t, batches, 2) + + // Batch 0: from CleanOnConsensusReached - dismissed exec2+exec3, anchor = exec1 + require.Equal(t, exec1.GetRootHash(), batches[0].AnchorResult.GetRootHash()) + require.Len(t, batches[0].Results, 2) + require.Equal(t, exec2.GetRootHash(), batches[0].Results[0].GetRootHash()) + require.Equal(t, exec3.GetRootHash(), batches[0].Results[1].GetRootHash()) + + // Batch 1: from RemoveFromNonce - dismissed exec1, anchor = lastNotarized + require.Equal(t, lastNotarized.GetRootHash(), batches[1].AnchorResult.GetRootHash()) + require.Len(t, batches[1].Results, 1) + 
require.Equal(t, exec1.GetRootHash(), batches[1].Results[0].GetRootHash()) +} + +func TestExecutionResultsTracker_DismissedBatchesOverflow(t *testing.T) { + t.Parallel() + + tracker := NewExecutionResultsTracker() + + lastNotarized := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte("hash0"), + HeaderNonce: 10, + RootHash: []byte("rootHash0"), + }, + } + _ = tracker.SetLastNotarizedResult(lastNotarized) + + // Fill the queue to capacity by repeatedly adding+removing execution results + for i := 0; i < maxDismissedBatches+5; i++ { + nonce := uint64(11) + exec := &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{ + HeaderHash: []byte(fmt.Sprintf("hash_%d", i)), + HeaderNonce: nonce, + RootHash: []byte(fmt.Sprintf("root_%d", i)), + }, + } + _, _ = tracker.AddExecutionResult(exec) + _ = tracker.RemoveFromNonce(nonce) + } + + batches := tracker.PopDismissedResults() + // Should be capped at maxDismissedBatches, oldest batches dropped + require.Len(t, batches, maxDismissedBatches) + + // The surviving batches should be the last maxDismissedBatches ones (oldest dropped) + // The first surviving batch should be from iteration index 5 (0-4 were dropped) + firstSurvivingIdx := 5 + require.Equal(t, + []byte(fmt.Sprintf("root_%d", firstSurvivingIdx)), + batches[0].Results[0].GetRootHash(), + ) + // All anchors should be lastNotarized (no pending results with nonce < 11 exist) + for i, batch := range batches { + require.Equal(t, lastNotarized.GetRootHash(), batch.AnchorResult.GetRootHash(), + "batch %d should have lastNotarized as anchor", i) + } + + // The last surviving batch should be from the last iteration + lastIdx := maxDismissedBatches + 5 - 1 + require.Equal(t, + []byte(fmt.Sprintf("root_%d", lastIdx)), + batches[maxDismissedBatches-1].Results[0].GetRootHash(), + ) + + // After pop, queue should be empty + require.Nil(t, tracker.PopDismissedResults()) +} diff --git a/process/block/baseProcess.go 
b/process/block/baseProcess.go index 13e7bb483ba..8ba40be0622 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -52,6 +52,11 @@ import ( const ( cleanupHeadersDelta = 5 waitForExecutionResultsCheckInterval = 5 * time.Millisecond + + maxGapForEWLThreshold = 250 // cap to prevent pathological configs + ewlEntriesPerResult = 2 // OldRoot + NewRoot per commit + ewlTolerancePercent = 130 // 30% tolerance over expected size + ewlThresholdMinBaseline = 10 // minimum baseline to avoid false resets on small gaps ) var log = logger.GetOrCreate("process/block") @@ -151,6 +156,7 @@ type baseProcessor struct { txExecutionOrderHandler common.TxExecutionOrderHandler aotSelector process.AOTTransactionSelector maxProposalNonceGap uint64 + ewlResetThreshold int closingNodeStarted *atomic.Bool lastPrunedHeaderHash []byte @@ -191,6 +197,8 @@ func NewBaseProcessor(arguments ArgBaseProcessor) (*baseProcessor, error) { maxProposalNonceGap = defaultMaxProposalNonceGap } + ewlResetThreshold := computeEWLResetThreshold(maxProposalNonceGap) + base := &baseProcessor{ accountsDB: arguments.AccountsDB, accountsProposal: arguments.AccountsProposal, @@ -252,6 +260,7 @@ func NewBaseProcessor(arguments ArgBaseProcessor) (*baseProcessor, error) { txExecutionOrderHandler: arguments.TxExecutionOrderHandler, aotSelector: arguments.AOTSelector, maxProposalNonceGap: maxProposalNonceGap, + ewlResetThreshold: ewlResetThreshold, closingNodeStarted: arguments.CoreComponents.ClosingNodeStarted(), } @@ -2225,7 +2234,9 @@ func (bp *baseProcessor) commitInEpoch(currentEpoch uint32, epochToCommit uint32 return nil } -// PruneStateOnRollback recreates the state tries to the root hashes indicated by the provided headers +// PruneStateOnRollback recreates the state tries to the root hashes indicated by the provided headers. +// Not called for V3 headers: shouldAllowRollback returns false for V3 in baseSync.go. 
+// V3 block dismissal is handled via cancelPruneForDismissedExecutionResults. func (bp *baseProcessor) PruneStateOnRollback(currHeader data.HeaderHandler, currHeaderHash []byte, prevHeader data.HeaderHandler, prevHeaderHash []byte) { for key := range bp.accountsDB { if !bp.accountsDB[key].IsPruningEnabled() { @@ -4037,6 +4048,75 @@ func (bp *baseProcessor) saveEpochStartEconomicsMetrics(epochStartMetaBlock data bp.appStatusHandler.SetStringValue(common.MetricDevRewardsInEpoch, epochStartMetaBlock.GetDevFeesInEpoch().String()) } +func (bp *baseProcessor) cleanupDismissedEWLEntries() { + dismissedBatches := bp.executionManager.PopDismissedResults() + + if len(dismissedBatches) > 0 { + totalDismissed := 0 + for _, batch := range dismissedBatches { + totalDismissed += len(batch.Results) + } + log.Debug("cleanupDismissedEWLEntries: draining dismissed batches", + "batches", len(dismissedBatches), + "totalDismissed", totalDismissed, + ) + + bp.blockProcessor.cancelPruneForDismissedExecutionResults(dismissedBatches) + bp.resetLastPrunedHeader() + } + + bp.checkEWLSizeAndReset() +} + +// checkEWLSizeAndReset is a safety net (Layer 3). If the EWL size exceeds the +// precomputed threshold, it resets pruning to prevent unbounded memory growth. 
+func (bp *baseProcessor) checkEWLSizeAndReset() { + for key, accountsDb := range bp.accountsDB { + if !accountsDb.IsPruningEnabled() { + continue + } + ewlSize := accountsDb.GetEvictionWaitingListSize() + if ewlSize > bp.ewlResetThreshold { + log.Warn("EWL cache size exceeds threshold, resetting pruning", + "accountsDB", key, + "ewlSize", ewlSize, + "threshold", bp.ewlResetThreshold, + ) + accountsDb.ResetPruning() + bp.resetLastPrunedHeader() + } + } +} + +func computeEWLResetThreshold(maxProposalNonceGap uint64) int { + gap := maxProposalNonceGap + if gap > maxGapForEWLThreshold { + gap = maxGapForEWLThreshold + } + expected := gap * ewlEntriesPerResult + return int(expected*ewlTolerancePercent/100) + ewlThresholdMinBaseline +} + +// cancelPruneForRootHashTransition cancels pruning for a root hash transition from prev to current. +// It issues CancelPrune for currentRootHash as NewRoot and prevRootHash as OldRoot. +func cancelPruneForRootHashTransition(accountsDb state.AccountsAdapter, prevRootHash, currentRootHash []byte) { + if len(prevRootHash) == 0 || len(currentRootHash) == 0 { + return + } + if bytes.Equal(prevRootHash, currentRootHash) { + return + } + accountsDb.CancelPrune(currentRootHash, state.NewRoot) + accountsDb.CancelPrune(prevRootHash, state.OldRoot) +} + +func (bp *baseProcessor) resetLastPrunedHeader() { + bp.mutLastPrunedHeader.Lock() + bp.lastPrunedHeaderHash = nil + bp.lastPrunedHeaderNonce = 0 + bp.mutLastPrunedHeader.Unlock() +} + // PruneTrieAsyncHeader will trigger trie pruning for header from async execution flow func (bp *baseProcessor) PruneTrieAsyncHeader() { bp.mutLastPrunedHeader.Lock() diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 722fa3e1cb2..2703e4d9afd 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -6439,3 +6439,250 @@ func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { require.Equal(t, headerHash9, bp.GetLastPrunedHash()) }) } + +func 
TestComputeEWLResetThreshold(t *testing.T) { + t.Parallel() + + t.Run("gap 0 should return minimum baseline", func(t *testing.T) { + t.Parallel() + // gap=0 -> expected=0*2=0, 0*130/100=0, +10 = 10 + require.Equal(t, 10, blproc.ComputeEWLResetThreshold(0)) + }) + t.Run("gap 1", func(t *testing.T) { + t.Parallel() + // gap=1 -> expected=1*2=2, 2*130/100=2, +10 = 12 + require.Equal(t, 12, blproc.ComputeEWLResetThreshold(1)) + }) + t.Run("default gap 10", func(t *testing.T) { + t.Parallel() + // gap=10 -> expected=10*2=20, 20*130/100=26, +10 = 36 + require.Equal(t, 36, blproc.ComputeEWLResetThreshold(10)) + }) + t.Run("gap above cap should be clamped", func(t *testing.T) { + t.Parallel() + // gap=500 clamped to 250 -> expected=250*2=500, 500*130/100=650, +10 = 660 + require.Equal(t, 660, blproc.ComputeEWLResetThreshold(500)) + require.Equal(t, 660, blproc.ComputeEWLResetThreshold(1000)) + }) + t.Run("gap at cap boundary", func(t *testing.T) { + t.Parallel() + require.Equal(t, 660, blproc.ComputeEWLResetThreshold(250)) + require.Equal(t, blproc.ComputeEWLResetThreshold(250), blproc.ComputeEWLResetThreshold(251)) + }) +} + +func TestCancelPruneForRootHashTransition(t *testing.T) { + t.Parallel() + + t.Run("different hashes should call CancelPrune for both", func(t *testing.T) { + t.Parallel() + cancelPruneCalls := make([]struct { + rootHash []byte + identifier state.TriePruningIdentifier + }, 0) + accountsStub := &stateMock.AccountsStub{ + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalls = append(cancelPruneCalls, struct { + rootHash []byte + identifier state.TriePruningIdentifier + }{rootHash, identifier}) + }, + } + + blproc.CancelPruneForRootHashTransition(accountsStub, []byte("prev"), []byte("curr")) + + require.Len(t, cancelPruneCalls, 2) + require.Equal(t, []byte("curr"), cancelPruneCalls[0].rootHash) + require.Equal(t, state.NewRoot, cancelPruneCalls[0].identifier) + require.Equal(t, []byte("prev"), 
cancelPruneCalls[1].rootHash) + require.Equal(t, state.OldRoot, cancelPruneCalls[1].identifier) + }) + t.Run("equal hashes should not call CancelPrune", func(t *testing.T) { + t.Parallel() + accountsStub := &stateMock.AccountsStub{ + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called for equal hashes") + }, + } + blproc.CancelPruneForRootHashTransition(accountsStub, []byte("same"), []byte("same")) + }) + t.Run("empty prev hash should not call CancelPrune", func(t *testing.T) { + t.Parallel() + accountsStub := &stateMock.AccountsStub{ + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called when prev is empty") + }, + } + blproc.CancelPruneForRootHashTransition(accountsStub, nil, []byte("curr")) + blproc.CancelPruneForRootHashTransition(accountsStub, []byte{}, []byte("curr")) + }) + t.Run("empty current hash should not call CancelPrune", func(t *testing.T) { + t.Parallel() + accountsStub := &stateMock.AccountsStub{ + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called when current is empty") + }, + } + blproc.CancelPruneForRootHashTransition(accountsStub, []byte("prev"), nil) + blproc.CancelPruneForRootHashTransition(accountsStub, []byte("prev"), []byte{}) + }) +} + +func TestCleanupDismissedEWLEntries(t *testing.T) { + t.Parallel() + + t.Run("empty dismissed queue should only run size check", func(t *testing.T) { + t.Parallel() + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + GetEvictionWaitingListSizeCalled: func() int { return 0 }, 
+ } + arguments.ExecutionManager = &processMocks.ExecutionManagerMock{ + PopDismissedResultsCalled: func() []executionTrack.DismissedBatch { return nil }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + // should not panic, should not call CancelPrune + sp.CleanupDismissedEWLEntries() + }) + t.Run("dismissed batches should trigger CancelPrune and reset last pruned header", func(t *testing.T) { + t.Parallel() + + cancelPruneCalls := 0 + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalls++ + }, + GetEvictionWaitingListSizeCalled: func() int { return 0 }, + } + popCalled := false + arguments.ExecutionManager = &processMocks.ExecutionManagerMock{ + PopDismissedResultsCalled: func() []executionTrack.DismissedBatch { + if popCalled { + return nil + } + popCalled = true + return []executionTrack.DismissedBatch{ + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R2")}, + }, + }, + }, + } + }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + sp.SetLastPrunedHash([]byte("someHash")) + sp.SetLastPrunedNonce(100) + + sp.CleanupDismissedEWLEntries() + + // Two transitions: R0->R1 and R1->R2, each producing 2 CancelPrune calls = 4 total + require.Equal(t, 4, cancelPruneCalls) + // Last pruned header should be reset + 
require.Nil(t, sp.GetLastPrunedHash()) + }) +} + +func TestCheckEWLSizeAndReset(t *testing.T) { + t.Parallel() + + t.Run("ewl size below threshold should not trigger reset", func(t *testing.T) { + t.Parallel() + + resetCalled := false + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + GetEvictionWaitingListSizeCalled: func() int { return 5 }, + ResetPruningCalled: func() { + resetCalled = true + }, + } + arguments.ExecutionManager = &processMocks.ExecutionManagerMock{ + PopDismissedResultsCalled: func() []executionTrack.DismissedBatch { return nil }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + // default gap=10 -> threshold=36, ewlSize=5 < 36 + sp.CheckEWLSizeAndReset() + require.False(t, resetCalled) + }) + t.Run("ewl size above threshold should trigger reset and clear last pruned header", func(t *testing.T) { + t.Parallel() + + resetCalled := false + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + GetEvictionWaitingListSizeCalled: func() int { return 1000 }, + ResetPruningCalled: func() { + resetCalled = true + }, + } + arguments.ExecutionManager = &processMocks.ExecutionManagerMock{ + PopDismissedResultsCalled: func() []executionTrack.DismissedBatch { return nil }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + sp.SetLastPrunedHash([]byte("someHash")) + sp.SetLastPrunedNonce(50) + + // default gap=10 -> threshold=36, 
ewlSize=1000 > 36 + sp.CheckEWLSizeAndReset() + require.True(t, resetCalled) + require.Nil(t, sp.GetLastPrunedHash()) + }) + t.Run("pruning disabled should skip reset even if size would exceed", func(t *testing.T) { + t.Parallel() + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return false }, + GetEvictionWaitingListSizeCalled: func() int { + require.Fail(t, "should not check EWL size when pruning is disabled") + return 0 + }, + ResetPruningCalled: func() { + require.Fail(t, "should not reset when pruning is disabled") + }, + } + arguments.ExecutionManager = &processMocks.ExecutionManagerMock{ + PopDismissedResultsCalled: func() []executionTrack.DismissedBatch { return nil }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + sp.CheckEWLSizeAndReset() + }) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index b85134899c1..d3ee76c2490 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -1211,3 +1211,33 @@ func (bp *baseProcessor) GetLastPrunedHash() []byte { return lastPrunedHeaderHash } + +// CleanupDismissedEWLEntries - +func (bp *baseProcessor) CleanupDismissedEWLEntries() { + bp.cleanupDismissedEWLEntries() +} + +// CheckEWLSizeAndReset - +func (bp *baseProcessor) CheckEWLSizeAndReset() { + bp.checkEWLSizeAndReset() +} + +// ComputeEWLResetThreshold - +func ComputeEWLResetThreshold(maxProposalNonceGap uint64) int { + return computeEWLResetThreshold(maxProposalNonceGap) +} + +// CancelPruneForRootHashTransition - +func CancelPruneForRootHashTransition(accountsDb state.AccountsAdapter, prevRootHash, currentRootHash []byte) { + cancelPruneForRootHashTransition(accountsDb, prevRootHash, currentRootHash) +} 
+ +// CancelPruneForDismissedExecutionResults - +func (sp *shardProcessor) CancelPruneForDismissedExecutionResults(batches []executionTrack.DismissedBatch) { + sp.cancelPruneForDismissedExecutionResults(batches) +} + +// CancelPruneForDismissedExecutionResults - +func (mp *metaProcessor) CancelPruneForDismissedExecutionResults(batches []executionTrack.DismissedBatch) { + mp.cancelPruneForDismissedExecutionResults(batches) +} diff --git a/process/block/interface.go b/process/block/interface.go index d38324cc453..53ed9422f9a 100644 --- a/process/block/interface.go +++ b/process/block/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" "github.com/multiversx/mx-chain-go/process/block/headerForBlock" ) @@ -14,6 +15,7 @@ type blockProcessor interface { removeStartOfEpochBlockDataFromPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error pruneTrieHeaderV3(header data.HeaderHandler) resetPruning() + cancelPruneForDismissedExecutionResults(batches []executionTrack.DismissedBatch) } type gasConsumedProvider interface { diff --git a/process/block/metablock.go b/process/block/metablock.go index a73ae72bf97..2d9e824d531 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -24,6 +24,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" @@ -1716,6 +1717,65 @@ func (mp *metaProcessor) resetPruning() { } } +func (mp *metaProcessor) cancelPruneForDismissedExecutionResults(batches 
[]executionTrack.DismissedBatch) { + accountsDb := mp.accountsDB[state.UserAccountsState] + peerAccountsDb := mp.accountsDB[state.PeerAccountsState] + userPruningEnabled := accountsDb.IsPruningEnabled() + peerPruningEnabled := peerAccountsDb.IsPruningEnabled() + if !userPruningEnabled && !peerPruningEnabled { + return + } + + for _, batch := range batches { + mp.cancelPruneForDismissedBatch(accountsDb, peerAccountsDb, batch, userPruningEnabled, peerPruningEnabled) + } +} + +func (mp *metaProcessor) cancelPruneForDismissedBatch( + accountsDb state.AccountsAdapter, + peerAccountsDb state.AccountsAdapter, + batch executionTrack.DismissedBatch, + userPruningEnabled bool, + peerPruningEnabled bool, +) { + if batch.AnchorResult == nil { + return + } + + prevUserRootHash := batch.AnchorResult.GetRootHash() + prevValidatorRootHash := mp.extractValidatorStatsRootHash(batch.AnchorResult, peerPruningEnabled, "anchor") + + for _, result := range batch.Results { + currentUserRootHash := result.GetRootHash() + currentValidatorRootHash := mp.extractValidatorStatsRootHash(result, peerPruningEnabled, "result") + + if userPruningEnabled { + cancelPruneForRootHashTransition(accountsDb, prevUserRootHash, currentUserRootHash) + } + if peerPruningEnabled { + cancelPruneForRootHashTransition(peerAccountsDb, prevValidatorRootHash, currentValidatorRootHash) + } + + prevUserRootHash = currentUserRootHash + prevValidatorRootHash = currentValidatorRootHash + } +} + +func (mp *metaProcessor) extractValidatorStatsRootHash( + result data.BaseExecutionResultHandler, + peerPruningEnabled bool, + context string, +) []byte { + metaResult, ok := result.(data.BaseMetaExecutionResultHandler) + if ok { + return metaResult.GetValidatorStatsRootHash() + } + if peerPruningEnabled { + log.Warn("cancelPruneForDismissedExecutionResults: " + context + " does not implement BaseMetaExecutionResultHandler") + } + return nil +} + func (mp *metaProcessor) getPreviousExecutionResult( index int, 
executionResultsHandlers []data.BaseExecutionResultHandler, diff --git a/process/block/metablockProposal.go b/process/block/metablockProposal.go index 1949970b9d8..df8fa573bd8 100644 --- a/process/block/metablockProposal.go +++ b/process/block/metablockProposal.go @@ -441,6 +441,8 @@ func (mp *metaProcessor) CommitBlockProposalState(headerHandler data.HeaderHandl return process.ErrNilBlockHeader } + mp.cleanupDismissedEWLEntries() + err := mp.commitState(headerHandler) if err != nil { return err diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index fd53a5f204f..a28c8eb7592 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -5054,3 +5054,215 @@ func TestMetaProcessor_CommitBlockV3FailAfterHeadMutationShouldRestoreChainHead( "currentBlockHeaderHash should be restored to previous hash after failed V3 commit") }) } + +func TestMetaProcessor_CancelPruneForDismissedExecutionResults(t *testing.T) { + t.Parallel() + + t.Run("both pruning disabled should not call CancelPrune", func(t *testing.T) { + t.Parallel() + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return false }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called on user accounts") + }, + } + arguments.AccountsDB[state.PeerAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return false }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called on peer accounts") + }, + } + + mp, err := processBlock.NewMetaProcessor(arguments) + require.Nil(t, err) + + batches := 
[]executionTrack.DismissedBatch{ + { + AnchorResult: &block.MetaExecutionResult{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + ValidatorStatsRootHash: []byte("V0"), + }, + }, + Results: []data.BaseExecutionResultHandler{ + &block.MetaExecutionResult{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + ValidatorStatsRootHash: []byte("V1"), + }, + }, + }, + }, + } + mp.CancelPruneForDismissedExecutionResults(batches) + }) + t.Run("user and peer pruning enabled should cancel prune for both", func(t *testing.T) { + t.Parallel() + + type cancelPruneCall struct { + rootHash []byte + identifier state.TriePruningIdentifier + } + userCalls := make([]cancelPruneCall, 0) + peerCalls := make([]cancelPruneCall, 0) + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + userCalls = append(userCalls, cancelPruneCall{rootHash: rootHash, identifier: identifier}) + }, + } + arguments.AccountsDB[state.PeerAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + peerCalls = append(peerCalls, cancelPruneCall{rootHash: rootHash, identifier: identifier}) + }, + } + + mp, err := processBlock.NewMetaProcessor(arguments) + require.Nil(t, err) + + // Dismissed chain: anchor(R0,V0) -> (R1,V1) -> (R2,V2) + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.MetaExecutionResult{ + ExecutionResult: 
&block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + ValidatorStatsRootHash: []byte("V0"), + }, + }, + Results: []data.BaseExecutionResultHandler{ + &block.MetaExecutionResult{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + ValidatorStatsRootHash: []byte("V1"), + }, + }, + &block.MetaExecutionResult{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R2")}, + ValidatorStatsRootHash: []byte("V2"), + }, + }, + }, + }, + } + mp.CancelPruneForDismissedExecutionResults(batches) + + // User accounts: 2 transitions, 2 CancelPrune calls each = 4 + require.Len(t, userCalls, 4) + require.Equal(t, []byte("R1"), userCalls[0].rootHash) + require.Equal(t, state.NewRoot, userCalls[0].identifier) + require.Equal(t, []byte("R0"), userCalls[1].rootHash) + require.Equal(t, state.OldRoot, userCalls[1].identifier) + require.Equal(t, []byte("R2"), userCalls[2].rootHash) + require.Equal(t, state.NewRoot, userCalls[2].identifier) + require.Equal(t, []byte("R1"), userCalls[3].rootHash) + require.Equal(t, state.OldRoot, userCalls[3].identifier) + + // Peer accounts: 2 transitions, 2 CancelPrune calls each = 4 + require.Len(t, peerCalls, 4) + require.Equal(t, []byte("V1"), peerCalls[0].rootHash) + require.Equal(t, state.NewRoot, peerCalls[0].identifier) + require.Equal(t, []byte("V0"), peerCalls[1].rootHash) + require.Equal(t, state.OldRoot, peerCalls[1].identifier) + require.Equal(t, []byte("V2"), peerCalls[2].rootHash) + require.Equal(t, state.NewRoot, peerCalls[2].identifier) + require.Equal(t, []byte("V1"), peerCalls[3].rootHash) + require.Equal(t, state.OldRoot, peerCalls[3].identifier) + }) + t.Run("result not implementing BaseMetaExecutionResultHandler should skip peer cancel prune", func(t *testing.T) { + t.Parallel() + + userCancelCalls := 0 + peerCancelCalls := 0 + + 
coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + userCancelCalls++ + }, + } + arguments.AccountsDB[state.PeerAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + peerCancelCalls++ + }, + } + + mp, err := processBlock.NewMetaProcessor(arguments) + require.Nil(t, err) + + // Use ExecutionResult (not MetaExecutionResult) - does NOT implement BaseMetaExecutionResultHandler + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + }, + }, + }, + } + mp.CancelPruneForDismissedExecutionResults(batches) + + // User accounts should still get CancelPrune (R0->R1 = 2 calls) + require.Equal(t, 2, userCancelCalls) + // Peer accounts should get 0 calls (validator root hashes are nil from non-meta results) + require.Equal(t, 0, peerCancelCalls) + }) + t.Run("only user pruning enabled should skip peer operations", func(t *testing.T) { + t.Parallel() + + userCancelCalls := 0 + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + 
CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + userCancelCalls++ + }, + } + arguments.AccountsDB[state.PeerAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return false }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called on peer accounts when pruning disabled") + }, + } + + mp, err := processBlock.NewMetaProcessor(arguments) + require.Nil(t, err) + + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.MetaExecutionResult{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + ValidatorStatsRootHash: []byte("V0"), + }, + }, + Results: []data.BaseExecutionResultHandler{ + &block.MetaExecutionResult{ + ExecutionResult: &block.BaseMetaExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + ValidatorStatsRootHash: []byte("V1"), + }, + }, + }, + }, + } + mp.CancelPruneForDismissedExecutionResults(batches) + + require.Equal(t, 2, userCancelCalls) + }) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index d920a48d08a..f09270000dd 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/block/helpers" "github.com/multiversx/mx-chain-go/process/block/processedMb" @@ -1316,6 +1317,30 @@ func (sp *shardProcessor) resetPruning() { accountsDb.ResetPruning() } +func (sp *shardProcessor) cancelPruneForDismissedExecutionResults(batches []executionTrack.DismissedBatch) { + accountsDb := 
sp.accountsDB[state.UserAccountsState] + if !accountsDb.IsPruningEnabled() { + return + } + + for _, batch := range batches { + sp.cancelPruneForDismissedBatch(accountsDb, batch) + } +} + +func (sp *shardProcessor) cancelPruneForDismissedBatch(accountsDb state.AccountsAdapter, batch executionTrack.DismissedBatch) { + if batch.AnchorResult == nil { + return + } + + prevRootHash := batch.AnchorResult.GetRootHash() + for _, result := range batch.Results { + currentRootHash := result.GetRootHash() + cancelPruneForRootHashTransition(accountsDb, prevRootHash, currentRootHash) + prevRootHash = currentRootHash + } +} + func (sp *shardProcessor) getPreviousExecutionResult( index int, executionResultsHandlers []data.BaseExecutionResultHandler, diff --git a/process/block/shardblockProposal.go b/process/block/shardblockProposal.go index d49a2d7000f..d6a7e152964 100644 --- a/process/block/shardblockProposal.go +++ b/process/block/shardblockProposal.go @@ -400,6 +400,8 @@ func (sp *shardProcessor) CommitBlockProposalState(headerHandler data.HeaderHand return process.ErrNilBlockHeader } + sp.cleanupDismissedEWLEntries() + err := sp.commitState(headerHandler) if err != nil { return err diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 4ef50232c9c..6615bcb1f62 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -33,6 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" blproc "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/headerForBlock" "github.com/multiversx/mx-chain-go/process/block/processedMb" @@ -7322,3 +7323,203 @@ func TestShardProcessor_CommitBlockV3FailAfterHeadMutationShouldRestoreChainHead "currentBlockHeaderHash should be restored to 
previous hash after failed V3 commit") }) } + +func TestShardProcessor_CancelPruneForDismissedExecutionResults(t *testing.T) { + t.Parallel() + + t.Run("pruning disabled should not call CancelPrune", func(t *testing.T) { + t.Parallel() + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return false }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called when pruning is disabled") + }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + }, + }, + }, + } + sp.CancelPruneForDismissedExecutionResults(batches) + }) + t.Run("nil anchor should skip batch", func(t *testing.T) { + t.Parallel() + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + require.Fail(t, "CancelPrune should not be called for nil anchor batch") + }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + batches := []executionTrack.DismissedBatch{ + {AnchorResult: nil, Results: []data.BaseExecutionResultHandler{ + 
&block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + }, + }}, + } + sp.CancelPruneForDismissedExecutionResults(batches) + }) + t.Run("single batch with multiple results should cancel prune for each transition", func(t *testing.T) { + t.Parallel() + + type cancelPruneCall struct { + rootHash []byte + identifier state.TriePruningIdentifier + } + calls := make([]cancelPruneCall, 0) + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + calls = append(calls, cancelPruneCall{rootHash: rootHash, identifier: identifier}) + }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + // Dismissed chain: anchor(R0) -> R1 -> R2 -> R3 + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R2")}, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R3")}, + }, + }, + }, + } + sp.CancelPruneForDismissedExecutionResults(batches) + + // 3 transitions: R0->R1, R1->R2, R2->R3, 2 CancelPrune calls each = 6 total + require.Len(t, calls, 6) + // Transition R0->R1 + require.Equal(t, []byte("R1"), calls[0].rootHash) + require.Equal(t, state.NewRoot, calls[0].identifier) + require.Equal(t, []byte("R0"), calls[1].rootHash) + require.Equal(t, 
state.OldRoot, calls[1].identifier) + // Transition R1->R2 + require.Equal(t, []byte("R2"), calls[2].rootHash) + require.Equal(t, state.NewRoot, calls[2].identifier) + require.Equal(t, []byte("R1"), calls[3].rootHash) + require.Equal(t, state.OldRoot, calls[3].identifier) + // Transition R2->R3 + require.Equal(t, []byte("R3"), calls[4].rootHash) + require.Equal(t, state.NewRoot, calls[4].identifier) + require.Equal(t, []byte("R2"), calls[5].rootHash) + require.Equal(t, state.OldRoot, calls[5].identifier) + }) + t.Run("equal consecutive root hashes should be skipped", func(t *testing.T) { + t.Parallel() + + cancelPruneCalls := 0 + + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalls++ + }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + // anchor(R0) -> R0 (same hash, no state change) -> R1 + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R0")}, + }, + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("R1")}, + }, + }, + }, + } + sp.CancelPruneForDismissedExecutionResults(batches) + + // R0->R0 is skipped (equal), R0->R1 produces 2 calls + require.Equal(t, 2, cancelPruneCalls) + }) + t.Run("multiple batches should all be processed", func(t *testing.T) { + t.Parallel() + + cancelPruneCalls := 0 + + coreComponents, dataComponents, bootstrapComponents, 
statusComponents := createComponentHolderMocks() + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + IsPruningEnabledCalled: func() bool { return true }, + CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { + cancelPruneCalls++ + }, + } + + sp, err := blproc.NewShardProcessor(arguments) + require.Nil(t, err) + + batches := []executionTrack.DismissedBatch{ + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("A0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("A1")}, + }, + }, + }, + { + AnchorResult: &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("B0")}, + }, + Results: []data.BaseExecutionResultHandler{ + &block.ExecutionResult{ + BaseExecutionResult: &block.BaseExecutionResult{RootHash: []byte("B1")}, + }, + }, + }, + } + sp.CancelPruneForDismissedExecutionResults(batches) + + // 2 batches, 1 transition each, 2 CancelPrune calls = 4 total + require.Equal(t, 4, cancelPruneCalls) + }) +} diff --git a/process/interface.go b/process/interface.go index d402a7cbf3b..18c14ecfa59 100644 --- a/process/interface.go +++ b/process/interface.go @@ -30,6 +30,7 @@ import ( "github.com/multiversx/mx-chain-go/ntp" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/asyncExecution/cache" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/sharding" @@ -357,6 +358,7 @@ type ExecutionManager interface { RemoveAtNonceAndHigher(nonce uint64) error ResetAndResumeExecution(lastNotarizedResult 
data.BaseExecutionResultHandler) error RemovePendingExecutionResultsFromNonce(nonce uint64) error + PopDismissedResults() []executionTrack.DismissedBatch GetSignalProcessCompletionChan() chan uint64 Close() error IsInterfaceNil() bool @@ -1661,6 +1663,7 @@ type ExecutionResultsTracker interface { Clean(lastNotarizedResult data.BaseExecutionResultHandler) CleanConfirmedExecutionResults(header data.HeaderHandler) error CleanOnConsensusReached(headerHash []byte, header data.HeaderHandler) + PopDismissedResults() []executionTrack.DismissedBatch IsInterfaceNil() bool } diff --git a/process/transactionEvaluator/simulationAccountsDB.go b/process/transactionEvaluator/simulationAccountsDB.go index 835a64252e5..a4d9fa34ef1 100644 --- a/process/transactionEvaluator/simulationAccountsDB.go +++ b/process/transactionEvaluator/simulationAccountsDB.go @@ -6,9 +6,10 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/core/check" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // simulationAccountsDB is a wrapper over an accounts db which works read-only. 
write operation are disabled @@ -193,6 +194,11 @@ func (r *simulationAccountsDB) Close() error { return nil } +// GetEvictionWaitingListSize returns 0 for the simulation accounts DB +func (adb *simulationAccountsDB) GetEvictionWaitingListSize() int { + return 0 +} + // IsInterfaceNil returns true if there is no value under the interface func (r *simulationAccountsDB) IsInterfaceNil() bool { return r == nil diff --git a/state/accountsDB.go b/state/accountsDB.go index 52dcb16be87..9d57fcc06a5 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -1230,6 +1230,13 @@ func (adb *AccountsDB) CancelPrune(rootHash []byte, identifier TriePruningIdenti adb.storagePruningManager.CancelPrune(rootHash, identifier, adb.mainTrie.GetStorageManager()) } +// GetEvictionWaitingListSize returns the number of entries in the eviction waiting list cache +func (adb *AccountsDB) GetEvictionWaitingListSize() int { + adb.mutOp.RLock() + defer adb.mutOp.RUnlock() + return adb.storagePruningManager.EvictionWaitingListCacheLen() +} + // ResetPruning will reset all collected data needed for pruning func (adb *AccountsDB) ResetPruning() { adb.mutOp.Lock() diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index 915a9e9bea5..cc940c77e5e 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -308,6 +308,11 @@ func (accountsDB *accountsDBApi) GetCodeWithBlockInfo(codeHash []byte, _ common. 
return accountsDB.innerAccountsAdapter.GetCode(codeHash), blockInfo, nil } +// GetEvictionWaitingListSize returns 0 for the API accounts adapter +func (adb *accountsDBApi) GetEvictionWaitingListSize() int { + return 0 +} + // IsInterfaceNil returns true if there is no value under the interface func (accountsDB *accountsDBApi) IsInterfaceNil() bool { return accountsDB == nil diff --git a/state/accountsDBApiWithHistory.go b/state/accountsDBApiWithHistory.go index 048fbd44bbe..8515aa657f2 100644 --- a/state/accountsDBApiWithHistory.go +++ b/state/accountsDBApiWithHistory.go @@ -246,6 +246,11 @@ func (accountsDB *accountsDBApiWithHistory) recreateTrieUnprotected(options comm return nil } +// GetEvictionWaitingListSize returns 0 for the API accounts adapter with history +func (adb *accountsDBApiWithHistory) GetEvictionWaitingListSize() int { + return 0 +} + // IsInterfaceNil returns true if there is no value under the interface func (accountsDB *accountsDBApiWithHistory) IsInterfaceNil() bool { return accountsDB == nil diff --git a/state/interface.go b/state/interface.go index 254c68ddd44..b9c8b988f5f 100644 --- a/state/interface.go +++ b/state/interface.go @@ -85,6 +85,7 @@ type AccountsAdapter interface { PruneTrie(rootHash []byte, identifier TriePruningIdentifier, handler PruningHandler) CancelPrune(rootHash []byte, identifier TriePruningIdentifier) ResetPruning() + GetEvictionWaitingListSize() int SnapshotState(rootHash []byte, epoch uint32) IsPruningEnabled() bool GetAllLeaves(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error @@ -179,6 +180,7 @@ type DBRemoveCacher interface { Put([]byte, common.ModifiedHashes) error Evict([]byte) (common.ModifiedHashes, error) ShouldKeepHash(hash string, identifier TriePruningIdentifier) (bool, error) + CacheLen() int Reset() IsInterfaceNil() bool Close() error @@ -196,6 +198,7 @@ type StoragePruningManager interface { MarkForEviction([]byte, []byte, 
common.ModifiedHashes, common.ModifiedHashes) error PruneTrie(rootHash []byte, identifier TriePruningIdentifier, tsm common.StorageManager, handler PruningHandler) CancelPrune(rootHash []byte, identifier TriePruningIdentifier, tsm common.StorageManager) + EvictionWaitingListCacheLen() int Reset() Close() error IsInterfaceNil() bool diff --git a/state/storagePruningManager/disabled/disabledStoragePruningManager.go b/state/storagePruningManager/disabled/disabledStoragePruningManager.go index 2e10c84a7f3..89c13dc9b54 100644 --- a/state/storagePruningManager/disabled/disabledStoragePruningManager.go +++ b/state/storagePruningManager/disabled/disabledStoragePruningManager.go @@ -34,6 +34,11 @@ func (i *disabledStoragePruningManager) Close() error { return nil } +// EvictionWaitingListCacheLen returns 0 for the disabled storage pruning manager +func (d *disabledStoragePruningManager) EvictionWaitingListCacheLen() int { + return 0 +} + // IsInterfaceNil returns true if there is no value under the interface func (i *disabledStoragePruningManager) IsInterfaceNil() bool { return i == nil diff --git a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go index df91bfcc525..a3121833180 100644 --- a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go +++ b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go @@ -6,9 +6,10 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/data" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" - logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("state/evictionWaitingList") @@ -218,6 +219,13 @@ func (mewl *memoryEvictionWaitingList) Reset() { mewl.opMutex.Unlock() } +// CacheLen returns the number of entries in the cache +func (mewl *memoryEvictionWaitingList) CacheLen() 
int { + mewl.opMutex.RLock() + defer mewl.opMutex.RUnlock() + return len(mewl.cache) +} + // Close returns nil func (mewl *memoryEvictionWaitingList) Close() error { return nil diff --git a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go index 69ef10606d3..482d4c8c40d 100644 --- a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go +++ b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList_test.go @@ -7,9 +7,10 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" - "github.com/stretchr/testify/assert" ) func getDefaultArgsForMemoryEvictionWaitingList() MemoryEvictionWaitingListArgs { @@ -374,3 +375,22 @@ func TestMemoryEvictionWaitingList_Reset(t *testing.T) { assert.Equal(t, 0, len(mewl.cache)) assert.Equal(t, 0, len(mewl.reversedCache)) } + +func TestMemoryEvictionWaitingList_CacheLen(t *testing.T) { + t.Parallel() + + mewl, _ := NewMemoryEvictionWaitingList(getDefaultArgsForMemoryEvictionWaitingList()) + assert.Equal(t, 0, mewl.CacheLen()) + + _ = mewl.Put([]byte("root1"), common.ModifiedHashes{"hash1": {}}) + assert.Equal(t, 1, mewl.CacheLen()) + + _ = mewl.Put([]byte("root2"), common.ModifiedHashes{"hash2": {}}) + assert.Equal(t, 2, mewl.CacheLen()) + + _, _ = mewl.Evict([]byte("root1")) + assert.Equal(t, 1, mewl.CacheLen()) + + mewl.Reset() + assert.Equal(t, 0, mewl.CacheLen()) +} diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 2478316a02a..15dceb13ef1 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -7,10 +7,11 @@ import ( 
"github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/storagePruningManager/pruningBuffer" - logger "github.com/multiversx/mx-chain-logger-go" ) type pruningOperation byte @@ -76,6 +77,9 @@ func (spm *storagePruningManager) markForEviction( rootHash = append(rootHash, byte(identifier)) + // Evict stale entry at this key from a previously dismissed block (no-op if absent). + spm.cancelPrune(rootHash) + err := spm.dbEvictionWaitingList.Put(rootHash, hashes) if err != nil { return err @@ -234,6 +238,11 @@ func (spm *storagePruningManager) removeFromDb( return nil } +// EvictionWaitingListCacheLen returns the number of entries in the eviction waiting list cache +func (spm *storagePruningManager) EvictionWaitingListCacheLen() int { + return spm.dbEvictionWaitingList.CacheLen() +} + func (spm *storagePruningManager) Reset() { bufferedHashes := spm.pruningBuffer.RemoveAll() for _, hash := range bufferedHashes { diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index 2f656e6b556..e7795697ac3 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -294,3 +294,76 @@ func TestStoragePruningManager_Reset(t *testing.T) { trieStorage.ExitPruningBufferingMode() assert.Equal(t, 0, spm.pruningBuffer.Len()) } + +func TestStoragePruningManager_EvictBeforePut(t *testing.T) { + t.Parallel() + + ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 100, + HashesSize: 10000, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) + spm, _ := NewStoragePruningManager(ewl, 1000) + + // Simulate dismissed block: MarkForEviction from oldRoot=R0 to newRoot=R1 + 
dismissedOldHashes := map[string]struct{}{"old_dismissed_1": {}, "old_dismissed_2": {}} + dismissedNewHashes := map[string]struct{}{"new_dismissed_1": {}} + err := spm.MarkForEviction([]byte("R0"), []byte("R1"), dismissedOldHashes, dismissedNewHashes) + assert.Nil(t, err) + + // Verify EWL has 2 entries (R0|OldRoot and R1|NewRoot) + assert.Equal(t, 2, spm.EvictionWaitingListCacheLen()) + + // Simulate replacement block with SAME oldRoot: MarkForEviction from oldRoot=R0 to newRoot=R2 + // The evict-before-put should clear the stale R0|OldRoot entry before writing the new one + replacementOldHashes := map[string]struct{}{"old_replacement_1": {}, "old_replacement_3": {}} + replacementNewHashes := map[string]struct{}{"new_replacement_1": {}} + err = spm.MarkForEviction([]byte("R0"), []byte("R2"), replacementOldHashes, replacementNewHashes) + assert.Nil(t, err) + + // EWL should have 3 entries: R1|NewRoot (from dismissed), R0|OldRoot (replacement), R2|NewRoot (replacement) + // The stale R0|OldRoot entry from the dismissed block was evicted before the replacement's Put + assert.Equal(t, 3, spm.EvictionWaitingListCacheLen()) + + // Now simulate pruning the replacement block's old state: + // CancelPrune(R0, NewRoot) - cancel the "new" marking from the previous block + // The R0|NewRoot was set by the DISMISSED block's MarkForEviction (removeDuplicatedKeys already ran) + // PruneTrie(R0, OldRoot) - prune old state + // The key R0|OldRoot should return the REPLACEMENT's hashes, not the dismissed block's + evictedOld, errEvict := ewl.Evict(append([]byte("R0"), byte(state.OldRoot))) + assert.Nil(t, errEvict) + + // The evicted hashes must be from the replacement block, not the dismissed block + _, hasReplacementHash := evictedOld["old_replacement_1"] + assert.True(t, hasReplacementHash, "should contain replacement hashes") + _, hasDismissedHash := evictedOld["old_dismissed_1"] + assert.False(t, hasDismissedHash, "should NOT contain dismissed hashes") + + // The dismissed 
block's NewRoot entry (R1|NewRoot) should still be in EWL + evictedDismissedNew, errEvict2 := ewl.Evict(append([]byte("R1"), byte(state.NewRoot))) + assert.Nil(t, errEvict2) + _, hasDismissedNewHash := evictedDismissedNew["new_dismissed_1"] + assert.True(t, hasDismissedNewHash, "dismissed NewRoot entry should still exist") + + // After evicting both R0|OldRoot and R1|NewRoot, only R2|NewRoot should remain + assert.Equal(t, 1, spm.EvictionWaitingListCacheLen()) +} + +func TestStoragePruningManager_EvictionWaitingListCacheLen(t *testing.T) { + t.Parallel() + + ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 100, + HashesSize: 10000, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) + spm, _ := NewStoragePruningManager(ewl, 1000) + + assert.Equal(t, 0, spm.EvictionWaitingListCacheLen()) + + err := spm.MarkForEviction([]byte("old"), []byte("new"), + map[string]struct{}{"h1": {}}, + map[string]struct{}{"h2": {}}) + assert.Nil(t, err) + assert.Equal(t, 2, spm.EvictionWaitingListCacheLen()) +} diff --git a/testscommon/executionTrack/executionResultsTrackerStub.go b/testscommon/executionTrack/executionResultsTrackerStub.go index 237f6b865ae..47a8401503b 100644 --- a/testscommon/executionTrack/executionResultsTrackerStub.go +++ b/testscommon/executionTrack/executionResultsTrackerStub.go @@ -2,6 +2,8 @@ package executionTrack import ( "github.com/multiversx/mx-chain-core-go/data" + + execTrack "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" ) // ExecutionResultsTrackerStub is a stub implementation of the ExecutionResultsTracker interface @@ -16,6 +18,7 @@ type ExecutionResultsTrackerStub struct { CleanCalled func(lastNotarizedResult data.BaseExecutionResultHandler) CleanConfirmedExecutionResultsCalled func(header data.HeaderHandler) error CleanOnConsensusReachedCalled func(headerHash []byte, header data.HeaderHandler) + PopDismissedResultsCalled func() []execTrack.DismissedBatch } // 
AddExecutionResult - @@ -97,6 +100,14 @@ func (ets *ExecutionResultsTrackerStub) CleanOnConsensusReached(headerHash []byt } } +// PopDismissedResults - +func (stub *ExecutionResultsTrackerStub) PopDismissedResults() []execTrack.DismissedBatch { + if stub.PopDismissedResultsCalled != nil { + return stub.PopDismissedResultsCalled() + } + return nil +} + // IsInterfaceNil checks if the interface is nil func (ets *ExecutionResultsTrackerStub) IsInterfaceNil() bool { return ets == nil diff --git a/testscommon/processMocks/executionManagerMock.go b/testscommon/processMocks/executionManagerMock.go index 63657a99d07..f4f3e42c9f1 100644 --- a/testscommon/processMocks/executionManagerMock.go +++ b/testscommon/processMocks/executionManagerMock.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/asyncExecution/cache" + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" ) // ExecutionManagerMock is a mock implementation of the ExecutionManager interface @@ -21,6 +22,7 @@ type ExecutionManagerMock struct { GetLastNotarizedExecutionResultCalled func() (data.BaseExecutionResultHandler, error) RemovePendingExecutionResultsFromNonceCalled func(nonce uint64) error GetSignalProcessCompletionChanCalled func() chan uint64 + PopDismissedResultsCalled func() []executionTrack.DismissedBatch CloseCalled func() error } @@ -126,6 +128,14 @@ func (emm *ExecutionManagerMock) Close() error { return nil } +// PopDismissedResults - +func (emm *ExecutionManagerMock) PopDismissedResults() []executionTrack.DismissedBatch { + if emm.PopDismissedResultsCalled != nil { + return emm.PopDismissedResultsCalled() + } + return nil +} + // IsInterfaceNil - func (emm *ExecutionManagerMock) IsInterfaceNil() bool { return emm == nil diff --git a/testscommon/processMocks/executionTrackerStub.go b/testscommon/processMocks/executionTrackerStub.go index 66dc37e02b8..1e6f896cc1f 100644 --- 
a/testscommon/processMocks/executionTrackerStub.go +++ b/testscommon/processMocks/executionTrackerStub.go @@ -2,6 +2,8 @@ package processMocks import ( "github.com/multiversx/mx-chain-core-go/data" + + "github.com/multiversx/mx-chain-go/process/asyncExecution/executionTrack" ) // ExecutionTrackerStub - @@ -16,6 +18,16 @@ type ExecutionTrackerStub struct { CleanCalled func(lastNotarizedResult data.BaseExecutionResultHandler) CleanConfirmedExecutionResultsCalled func(header data.HeaderHandler) error CleanOnConsensusReachedCalled func(headerHash []byte, header data.HeaderHandler) + PopDismissedResultsCalled func() []executionTrack.DismissedBatch +} + +// PopDismissedResults - +func (e *ExecutionTrackerStub) PopDismissedResults() []executionTrack.DismissedBatch { + if e.PopDismissedResultsCalled != nil { + return e.PopDismissedResultsCalled() + } + + return nil } // AddExecutionResult - diff --git a/testscommon/state/accountsAdapterStub.go b/testscommon/state/accountsAdapterStub.go index 04608b50045..60e8898b5e4 100644 --- a/testscommon/state/accountsAdapterStub.go +++ b/testscommon/state/accountsAdapterStub.go @@ -4,9 +4,10 @@ import ( "context" "errors" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var errNotImplemented = errors.New("not implemented") @@ -30,6 +31,7 @@ type AccountsStub struct { PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier) ResetPruningCalled func() + GetEvictionWaitingListSizeCalled func() int SnapshotStateCalled func(rootHash []byte, epoch uint32) IsPruningEnabledCalled func() bool GetAllLeavesCalled func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, trieLeafParser common.TrieLeafParser) error @@ -284,6 +286,14 @@ func 
(as *AccountsStub) Close() error { return nil } +// GetEvictionWaitingListSize - +func (as *AccountsStub) GetEvictionWaitingListSize() int { + if as.GetEvictionWaitingListSizeCalled != nil { + return as.GetEvictionWaitingListSizeCalled() + } + return 0 +} + // IsInterfaceNil returns true if there is no value under the interface func (as *AccountsStub) IsInterfaceNil() bool { return as == nil diff --git a/testscommon/state/evictionWaitingListMock.go b/testscommon/state/evictionWaitingListMock.go index 84d0e31e9e7..67e5ab702ae 100644 --- a/testscommon/state/evictionWaitingListMock.go +++ b/testscommon/state/evictionWaitingListMock.go @@ -88,6 +88,13 @@ func (ewl *EvictionWaitingListMock) Reset() { ewl.OpMutex.Unlock() } +// CacheLen - +func (ewl *EvictionWaitingListMock) CacheLen() int { + ewl.OpMutex.RLock() + defer ewl.OpMutex.RUnlock() + return len(ewl.Cache) +} + // Close - func (ewl *EvictionWaitingListMock) Close() error { return nil diff --git a/testscommon/state/storagePruningManagerStub.go b/testscommon/state/storagePruningManagerStub.go index 5b91046909c..3e72ec56aec 100644 --- a/testscommon/state/storagePruningManagerStub.go +++ b/testscommon/state/storagePruningManagerStub.go @@ -7,11 +7,12 @@ import ( // StoragePruningManagerStub - type StoragePruningManagerStub struct { - MarkForEvictionCalled func(bytes []byte, bytes2 []byte, hashes common.ModifiedHashes, hashes2 common.ModifiedHashes) error - PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, tsm common.StorageManager, handler state.PruningHandler) - CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier, tsm common.StorageManager) - ResetCalled func() - CloseCalled func() error + MarkForEvictionCalled func(bytes []byte, bytes2 []byte, hashes common.ModifiedHashes, hashes2 common.ModifiedHashes) error + PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, tsm common.StorageManager, handler state.PruningHandler) + 
CancelPruneCalled func(rootHash []byte, identifier state.TriePruningIdentifier, tsm common.StorageManager) + ResetCalled func() + EvictionWaitingListCacheLenCalled func() int + CloseCalled func() error } // MarkForEviction - @@ -53,6 +54,14 @@ func (stub *StoragePruningManagerStub) Close() error { return nil } +// EvictionWaitingListCacheLen - +func (stub *StoragePruningManagerStub) EvictionWaitingListCacheLen() int { + if stub.EvictionWaitingListCacheLenCalled != nil { + return stub.EvictionWaitingListCacheLenCalled() + } + return 0 +} + // IsInterfaceNil - func (stub *StoragePruningManagerStub) IsInterfaceNil() bool { return stub == nil From 85aa07ab334d3a0d653be3f84c3e0498a20850c3 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Mar 2026 18:27:44 +0200 Subject: [PATCH 18/20] remove cancelPrune from markForEviction --- .../storagePruningManager.go | 4 -- .../storagePruningManager_test.go | 53 ++++++------------- 2 files changed, 17 insertions(+), 40 deletions(-) diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 15dceb13ef1..aa51caa9e05 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -76,10 +76,6 @@ func (spm *storagePruningManager) markForEviction( } rootHash = append(rootHash, byte(identifier)) - - // Evict stale entry at this key from a previously dismissed block (no-op if absent). 
- spm.cancelPrune(rootHash) - err := spm.dbEvictionWaitingList.Put(rootHash, hashes) if err != nil { return err diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index e7795697ac3..0ca2df57801 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -295,7 +295,7 @@ func TestStoragePruningManager_Reset(t *testing.T) { assert.Equal(t, 0, spm.pruningBuffer.Len()) } -func TestStoragePruningManager_EvictBeforePut(t *testing.T) { +func TestStoragePruningManager_DuplicateKeyIncrementsNumReferences(t *testing.T) { t.Parallel() ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -305,48 +305,29 @@ func TestStoragePruningManager_EvictBeforePut(t *testing.T) { ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) spm, _ := NewStoragePruningManager(ewl, 1000) - // Simulate dismissed block: MarkForEviction from oldRoot=R0 to newRoot=R1 - dismissedOldHashes := map[string]struct{}{"old_dismissed_1": {}, "old_dismissed_2": {}} - dismissedNewHashes := map[string]struct{}{"new_dismissed_1": {}} - err := spm.MarkForEviction([]byte("R0"), []byte("R1"), dismissedOldHashes, dismissedNewHashes) + // Put R0|OldRoot twice (simulates delayed pruning with recycled root hash) + oldHashes1 := map[string]struct{}{"h1": {}, "h2": {}} + newHashes1 := map[string]struct{}{"h3": {}} + err := spm.MarkForEviction([]byte("R0"), []byte("R1"), oldHashes1, newHashes1) assert.Nil(t, err) - - // Verify EWL has 2 entries (R0|OldRoot and R1|NewRoot) assert.Equal(t, 2, spm.EvictionWaitingListCacheLen()) - // Simulate replacement block with SAME oldRoot: MarkForEviction from oldRoot=R0 to newRoot=R2 - // The evict-before-put should clear the stale R0|OldRoot entry before writing the new one - replacementOldHashes := map[string]struct{}{"old_replacement_1": {}, "old_replacement_3": {}} - replacementNewHashes := 
map[string]struct{}{"new_replacement_1": {}} - err = spm.MarkForEviction([]byte("R0"), []byte("R2"), replacementOldHashes, replacementNewHashes) + // Second MarkForEviction with same oldRoot R0 increments numReferences + oldHashes2 := map[string]struct{}{"h4": {}, "h5": {}} + newHashes2 := map[string]struct{}{"h6": {}} + err = spm.MarkForEviction([]byte("R0"), []byte("R2"), oldHashes2, newHashes2) assert.Nil(t, err) + assert.Equal(t, 3, spm.EvictionWaitingListCacheLen()) // R0|OldRoot, R1|NewRoot, R2|NewRoot - // EWL should have 3 entries: R1|NewRoot (from dismissed), R0|OldRoot (replacement), R2|NewRoot (replacement) - // The stale R0|OldRoot entry from the dismissed block was evicted before the replacement's Put - assert.Equal(t, 3, spm.EvictionWaitingListCacheLen()) - - // Now simulate pruning the replacement block's old state: - // CancelPrune(R0, NewRoot) - cancel the "new" marking from the previous block - // The R0|NewRoot was set by the DISMISSED block's MarkForEviction (removeDuplicatedKeys already ran) - // PruneTrie(R0, OldRoot) - prune old state - // The key R0|OldRoot should return the REPLACEMENT's hashes, not the dismissed block's - evictedOld, errEvict := ewl.Evict(append([]byte("R0"), byte(state.OldRoot))) + // First Evict decrements numReferences, returns empty (entry still alive) + evicted, errEvict := ewl.Evict(append([]byte("R0"), byte(state.OldRoot))) assert.Nil(t, errEvict) + assert.Equal(t, 0, len(evicted)) - // The evicted hashes must be from the replacement block, not the dismissed block - _, hasReplacementHash := evictedOld["old_replacement_1"] - assert.True(t, hasReplacementHash, "should contain replacement hashes") - _, hasDismissedHash := evictedOld["old_dismissed_1"] - assert.False(t, hasDismissedHash, "should NOT contain dismissed hashes") - - // The dismissed block's NewRoot entry (R1|NewRoot) should still be in EWL - evictedDismissedNew, errEvict2 := ewl.Evict(append([]byte("R1"), byte(state.NewRoot))) - assert.Nil(t, errEvict2) - 
_, hasDismissedNewHash := evictedDismissedNew["new_dismissed_1"] - assert.True(t, hasDismissedNewHash, "dismissed NewRoot entry should still exist") - - // After evicting both R0|OldRoot and R1|NewRoot, only R2|NewRoot should remain - assert.Equal(t, 1, spm.EvictionWaitingListCacheLen()) + // Second Evict removes entry and returns hashes + evicted, errEvict = ewl.Evict(append([]byte("R0"), byte(state.OldRoot))) + assert.Nil(t, errEvict) + assert.True(t, len(evicted) > 0) } func TestStoragePruningManager_EvictionWaitingListCacheLen(t *testing.T) { From 54c6ae13e3cec3f2240c825df7672a402ef77870 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Mar 2026 18:30:51 +0200 Subject: [PATCH 19/20] rename receiver --- .../disabled/disabledStoragePruningManager.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/state/storagePruningManager/disabled/disabledStoragePruningManager.go b/state/storagePruningManager/disabled/disabledStoragePruningManager.go index 89c13dc9b54..15b9412d602 100644 --- a/state/storagePruningManager/disabled/disabledStoragePruningManager.go +++ b/state/storagePruningManager/disabled/disabledStoragePruningManager.go @@ -14,23 +14,23 @@ func NewDisabledStoragePruningManager() *disabledStoragePruningManager { } // MarkForEviction does nothing for this implementation -func (i *disabledStoragePruningManager) MarkForEviction(_ []byte, _ []byte, _ common.ModifiedHashes, _ common.ModifiedHashes) error { +func (d *disabledStoragePruningManager) MarkForEviction(_ []byte, _ []byte, _ common.ModifiedHashes, _ common.ModifiedHashes) error { return nil } // PruneTrie does nothing for this implementation -func (i *disabledStoragePruningManager) PruneTrie(_ []byte, _ state.TriePruningIdentifier, _ common.StorageManager, _ state.PruningHandler) { +func (d *disabledStoragePruningManager) PruneTrie(_ []byte, _ state.TriePruningIdentifier, _ common.StorageManager, _ state.PruningHandler) { } // CancelPrune does nothing for this 
implementation -func (i *disabledStoragePruningManager) CancelPrune(_ []byte, _ state.TriePruningIdentifier, _ common.StorageManager) { +func (d *disabledStoragePruningManager) CancelPrune(_ []byte, _ state.TriePruningIdentifier, _ common.StorageManager) { } // Reset does nothing for this implementation -func (i *disabledStoragePruningManager) Reset() {} +func (d *disabledStoragePruningManager) Reset() {} // Close does nothing for this implementation -func (i *disabledStoragePruningManager) Close() error { +func (d *disabledStoragePruningManager) Close() error { return nil } @@ -40,6 +40,6 @@ func (d *disabledStoragePruningManager) EvictionWaitingListCacheLen() int { } // IsInterfaceNil returns true if there is no value under the interface -func (i *disabledStoragePruningManager) IsInterfaceNil() bool { - return i == nil +func (d *disabledStoragePruningManager) IsInterfaceNil() bool { + return d == nil } From e5a00fdc7fe376347feb2446a3a98e93067c3d40 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 30 Mar 2026 14:50:32 +0300 Subject: [PATCH 20/20] wait for proof before broadcasting --- consensus/broadcast/delayedBroadcast.go | 64 ++++- consensus/broadcast/delayedBroadcast_test.go | 268 +++++++++++++++++- consensus/broadcast/export.go | 6 + .../broadcast/shardChainMessenger_test.go | 5 + consensus/interface.go | 6 + consensus/spos/errors.go | 3 + consensus/spos/sposFactory/sposFactory.go | 7 + .../spos/sposFactory/sposFactory_test.go | 14 + factory/consensus/consensusComponents.go | 3 + integrationTests/testFullNode.go | 3 + integrationTests/testProcessorNode.go | 6 + .../components/testOnlyProcessingNode.go | 3 + 12 files changed, 380 insertions(+), 8 deletions(-) diff --git a/consensus/broadcast/delayedBroadcast.go b/consensus/broadcast/delayedBroadcast.go index bb2008a82e2..9558024fe46 100644 --- a/consensus/broadcast/delayedBroadcast.go +++ b/consensus/broadcast/delayedBroadcast.go @@ -36,6 +36,9 @@ type shardDataHandler interface { type 
ArgsDelayedBlockBroadcaster struct { InterceptorsContainer process.InterceptorsContainer HeadersSubscriber consensus.HeadersPoolSubscriber + HeadersPool consensus.HeadersPoolGetter + ProofsPool consensus.EquivalentProofsPool + EnableEpochsHandler common.EnableEpochsHandler ShardCoordinator sharding.Coordinator LeaderCacheSize uint32 ValidatorCacheSize uint32 @@ -60,6 +63,9 @@ type delayedBlockBroadcaster struct { interceptorsContainer process.InterceptorsContainer shardCoordinator sharding.Coordinator headersSubscriber consensus.HeadersPoolSubscriber + headersPool consensus.HeadersPoolGetter + proofsPool consensus.EquivalentProofsPool + enableEpochsHandler common.EnableEpochsHandler valHeaderBroadcastData []*shared.ValidatorHeaderBroadcastData valBroadcastData []*shared.DelayedBroadcastData delayedBroadcastData []*shared.DelayedBroadcastData @@ -85,6 +91,15 @@ func NewDelayedBlockBroadcaster(args *ArgsDelayedBlockBroadcaster) (*delayedBloc if check.IfNil(args.HeadersSubscriber) { return nil, spos.ErrNilHeadersSubscriber } + if check.IfNil(args.HeadersPool) { + return nil, spos.ErrNilHeadersPool + } + if check.IfNil(args.ProofsPool) { + return nil, spos.ErrNilEquivalentProofPool + } + if check.IfNil(args.EnableEpochsHandler) { + return nil, spos.ErrNilEnableEpochsHandler + } if check.IfNil(args.AlarmScheduler) { return nil, spos.ErrNilAlarmScheduler } @@ -99,6 +114,9 @@ func NewDelayedBlockBroadcaster(args *ArgsDelayedBlockBroadcaster) (*delayedBloc shardCoordinator: args.ShardCoordinator, interceptorsContainer: args.InterceptorsContainer, headersSubscriber: args.HeadersSubscriber, + headersPool: args.HeadersPool, + proofsPool: args.ProofsPool, + enableEpochsHandler: args.EnableEpochsHandler, valHeaderBroadcastData: make([]*shared.ValidatorHeaderBroadcastData, 0), valBroadcastData: make([]*shared.DelayedBroadcastData, 0), delayedBroadcastData: make([]*shared.DelayedBroadcastData, 0), @@ -110,6 +128,7 @@ func NewDelayedBlockBroadcaster(args 
*ArgsDelayedBlockBroadcaster) (*delayedBloc } dbb.headersSubscriber.RegisterHandler(dbb.headerReceived) + dbb.proofsPool.RegisterHandler(dbb.receivedProof) err = dbb.registerHeaderInterceptorCallback(dbb.interceptedHeader) if err != nil { return nil, err @@ -266,36 +285,67 @@ func (dbb *delayedBlockBroadcaster) Close() { } func (dbb *delayedBlockBroadcaster) headerReceived(headerHandler data.HeaderHandler, headerHash []byte) { + if headerHandler.GetShardID() != core.MetachainShardId { + return + } + + if common.IsProofsFlagEnabledForHeader(dbb.enableEpochsHandler, headerHandler) { + if !dbb.proofsPool.HasProof(headerHandler.GetShardID(), headerHash) { + return + } + } + + dbb.processMetachainHeaderBroadcast(headerHandler, headerHash) +} + +func (dbb *delayedBlockBroadcaster) receivedProof(proof data.HeaderProofHandler) { + if check.IfNil(proof) { + return + } + if proof.GetHeaderShardId() != core.MetachainShardId { + return + } + + headerHash := proof.GetHeaderHash() + header, err := dbb.headersPool.GetHeaderByHash(headerHash) + if err != nil { + log.Trace("delayedBlockBroadcaster.receivedProof: header not found in pool, will be handled by headerReceived", + "headerHash", headerHash, + ) + return + } + + dbb.processMetachainHeaderBroadcast(header, headerHash) +} + +func (dbb *delayedBlockBroadcaster) processMetachainHeaderBroadcast(headerHandler data.HeaderHandler, headerHash []byte) { dbb.mutDataForBroadcast.RLock() defer dbb.mutDataForBroadcast.RUnlock() if len(dbb.delayedBroadcastData) == 0 && len(dbb.valBroadcastData) == 0 { return } - if headerHandler.GetShardID() != core.MetachainShardId { - return - } headerHashes, dataForValidators, err := getShardDataFromMetaChainBlock( headerHandler, dbb.shardCoordinator.SelfId(), ) if err != nil { - log.Error("delayedBlockBroadcaster.headerReceived", "error", err.Error(), + log.Error("delayedBlockBroadcaster.processMetachainHeaderBroadcast", "error", err.Error(), "headerHash", headerHash, ) return } if len(headerHashes) 
== 0 { - log.Trace("delayedBlockBroadcaster.headerReceived: header received with no shardData for current shard", + log.Trace("delayedBlockBroadcaster.processMetachainHeaderBroadcast: no shardData for current shard", "headerHash", headerHash, ) return } - log.Trace("delayedBlockBroadcaster.headerReceived", "nbHeaderHashes", len(headerHashes)) + log.Trace("delayedBlockBroadcaster.processMetachainHeaderBroadcast", "nbHeaderHashes", len(headerHashes)) for i := range headerHashes { - log.Trace("delayedBlockBroadcaster.headerReceived", "headerHash", headerHashes[i]) + log.Trace("delayedBlockBroadcaster.processMetachainHeaderBroadcast", "headerHash", headerHashes[i]) } go dbb.scheduleValidatorBroadcast(dataForValidators) diff --git a/consensus/broadcast/delayedBroadcast_test.go b/consensus/broadcast/delayedBroadcast_test.go index 6525e12572f..2ee5b40b8ff 100644 --- a/consensus/broadcast/delayedBroadcast_test.go +++ b/consensus/broadcast/delayedBroadcast_test.go @@ -26,6 +26,8 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/pool" ) @@ -137,6 +139,9 @@ func createDefaultDelayedBroadcasterArgs() *broadcast.ArgsDelayedBlockBroadcaste ShardCoordinator: &mock.ShardCoordinatorMock{}, InterceptorsContainer: interceptorsContainer, HeadersSubscriber: headersSubscriber, + HeadersPool: headersSubscriber, + ProofsPool: &dataRetrieverMock.ProofsPoolMock{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, LeaderCacheSize: 2, ValidatorCacheSize: 2, AlarmScheduler: alarm.NewAlarmScheduler(), @@ -185,6 +190,36 @@ func TestNewDelayedBlockBroadcaster_NilAlarmSchedulerShouldErr(t *testing.T) { require.Nil(t, dbb) } +func 
TestNewDelayedBlockBroadcaster_NilHeadersPoolShouldErr(t *testing.T) { + t.Parallel() + + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + delayBroadcasterArgs.HeadersPool = nil + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Equal(t, spos.ErrNilHeadersPool, err) + require.Nil(t, dbb) +} + +func TestNewDelayedBlockBroadcaster_NilProofsPoolShouldErr(t *testing.T) { + t.Parallel() + + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + delayBroadcasterArgs.ProofsPool = nil + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Equal(t, spos.ErrNilEquivalentProofPool, err) + require.Nil(t, dbb) +} + +func TestNewDelayedBlockBroadcaster_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + delayBroadcasterArgs.EnableEpochsHandler = nil + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Equal(t, spos.ErrNilEnableEpochsHandler, err) + require.Nil(t, dbb) +} + func TestNewDelayedBlockBroadcasterOK(t *testing.T) { t.Parallel() @@ -194,6 +229,237 @@ func TestNewDelayedBlockBroadcasterOK(t *testing.T) { require.NotNil(t, dbb) } +func TestDelayedBlockBroadcaster_HeaderReceivedProofsEnabled_ShouldNotBroadcastWithoutProof(t *testing.T) { + t.Parallel() + + mbBroadcastCalled := atomic.Flag{} + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + delayBroadcasterArgs.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.AndromedaFlag + }, + } + delayBroadcasterArgs.ProofsPool = &dataRetrieverMock.ProofsPoolMock{ + HasProofCalled: func(shardID uint32, headerHash []byte) bool { + return false // no proof available + }, + } + + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Nil(t, err) + + err = dbb.SetBroadcastHandlers( 
+ func(mbData map[uint32][]byte, pk []byte) error { + mbBroadcastCalled.SetValue(true) + return nil + }, + func(txData map[string][][]byte, pk []byte) error { return nil }, + func(header data.HeaderHandler, pk []byte) error { return nil }, + func(message *consensus.Message) error { return nil }, + ) + require.Nil(t, err) + + headerHash, _, miniblocksData, transactionsData := createDelayData("1") + delayedData := broadcast.CreateDelayBroadcastDataForLeader(headerHash, miniblocksData, transactionsData) + _ = dbb.SetLeaderData(delayedData) + + metaBlock := createMetaBlock() + metaBlock.Nonce = 1 // nonce > 0 so proofs flag applies + + dbb.HeaderReceived(metaBlock, []byte("meta hash")) + time.Sleep(common.ExtraDelayForBroadcastBlockInfo + common.ExtraDelayBetweenBroadcastMbsAndTxs + 100*time.Millisecond) + + assert.False(t, mbBroadcastCalled.IsSet(), "should NOT broadcast when proof is missing and proofs flag enabled") +} + +func TestDelayedBlockBroadcaster_ReceivedProof_HeaderNotInPoolShouldNotBroadcast(t *testing.T) { + t.Parallel() + + mbBroadcastCalled := atomic.Flag{} + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + delayBroadcasterArgs.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.AndromedaFlag + }, + } + delayBroadcasterArgs.HeadersPool = &pool.HeadersPoolStub{ + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + return nil, errors.New("not found") + }, + } + + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Nil(t, err) + + err = dbb.SetBroadcastHandlers( + func(mbData map[uint32][]byte, pk []byte) error { + mbBroadcastCalled.SetValue(true) + return nil + }, + func(txData map[string][][]byte, pk []byte) error { return nil }, + func(header data.HeaderHandler, pk []byte) error { return nil }, + func(message *consensus.Message) error { return nil }, + ) + 
require.Nil(t, err) + + headerHash, _, miniblocksData, transactionsData := createDelayData("1") + delayedData := broadcast.CreateDelayBroadcastDataForLeader(headerHash, miniblocksData, transactionsData) + _ = dbb.SetLeaderData(delayedData) + + proof := &block.HeaderProof{ + HeaderHash: []byte("meta hash"), + HeaderShardId: core.MetachainShardId, + HeaderNonce: 1, + } + dbb.ReceivedProof(proof) + time.Sleep(common.ExtraDelayForBroadcastBlockInfo + common.ExtraDelayBetweenBroadcastMbsAndTxs + 100*time.Millisecond) + + assert.False(t, mbBroadcastCalled.IsSet(), "should NOT broadcast when header is not in pool") +} + +func TestDelayedBlockBroadcaster_ReceivedProof_NonMetaShouldBeIgnored(t *testing.T) { + t.Parallel() + + mbBroadcastCalled := atomic.Flag{} + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Nil(t, err) + + err = dbb.SetBroadcastHandlers( + func(mbData map[uint32][]byte, pk []byte) error { + mbBroadcastCalled.SetValue(true) + return nil + }, + func(txData map[string][][]byte, pk []byte) error { return nil }, + func(header data.HeaderHandler, pk []byte) error { return nil }, + func(message *consensus.Message) error { return nil }, + ) + require.Nil(t, err) + + proof := &block.HeaderProof{ + HeaderHash: []byte("shard hash"), + HeaderShardId: 0, // not metachain + } + dbb.ReceivedProof(proof) + time.Sleep(50 * time.Millisecond) + + assert.False(t, mbBroadcastCalled.IsSet(), "should NOT broadcast for non-metachain proofs") +} + +func TestDelayedBlockBroadcaster_ReceivedProof_NilProofShouldNotPanic(t *testing.T) { + t.Parallel() + + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Nil(t, err) + + require.NotPanics(t, func() { + dbb.ReceivedProof(nil) + }) +} + +func TestDelayedBlockBroadcaster_HeaderReceivedProofsDisabled_ShouldBroadcastImmediately(t *testing.T) { + 
t.Parallel() + + mbBroadcastCalled := atomic.Flag{} + + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + // default EnableEpochsHandlerStub returns false for IsFlagEnabledInEpoch -> proofs disabled + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Nil(t, err) + + err = dbb.SetBroadcastHandlers( + func(mbData map[uint32][]byte, pk []byte) error { + mbBroadcastCalled.SetValue(true) + return nil + }, + func(txData map[string][][]byte, pk []byte) error { return nil }, + func(header data.HeaderHandler, pk []byte) error { return nil }, + func(message *consensus.Message) error { return nil }, + ) + require.Nil(t, err) + + headerHash := []byte("shard0 headerHash") + miniblocksData := map[uint32][]byte{1: []byte("miniblock data")} + transactionsData := map[string][][]byte{"txBlockBodies_0_1": {[]byte("tx0")}} + delayedData := broadcast.CreateDelayBroadcastDataForLeader(headerHash, miniblocksData, transactionsData) + _ = dbb.SetLeaderData(delayedData) + + metaBlock := createMetaBlock() + dbb.HeaderReceived(metaBlock, []byte("meta hash")) + time.Sleep(common.ExtraDelayForBroadcastBlockInfo + common.ExtraDelayBetweenBroadcastMbsAndTxs + 100*time.Millisecond) + + assert.True(t, mbBroadcastCalled.IsSet(), "should broadcast when proofs flag is disabled (backwards compat)") +} + +func TestDelayedBlockBroadcaster_HeaderArrivesFirst_ThenProofTriggersBroadcast(t *testing.T) { + t.Parallel() + + mbBroadcastCalled := atomic.Flag{} + proofAvailable := atomic.Flag{} + + delayBroadcasterArgs := createDefaultDelayedBroadcasterArgs() + delayBroadcasterArgs.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.AndromedaFlag + }, + } + delayBroadcasterArgs.ProofsPool = &dataRetrieverMock.ProofsPoolMock{ + HasProofCalled: func(shardID uint32, headerHash []byte) bool { + return proofAvailable.IsSet() + }, + } + + metaBlock 
:= createMetaBlock() + metaBlock.Nonce = 1 + metaHash := []byte("meta hash") + + delayBroadcasterArgs.HeadersPool = &pool.HeadersPoolStub{ + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + if bytes.Equal(hash, metaHash) { + return metaBlock, nil + } + return nil, errors.New("not found") + }, + } + + dbb, err := broadcast.NewDelayedBlockBroadcaster(delayBroadcasterArgs) + require.Nil(t, err) + + err = dbb.SetBroadcastHandlers( + func(mbData map[uint32][]byte, pk []byte) error { + mbBroadcastCalled.SetValue(true) + return nil + }, + func(txData map[string][][]byte, pk []byte) error { return nil }, + func(header data.HeaderHandler, pk []byte) error { return nil }, + func(message *consensus.Message) error { return nil }, + ) + require.Nil(t, err) + + headerHash := []byte("shard0 headerHash") + miniblocksData := map[uint32][]byte{1: []byte("miniblock data")} + transactionsData := map[string][][]byte{"txBlockBodies_0_1": {[]byte("tx0")}} + delayedData := broadcast.CreateDelayBroadcastDataForLeader(headerHash, miniblocksData, transactionsData) + _ = dbb.SetLeaderData(delayedData) + + // Step 1: header arrives, no proof yet -> should NOT broadcast + dbb.HeaderReceived(metaBlock, metaHash) + time.Sleep(common.ExtraDelayForBroadcastBlockInfo + common.ExtraDelayBetweenBroadcastMbsAndTxs + 100*time.Millisecond) + assert.False(t, mbBroadcastCalled.IsSet(), "should not broadcast before proof arrives") + + // Step 2: proof arrives -> should trigger broadcast via headerReceived delegation + proofAvailable.SetValue(true) + proof := &block.HeaderProof{ + HeaderHash: metaHash, + HeaderShardId: core.MetachainShardId, + HeaderNonce: 1, + } + dbb.ReceivedProof(proof) + time.Sleep(common.ExtraDelayForBroadcastBlockInfo + common.ExtraDelayBetweenBroadcastMbsAndTxs + 100*time.Millisecond) + assert.True(t, mbBroadcastCalled.IsSet(), "should broadcast after proof arrives") +} + func TestDelayedBlockBroadcaster_HeaderReceivedNoDelayedDataRegistered(t *testing.T) { 
t.Parallel() @@ -387,7 +653,7 @@ func TestDelayedBlockBroadcaster_HeaderReceivedWithoutSignaturesForShardShouldNo time.Sleep(sleepTime) logOutputStr := observer.getBufferStr() - expectedLogMsg := "delayedBlockBroadcaster.headerReceived: header received with no shardData for current shard" + expectedLogMsg := "delayedBlockBroadcaster.processMetachainHeaderBroadcast: no shardData for current shard" require.Contains(t, logOutputStr, expectedLogMsg) require.Contains(t, logOutputStr, fmt.Sprintf("headerHash = %s", hex.EncodeToString(headerHash))) diff --git a/consensus/broadcast/export.go b/consensus/broadcast/export.go index 1fed8127558..7addb7a4c48 100644 --- a/consensus/broadcast/export.go +++ b/consensus/broadcast/export.go @@ -81,6 +81,12 @@ func (dbb *delayedBlockBroadcaster) HeaderReceived(headerHandler data.HeaderHand dbb.headerReceived(headerHandler, hash) } +// ReceivedProof is the callback registered on the proofs pool +// to be called when a proof is added to the proofs pool +func (dbb *delayedBlockBroadcaster) ReceivedProof(proof data.HeaderProofHandler) { + dbb.receivedProof(proof) +} + // GetValidatorBroadcastData returns the set validator delayed broadcast data func (dbb *delayedBlockBroadcaster) GetValidatorBroadcastData() []*shared.DelayedBroadcastData { dbb.mutDataForBroadcast.RLock() diff --git a/consensus/broadcast/shardChainMessenger_test.go b/consensus/broadcast/shardChainMessenger_test.go index 7846ba12b0d..0b0d55d62b1 100644 --- a/consensus/broadcast/shardChainMessenger_test.go +++ b/consensus/broadcast/shardChainMessenger_test.go @@ -26,6 +26,8 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" 
"github.com/multiversx/mx-chain-go/testscommon/p2pmocks" ) @@ -572,6 +574,9 @@ func TestShardChainMessenger_BroadcastBlockDataLeaderShouldTriggerWaitingDelayed argsDelayedBroadcaster := broadcast.ArgsDelayedBlockBroadcaster{ InterceptorsContainer: args.InterceptorsContainer, HeadersSubscriber: args.HeadersSubscriber, + HeadersPool: &pool.HeadersPoolStub{}, + ProofsPool: &dataRetrieverMock.ProofsPoolMock{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, ShardCoordinator: args.ShardCoordinator, LeaderCacheSize: args.MaxDelayCacheSize, ValidatorCacheSize: args.MaxDelayCacheSize, diff --git a/consensus/interface.go b/consensus/interface.go index 92ae90fca36..266d2ff296f 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -115,6 +115,12 @@ type HeadersPoolSubscriber interface { IsInterfaceNil() bool } +// HeadersPoolGetter can retrieve a header by its hash from the headers pool +type HeadersPoolGetter interface { + GetHeaderByHash(hash []byte) (data.HeaderHandler, error) + IsInterfaceNil() bool +} + // PeerHonestyHandler defines the behaivour of a component able to handle/monitor the peer honesty of nodes which are // participating in consensus type PeerHonestyHandler interface { diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index 8f7601bb9f9..02eb2b36dea 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -58,6 +58,9 @@ var ErrNilBroadcastMessenger = errors.New("broadcast messenger is nil") // ErrNilHeadersSubscriber is raised when a valid headers subscriber is expected but nil is provided var ErrNilHeadersSubscriber = errors.New("headers subscriber is nil") +// ErrNilHeadersPool is raised when a valid headers pool is expected but nil is provided +var ErrNilHeadersPool = errors.New("headers pool is nil") + // ErrNilAlarmScheduler is raised when a valid alarm scheduler is expected but nil is provided var ErrNilAlarmScheduler = errors.New("alarm scheduler is nil") diff --git 
a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index 99f0cf682eb..6f2900c327b 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/broadcast" "github.com/multiversx/mx-chain-go/consensus/spos" @@ -33,6 +34,9 @@ func GetBroadcastMessenger( shardCoordinator sharding.Coordinator, peerSignatureHandler crypto.PeerSignatureHandler, headersSubscriber consensus.HeadersPoolSubscriber, + headersPool consensus.HeadersPoolGetter, + proofsPool consensus.EquivalentProofsPool, + enableEpochsHandler common.EnableEpochsHandler, interceptorsContainer process.InterceptorsContainer, alarmScheduler core.TimersScheduler, keysHandler consensus.KeysHandler, @@ -45,6 +49,9 @@ func GetBroadcastMessenger( dbbArgs := &broadcast.ArgsDelayedBlockBroadcaster{ InterceptorsContainer: interceptorsContainer, HeadersSubscriber: headersSubscriber, + HeadersPool: headersPool, + ProofsPool: proofsPool, + EnableEpochsHandler: enableEpochsHandler, ShardCoordinator: shardCoordinator, LeaderCacheSize: maxDelayCacheSize, ValidatorCacheSize: maxDelayCacheSize, diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 1f122884530..fb479babd98 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -12,6 +12,8 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" 
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/pool" @@ -57,6 +59,9 @@ func TestGetBroadcastMessenger_ShardShouldWork(t *testing.T) { shardCoord, peerSigHandler, headersSubscriber, + headersSubscriber, + &dataRetrieverMock.ProofsPoolMock{}, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, interceptosContainer, alarmSchedulerStub, &testscommon.KeysHandlerStub{}, @@ -88,6 +93,9 @@ func TestGetBroadcastMessenger_MetachainShouldWork(t *testing.T) { shardCoord, peerSigHandler, headersSubscriber, + headersSubscriber, + &dataRetrieverMock.ProofsPoolMock{}, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, interceptosContainer, alarmSchedulerStub, &testscommon.KeysHandlerStub{}, @@ -111,6 +119,9 @@ func TestGetBroadcastMessenger_NilShardCoordinatorShouldErr(t *testing.T) { nil, nil, headersSubscriber, + headersSubscriber, + &dataRetrieverMock.ProofsPoolMock{}, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, interceptosContainer, alarmSchedulerStub, &testscommon.KeysHandlerStub{}, @@ -138,6 +149,9 @@ func TestGetBroadcastMessenger_InvalidShardIdShouldErr(t *testing.T) { shardCoord, nil, headersSubscriber, + headersSubscriber, + &dataRetrieverMock.ProofsPoolMock{}, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, interceptosContainer, alarmSchedulerStub, &testscommon.KeysHandlerStub{}, diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 7410a695a30..8862fe21de6 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -161,6 +161,9 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { ccf.processComponents.ShardCoordinator(), ccf.cryptoComponents.PeerSignatureHandler(), ccf.dataComponents.Datapool().Headers(), + ccf.dataComponents.Datapool().Headers(), + ccf.dataComponents.Datapool().Proofs(), + 
ccf.coreComponents.EnableEpochsHandler(), ccf.processComponents.InterceptorsContainer(), ccf.coreComponents.AlarmScheduler(), ccf.cryptoComponents.KeysHandler(), diff --git a/integrationTests/testFullNode.go b/integrationTests/testFullNode.go index b97f7755c3e..fdce2e36300 100644 --- a/integrationTests/testFullNode.go +++ b/integrationTests/testFullNode.go @@ -394,6 +394,9 @@ func (tpn *TestFullNode) initTestNodeWithArgs(args ArgTestProcessorNode, fullArg tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), + tpn.DataPool.Headers(), + tpn.DataPool.Proofs(), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 0a4f06b6d14..b63908e027a 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -957,6 +957,9 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), + tpn.DataPool.Headers(), + tpn.DataPool.Proofs(), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( @@ -1187,6 +1190,9 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), + tpn.DataPool.Headers(), + tpn.DataPool.Proofs(), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( diff --git a/node/chainSimulator/components/testOnlyProcessingNode.go b/node/chainSimulator/components/testOnlyProcessingNode.go index 8f497b73db3..3360e56a6bc 100644 --- a/node/chainSimulator/components/testOnlyProcessingNode.go +++ 
b/node/chainSimulator/components/testOnlyProcessingNode.go @@ -374,6 +374,9 @@ func (node *testOnlyProcessingNode) createBroadcastMessenger() error { node.ProcessComponentsHolder.ShardCoordinator(), node.CryptoComponentsHolder.PeerSignatureHandler(), node.DataComponentsHolder.Datapool().Headers(), + node.DataComponentsHolder.Datapool().Headers(), + node.DataComponentsHolder.Datapool().Proofs(), + node.CoreComponentsHolder.EnableEpochsHandler(), node.ProcessComponentsHolder.InterceptorsContainer(), node.CoreComponentsHolder.AlarmScheduler(), node.CryptoComponentsHolder.KeysHandler(),