Skip to content
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions integrationTests/mock/blockProcessorMock.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ type BlockProcessorMock struct {
) error
OnExecutedBlockCalled func(header data.HeaderHandler, rootHash []byte) error
ProposedDirectSentTransactionsToBroadcastCalled func(proposedBody data.BodyHandler) map[string][][]byte
PruneTrieAsyncHeaderCalled func(header data.HeaderHandler)
}

// ProcessBlock mocks processing a block
Expand Down Expand Up @@ -231,6 +232,13 @@ func (bpm *BlockProcessorMock) RemoveHeaderFromPool(headerHash []byte) {
}
}

// PruneTrieAsyncHeader mocks triggering trie pruning for a header coming from
// the async execution flow; it delegates to PruneTrieAsyncHeaderCalled when set.
func (bpm *BlockProcessorMock) PruneTrieAsyncHeader(header data.HeaderHandler) {
	if bpm.PruneTrieAsyncHeaderCalled != nil {
		bpm.PruneTrieAsyncHeaderCalled(header)
	}
}

// VerifyBlockProposal -
func (bpm *BlockProcessorMock) VerifyBlockProposal(
headerHandler data.HeaderHandler,
Expand Down
2 changes: 2 additions & 0 deletions process/asyncExecution/headersExecutor.go
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,8 @@ func (he *headersExecutor) process(pair cache.HeaderBodyPair) error {
return nil
}

he.blockProcessor.PruneTrieAsyncHeader(he.blockChain.GetCurrentBlockHeader())

he.blockChain.SetFinalBlockInfo(
executionResult.GetHeaderNonce(),
executionResult.GetHeaderHash(),
Expand Down
1 change: 1 addition & 0 deletions process/asyncExecution/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,5 +25,6 @@ type BlockProcessor interface {
ProcessBlockProposal(header data.HeaderHandler, headerHash []byte, body data.BodyHandler) (data.BaseExecutionResultHandler, error)
CommitBlockProposalState(headerHandler data.HeaderHandler) error
RevertBlockProposalState()
PruneTrieAsyncHeader(header data.HeaderHandler)
IsInterfaceNil() bool
}
49 changes: 49 additions & 0 deletions process/block/baseProcess.go
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,9 @@ type baseProcessor struct {
aotSelector process.AOTTransactionSelector
maxProposalNonceGap uint64
closingNodeStarted *atomic.Bool

lastPrunedHeaderNonce uint64
mutLastPrunedHeader sync.RWMutex
}

type bootStorerDataArgs struct {
Expand Down Expand Up @@ -4032,3 +4035,49 @@ func (bp *baseProcessor) saveEpochStartEconomicsMetrics(epochStartMetaBlock data
bp.appStatusHandler.SetStringValue(common.MetricTotalFees, epochStartMetaBlock.GetAccumulatedFeesInEpoch().String())
bp.appStatusHandler.SetStringValue(common.MetricDevRewardsInEpoch, epochStartMetaBlock.GetDevFeesInEpoch().String())
}

// PruneTrieAsyncHeader will trigger trie pruning for header from async execution flow
func (bp *baseProcessor) PruneTrieAsyncHeader(
header data.HeaderHandler,
) {
bp.mutLastPrunedHeader.Lock()
defer bp.mutLastPrunedHeader.Unlock()

if bp.lastPrunedHeaderNonce == 0 {
// last pruned header nonce not set, trigger prune trie for the provided header
bp.blockProcessor.pruneTrieHeaderV3(header)
bp.lastPrunedHeaderNonce = header.GetNonce()
return
}

if header.GetNonce() <= bp.lastPrunedHeaderNonce {
return
}

// prune trie for intermediate headers
for nonce := bp.lastPrunedHeaderNonce + 1; nonce < header.GetNonce(); nonce++ {
// headers pool is cleaned on consensus flow based on last execution result
// included on the committed header (plus some delta), so intermediate header
// should be available in pool, since trie prunning is triggered from
// execution flow; if there are no included blocks from execution flow
// (and not prunning triggerd) headers will not be removed from pool
intermHeader, _, err := process.GetHeaderWithNonce(
Comment on lines +4061 to +4064
Copy link

Copilot AI Mar 25, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Spelling: several occurrences of "prunning" / "triggerd" in these comments (and the warn log below in this function) should be "pruning" / "triggered" for consistency and searchability.

Copilot uses AI. Check for mistakes.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can you get the headers by hash? (revert traversing and then go forward

nonce,
header.GetShardID(),
bp.dataPool.Headers(),
bp.marshalizer,
bp.store,
bp.uint64Converter,
)
if err != nil {
log.Warn("failed to get intermediate header for prunning", "error", err)
continue
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe in case of error here, should we just cancel prune for this one?
but then that header might have had more execution results
maybe simpler is just to clean up the eviction wait list and then it acts like on reset.

cc: @BeniaminDrasovean

}

bp.blockProcessor.pruneTrieHeaderV3(intermHeader)
}

// prune trie for the provided header
bp.blockProcessor.pruneTrieHeaderV3(header)
bp.lastPrunedHeaderNonce = header.GetNonce()
Comment on lines +4078 to +4082
Copy link

Copilot AI Mar 25, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If pruning an intermediate nonce fails (e.g. header not found and the loop continues), lastPrunedHeaderNonce is still advanced to the provided header’s nonce at the end. This permanently skips the failed intermediate nonce(s) and they will never be retried in later calls, which can lead to missing trie pruning. Consider only advancing lastPrunedHeaderNonce up to the last successfully pruned nonce, or track/retry failures instead of skipping them permanently.

Copilot uses AI. Check for mistakes.
}
200 changes: 200 additions & 0 deletions process/block/baseProcess_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5904,3 +5904,203 @@ func TestBaseProcessor_WaitForExecutionResultsVerification(t *testing.T) {
require.Equal(t, int32(1), callCount.Load())
})
}

func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) {
t.Parallel()

t.Run("last pruned header not set, should trigger provided header", func(t *testing.T) {
t.Parallel()

cancelPruneCalled := false
pruneTrieCalled := false

arguments := CreateMockArguments(createComponentHolderMocks())
arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{
IsPruningEnabledCalled: func() bool {
return true
},
CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) {
cancelPruneCalled = true
},
PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) {
pruneTrieCalled = true
},
}
bp, err := blproc.NewShardProcessor(arguments)
require.Nil(t, err)

require.Equal(t, uint64(0), bp.GetLastPrunedNonce())

rootHash1 := []byte("rootHash1")

executionResultsHandlers := []data.BaseExecutionResultHandler{
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: rootHash1,
},
},
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: []byte("some other root hash"),
},
},
}
header1 := &block.HeaderV3{
Nonce: 10,
}
_ = header1.SetExecutionResultsHandlers(executionResultsHandlers)
bp.PruneTrieAsyncHeader(header1)

require.True(t, cancelPruneCalled)
require.True(t, pruneTrieCalled)

require.Equal(t, uint64(10), bp.GetLastPrunedNonce())
})

t.Run("header nonce lower than last pruned header, should not trigger", func(t *testing.T) {
t.Parallel()

cancelPruneCalled := false
pruneTrieCalled := false

arguments := CreateMockArguments(createComponentHolderMocks())
arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{
IsPruningEnabledCalled: func() bool {
return true
},
CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) {
cancelPruneCalled = true
},
PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) {
pruneTrieCalled = true
},
}
bp, err := blproc.NewShardProcessor(arguments)
require.Nil(t, err)

bp.SetLastPrunedNonce(10)

header2 := &block.HeaderV3{
Nonce: 9,
}
bp.PruneTrieAsyncHeader(header2)
require.False(t, cancelPruneCalled)
require.False(t, pruneTrieCalled)

require.Equal(t, uint64(10), bp.GetLastPrunedNonce())
})

t.Run("should trigger multiple times for intermediate headers", func(t *testing.T) {
t.Parallel()

cancelPruneCalled := 0
pruneTrieCalled := 0

coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks()
arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents)

arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{
IsPruningEnabledCalled: func() bool {
return true
},
CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) {
cancelPruneCalled++
},
PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) {
pruneTrieCalled++
},
}

rootHash1 := []byte("rootHash1")
executionResultsHandlers := []data.BaseExecutionResultHandler{
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: rootHash1,
},
},
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: []byte("some other root hash"),
},
},
}
header1 := &block.HeaderV3{
Nonce: 8,
LastExecutionResult: &block.ExecutionResultInfo{},
}
_ = header1.SetExecutionResultsHandlers(executionResultsHandlers)

rootHash2 := []byte("rootHash2")
executionResultsHandlers = []data.BaseExecutionResultHandler{
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: rootHash2,
},
},
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: []byte("some other root hash6"),
},
},
}
header2 := &block.HeaderV3{
Nonce: 9,
}
_ = header2.SetExecutionResultsHandlers(executionResultsHandlers)

headerCalls := 0
headerHashCalls := 0
headersPool := &mock.HeadersCacherStub{
GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) {
if headerCalls == 0 {
headerCalls++
return []data.HeaderHandler{header2}, [][]byte{[]byte("hash1")}, nil
}

return []data.HeaderHandler{}, [][]byte{}, nil
},
GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) {
if headerHashCalls == 0 {
headerHashCalls++
return header1, nil
}
return header2, nil
},
}
dataPool := initDataPool()
dataPool.HeadersCalled = func() dataRetriever.HeadersPool {
return headersPool
}
dataComponents.DataPool = dataPool

bp, err := blproc.NewShardProcessor(arguments)
require.Nil(t, err)

bp.SetLastPrunedNonce(8)

rootHash3 := []byte("rootHash3")

executionResultsHandlers = []data.BaseExecutionResultHandler{
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: rootHash3,
},
},
&block.ExecutionResult{
BaseExecutionResult: &block.BaseExecutionResult{
RootHash: []byte("some other root hash2"),
},
},
}
header3 := &block.HeaderV3{
Nonce: 10,
}
_ = header1.SetExecutionResultsHandlers(executionResultsHandlers)
Copy link

Copilot AI Mar 25, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In this test, execution results are set on header1 (nonce 8), but the method under test is called with header3 (nonce 10). This leaves header3 without execution results handlers, so PruneTrieAsyncHeader(header3) may not trigger pruning as expected and the assertions can become incorrect/flaky. Set the execution results handlers on header3 (or call pruning on the header you populated).

Suggested change
_ = header1.SetExecutionResultsHandlers(executionResultsHandlers)
_ = header3.SetExecutionResultsHandlers(executionResultsHandlers)

Copilot uses AI. Check for mistakes.
bp.PruneTrieAsyncHeader(header3)

require.Equal(t, 2, cancelPruneCalled)
require.Equal(t, 2, pruneTrieCalled)

require.Equal(t, uint64(10), bp.GetLastPrunedNonce())
})
}
20 changes: 18 additions & 2 deletions process/block/export_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,8 @@ func (sp *shardProcessor) UpdateStateStorage(finalHeaders []data.HeaderHandler,
}

// PruneTrieHeaderV3 -
func (sp *shardProcessor) PruneTrieHeaderV3(executionResultsHandlers []data.BaseExecutionResultHandler) {
sp.pruneTrieHeaderV3(executionResultsHandlers)
func (sp *shardProcessor) PruneTrieHeaderV3(header data.HeaderHandler) {
sp.pruneTrieHeaderV3(header)
}

// NewShardProcessorEmptyWith3shards -
Expand Down Expand Up @@ -1188,3 +1188,19 @@ func (bp *baseProcessor) WaitForExecutionResultsVerification(
) error {
return bp.waitForExecutionResultsVerification(header, haveTime)
}

// SetLastPrunedNonce sets the last pruned header nonce, for testing purposes.
func (bp *baseProcessor) SetLastPrunedNonce(nonce uint64) {
	bp.mutLastPrunedHeader.Lock()
	defer bp.mutLastPrunedHeader.Unlock()

	bp.lastPrunedHeaderNonce = nonce
}

// GetLastPrunedNonce returns the last pruned header nonce, for testing purposes.
func (bp *baseProcessor) GetLastPrunedNonce() uint64 {
	bp.mutLastPrunedHeader.RLock()
	defer bp.mutLastPrunedHeader.RUnlock()

	return bp.lastPrunedHeaderNonce
}
1 change: 1 addition & 0 deletions process/block/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (

type blockProcessor interface {
removeStartOfEpochBlockDataFromPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error
pruneTrieHeaderV3(header data.HeaderHandler)
}

type gasConsumedProvider interface {
Expand Down
15 changes: 9 additions & 6 deletions process/block/metablock.go
Original file line number Diff line number Diff line change
Expand Up @@ -1634,7 +1634,7 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock
mp.accountsDB[state.PeerAccountsState],
)
} else {
mp.pruneTriesHeaderV3(metaBlock, prevMetaBlock)
mp.pruneTrieHeaderV3(metaBlock)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is this still supposed to be here?

}

outportFinalizedHeaderHash := metaBlockHash
Expand All @@ -1646,9 +1646,8 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock
mp.blockChain.SetFinalBlockInfo(metaBlock.GetNonce(), metaBlockHash, rootHash)
}

func (mp *metaProcessor) pruneTriesHeaderV3(
metaBlock data.MetaHeaderHandler,
prevMetaBlock data.MetaHeaderHandler,
func (mp *metaProcessor) pruneTrieHeaderV3(
metaBlock data.HeaderHandler,
) {
accountsDb := mp.accountsDB[state.UserAccountsState]
peerAccountsDb := mp.accountsDB[state.PeerAccountsState]
Expand All @@ -1666,7 +1665,7 @@ func (mp *metaProcessor) pruneTriesHeaderV3(
"currentExecResType", fmt.Sprintf("%T", execResults[i]))
continue
}
prevExecRes, err := mp.getPreviousExecutionResult(i, execResults, prevMetaBlock, prevMetaBlockHash)
prevExecRes, err := mp.getPreviousExecutionResult(i, execResults, prevMetaBlockHash)
if err != nil {
log.Warn("failed to get previous execution result for pruning",
"err", err,
Expand Down Expand Up @@ -1708,7 +1707,6 @@ func (mp *metaProcessor) pruneTriesHeaderV3(
func (mp *metaProcessor) getPreviousExecutionResult(
index int,
executionResultsHandlers []data.BaseExecutionResultHandler,
prevMetaBlock data.MetaHeaderHandler,
prevMetaBlockHash []byte,
) (data.BaseMetaExecutionResultHandler, error) {
if index > 0 {
Expand All @@ -1719,6 +1717,11 @@ func (mp *metaProcessor) getPreviousExecutionResult(
return metaExecRes, nil
}

prevMetaBlock, err := process.GetMetaHeader(prevMetaBlockHash, mp.dataPool.Headers(), mp.marshalizer, mp.store)
if err != nil {
return nil, err
}

if prevMetaBlock.IsHeaderV3() {
lastExecRes := prevMetaBlock.GetLastExecutionResultHandler()
if check.IfNil(lastExecRes) {
Expand Down
Loading
Loading