-
Notifications
You must be signed in to change notification settings - Fork 226
Trie pruning async trigger #7800
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: feat/supernova-async-exec
Are you sure you want to change the base?
Changes from 5 commits
95ba5fe
1be0302
322b33c
247357f
e77ce87
1714b93
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -152,6 +152,9 @@ type baseProcessor struct { | |
| aotSelector process.AOTTransactionSelector | ||
| maxProposalNonceGap uint64 | ||
| closingNodeStarted *atomic.Bool | ||
|
|
||
| lastPrunedHeaderNonce uint64 | ||
| mutLastPrunedHeader sync.RWMutex | ||
| } | ||
|
|
||
| type bootStorerDataArgs struct { | ||
|
|
@@ -4032,3 +4035,49 @@ func (bp *baseProcessor) saveEpochStartEconomicsMetrics(epochStartMetaBlock data | |
| bp.appStatusHandler.SetStringValue(common.MetricTotalFees, epochStartMetaBlock.GetAccumulatedFeesInEpoch().String()) | ||
| bp.appStatusHandler.SetStringValue(common.MetricDevRewardsInEpoch, epochStartMetaBlock.GetDevFeesInEpoch().String()) | ||
| } | ||
|
|
||
| // PruneTrieAsyncHeader will trigger trie pruning for header from async execution flow | ||
| func (bp *baseProcessor) PruneTrieAsyncHeader( | ||
| header data.HeaderHandler, | ||
| ) { | ||
| bp.mutLastPrunedHeader.Lock() | ||
| defer bp.mutLastPrunedHeader.Unlock() | ||
|
|
||
| if bp.lastPrunedHeaderNonce == 0 { | ||
| // last pruned header nonce not set, trigger prune trie for the provided header | ||
| bp.blockProcessor.pruneTrieHeaderV3(header) | ||
| bp.lastPrunedHeaderNonce = header.GetNonce() | ||
| return | ||
| } | ||
|
|
||
| if header.GetNonce() <= bp.lastPrunedHeaderNonce { | ||
| return | ||
| } | ||
|
|
||
| // prune trie for intermediate headers | ||
| for nonce := bp.lastPrunedHeaderNonce + 1; nonce < header.GetNonce(); nonce++ { | ||
| // headers pool is cleaned on consensus flow based on last execution result | ||
| // included on the committed header (plus some delta), so intermediate header | ||
| // should be available in pool, since trie pruning is triggered from | ||
| // execution flow; if there are no included blocks from execution flow | ||
| // (and no pruning triggered) headers will not be removed from pool | ||
| intermHeader, _, err := process.GetHeaderWithNonce( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can you get the headers by hash? (reverse traversing and then go forward) |
||
| nonce, | ||
| header.GetShardID(), | ||
| bp.dataPool.Headers(), | ||
| bp.marshalizer, | ||
| bp.store, | ||
| bp.uint64Converter, | ||
| ) | ||
| if err != nil { | ||
| log.Warn("failed to get intermediate header for pruning", "error", err) | ||
| continue | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. maybe in case of error here, should we just cancel prune for this one? |
||
| } | ||
|
|
||
| bp.blockProcessor.pruneTrieHeaderV3(intermHeader) | ||
| } | ||
|
|
||
| // prune trie for the provided header | ||
| bp.blockProcessor.pruneTrieHeaderV3(header) | ||
| bp.lastPrunedHeaderNonce = header.GetNonce() | ||
|
Comment on lines
+4078
to
+4082
|
||
| } | ||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -5904,3 +5904,203 @@ func TestBaseProcessor_WaitForExecutionResultsVerification(t *testing.T) { | |||||
| require.Equal(t, int32(1), callCount.Load()) | ||||||
| }) | ||||||
| } | ||||||
|
|
||||||
| func TestBaseProcessor_PruneTrieAsyncHeader(t *testing.T) { | ||||||
| t.Parallel() | ||||||
|
|
||||||
| t.Run("last pruned header not set, should trigger provided header", func(t *testing.T) { | ||||||
| t.Parallel() | ||||||
|
|
||||||
| cancelPruneCalled := false | ||||||
| pruneTrieCalled := false | ||||||
|
|
||||||
| arguments := CreateMockArguments(createComponentHolderMocks()) | ||||||
| arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ | ||||||
| IsPruningEnabledCalled: func() bool { | ||||||
| return true | ||||||
| }, | ||||||
| CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { | ||||||
| cancelPruneCalled = true | ||||||
| }, | ||||||
| PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { | ||||||
| pruneTrieCalled = true | ||||||
| }, | ||||||
| } | ||||||
| bp, err := blproc.NewShardProcessor(arguments) | ||||||
| require.Nil(t, err) | ||||||
|
|
||||||
| require.Equal(t, uint64(0), bp.GetLastPrunedNonce()) | ||||||
|
|
||||||
| rootHash1 := []byte("rootHash1") | ||||||
|
|
||||||
| executionResultsHandlers := []data.BaseExecutionResultHandler{ | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: rootHash1, | ||||||
| }, | ||||||
| }, | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: []byte("some other root hash"), | ||||||
| }, | ||||||
| }, | ||||||
| } | ||||||
| header1 := &block.HeaderV3{ | ||||||
| Nonce: 10, | ||||||
| } | ||||||
| _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) | ||||||
| bp.PruneTrieAsyncHeader(header1) | ||||||
|
|
||||||
| require.True(t, cancelPruneCalled) | ||||||
| require.True(t, pruneTrieCalled) | ||||||
|
|
||||||
| require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) | ||||||
| }) | ||||||
|
|
||||||
| t.Run("header nonce lower than last pruned header, should not trigger", func(t *testing.T) { | ||||||
| t.Parallel() | ||||||
|
|
||||||
| cancelPruneCalled := false | ||||||
| pruneTrieCalled := false | ||||||
|
|
||||||
| arguments := CreateMockArguments(createComponentHolderMocks()) | ||||||
| arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ | ||||||
| IsPruningEnabledCalled: func() bool { | ||||||
| return true | ||||||
| }, | ||||||
| CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { | ||||||
| cancelPruneCalled = true | ||||||
| }, | ||||||
| PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { | ||||||
| pruneTrieCalled = true | ||||||
| }, | ||||||
| } | ||||||
| bp, err := blproc.NewShardProcessor(arguments) | ||||||
| require.Nil(t, err) | ||||||
|
|
||||||
| bp.SetLastPrunedNonce(10) | ||||||
|
|
||||||
| header2 := &block.HeaderV3{ | ||||||
| Nonce: 9, | ||||||
| } | ||||||
| bp.PruneTrieAsyncHeader(header2) | ||||||
| require.False(t, cancelPruneCalled) | ||||||
| require.False(t, pruneTrieCalled) | ||||||
|
|
||||||
| require.Equal(t, uint64(10), bp.GetLastPrunedNonce()) | ||||||
| }) | ||||||
|
|
||||||
| t.Run("should trigger multiple times for intermediate headers", func(t *testing.T) { | ||||||
| t.Parallel() | ||||||
|
|
||||||
| cancelPruneCalled := 0 | ||||||
| pruneTrieCalled := 0 | ||||||
|
|
||||||
| coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() | ||||||
| arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) | ||||||
|
|
||||||
| arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ | ||||||
| IsPruningEnabledCalled: func() bool { | ||||||
| return true | ||||||
| }, | ||||||
| CancelPruneCalled: func(rootHash []byte, identifier state.TriePruningIdentifier) { | ||||||
| cancelPruneCalled++ | ||||||
| }, | ||||||
| PruneTrieCalled: func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) { | ||||||
| pruneTrieCalled++ | ||||||
| }, | ||||||
| } | ||||||
|
|
||||||
| rootHash1 := []byte("rootHash1") | ||||||
| executionResultsHandlers := []data.BaseExecutionResultHandler{ | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: rootHash1, | ||||||
| }, | ||||||
| }, | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: []byte("some other root hash"), | ||||||
| }, | ||||||
| }, | ||||||
| } | ||||||
| header1 := &block.HeaderV3{ | ||||||
| Nonce: 8, | ||||||
| LastExecutionResult: &block.ExecutionResultInfo{}, | ||||||
| } | ||||||
| _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) | ||||||
|
|
||||||
| rootHash2 := []byte("rootHash2") | ||||||
| executionResultsHandlers = []data.BaseExecutionResultHandler{ | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: rootHash2, | ||||||
| }, | ||||||
| }, | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: []byte("some other root hash6"), | ||||||
| }, | ||||||
| }, | ||||||
| } | ||||||
| header2 := &block.HeaderV3{ | ||||||
| Nonce: 9, | ||||||
| } | ||||||
| _ = header2.SetExecutionResultsHandlers(executionResultsHandlers) | ||||||
|
|
||||||
| headerCalls := 0 | ||||||
| headerHashCalls := 0 | ||||||
| headersPool := &mock.HeadersCacherStub{ | ||||||
| GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { | ||||||
| if headerCalls == 0 { | ||||||
| headerCalls++ | ||||||
| return []data.HeaderHandler{header2}, [][]byte{[]byte("hash1")}, nil | ||||||
| } | ||||||
|
|
||||||
| return []data.HeaderHandler{}, [][]byte{}, nil | ||||||
| }, | ||||||
| GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { | ||||||
| if headerHashCalls == 0 { | ||||||
| headerHashCalls++ | ||||||
| return header1, nil | ||||||
| } | ||||||
| return header2, nil | ||||||
| }, | ||||||
| } | ||||||
| dataPool := initDataPool() | ||||||
| dataPool.HeadersCalled = func() dataRetriever.HeadersPool { | ||||||
| return headersPool | ||||||
| } | ||||||
| dataComponents.DataPool = dataPool | ||||||
|
|
||||||
| bp, err := blproc.NewShardProcessor(arguments) | ||||||
| require.Nil(t, err) | ||||||
|
|
||||||
| bp.SetLastPrunedNonce(8) | ||||||
|
|
||||||
| rootHash3 := []byte("rootHash3") | ||||||
|
|
||||||
| executionResultsHandlers = []data.BaseExecutionResultHandler{ | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: rootHash3, | ||||||
| }, | ||||||
| }, | ||||||
| &block.ExecutionResult{ | ||||||
| BaseExecutionResult: &block.BaseExecutionResult{ | ||||||
| RootHash: []byte("some other root hash2"), | ||||||
| }, | ||||||
| }, | ||||||
| } | ||||||
| header3 := &block.HeaderV3{ | ||||||
| Nonce: 10, | ||||||
| } | ||||||
| _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) | ||||||
|
||||||
| _ = header1.SetExecutionResultsHandlers(executionResultsHandlers) | |
| _ = header3.SetExecutionResultsHandlers(executionResultsHandlers) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1634,7 +1634,7 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock | |
| mp.accountsDB[state.PeerAccountsState], | ||
| ) | ||
| } else { | ||
| mp.pruneTriesHeaderV3(metaBlock, prevMetaBlock) | ||
| mp.pruneTrieHeaderV3(metaBlock) | ||
|
||
| } | ||
|
|
||
| outportFinalizedHeaderHash := metaBlockHash | ||
|
|
@@ -1646,9 +1646,8 @@ func (mp *metaProcessor) updateState(metaBlock data.MetaHeaderHandler, metaBlock | |
| mp.blockChain.SetFinalBlockInfo(metaBlock.GetNonce(), metaBlockHash, rootHash) | ||
| } | ||
|
|
||
| func (mp *metaProcessor) pruneTriesHeaderV3( | ||
| metaBlock data.MetaHeaderHandler, | ||
| prevMetaBlock data.MetaHeaderHandler, | ||
| func (mp *metaProcessor) pruneTrieHeaderV3( | ||
| metaBlock data.HeaderHandler, | ||
| ) { | ||
| accountsDb := mp.accountsDB[state.UserAccountsState] | ||
| peerAccountsDb := mp.accountsDB[state.PeerAccountsState] | ||
|
|
@@ -1666,7 +1665,7 @@ func (mp *metaProcessor) pruneTriesHeaderV3( | |
| "currentExecResType", fmt.Sprintf("%T", execResults[i])) | ||
| continue | ||
| } | ||
| prevExecRes, err := mp.getPreviousExecutionResult(i, execResults, prevMetaBlock, prevMetaBlockHash) | ||
| prevExecRes, err := mp.getPreviousExecutionResult(i, execResults, prevMetaBlockHash) | ||
| if err != nil { | ||
| log.Warn("failed to get previous execution result for pruning", | ||
| "err", err, | ||
|
|
@@ -1708,7 +1707,6 @@ func (mp *metaProcessor) pruneTriesHeaderV3( | |
| func (mp *metaProcessor) getPreviousExecutionResult( | ||
| index int, | ||
| executionResultsHandlers []data.BaseExecutionResultHandler, | ||
| prevMetaBlock data.MetaHeaderHandler, | ||
| prevMetaBlockHash []byte, | ||
| ) (data.BaseMetaExecutionResultHandler, error) { | ||
| if index > 0 { | ||
|
|
@@ -1719,6 +1717,11 @@ func (mp *metaProcessor) getPreviousExecutionResult( | |
| return metaExecRes, nil | ||
| } | ||
|
|
||
| prevMetaBlock, err := process.GetMetaHeader(prevMetaBlockHash, mp.dataPool.Headers(), mp.marshalizer, mp.store) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
|
|
||
| if prevMetaBlock.IsHeaderV3() { | ||
| lastExecRes := prevMetaBlock.GetLastExecutionResultHandler() | ||
| if check.IfNil(lastExecRes) { | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Spelling: several occurrences of "prunning" / "triggerd" in these comments (and the warn log below in this function) should be "pruning" / "triggered" for consistency and searchability.