diff --git a/db/kv/membatchwithdb/memory_mutation.go b/db/kv/membatchwithdb/memory_mutation.go
index f384d1e1b9f..346ed960a93 100644
--- a/db/kv/membatchwithdb/memory_mutation.go
+++ b/db/kv/membatchwithdb/memory_mutation.go
@@ -73,6 +73,10 @@ func NewMemoryBatch(tx kv.TemporalTx, tmpDir string, logger log.Logger) *MemoryM
 	}
 }
 
+func (m *MemoryMutation) UnderlyingTx() kv.TemporalTx {
+	return m.db
+}
+
 func (m *MemoryMutation) UpdateTxn(tx kv.TemporalTx) {
 	m.db = tx
 	m.statelessCursors = nil
diff --git a/execution/execmodule/block_building.go b/execution/execmodule/block_building.go
index 454ed27fb5d..584f6653839 100644
--- a/execution/execmodule/block_building.go
+++ b/execution/execmodule/block_building.go
@@ -178,6 +178,17 @@ func (e *EthereumExecutionModule) GetAssembledBlock(ctx context.Context, req *ex
 		payload.BlobGasUsed = header.BlobGasUsed
 		payload.ExcessBlobGas = header.ExcessBlobGas
 	}
+	blockAccessList := block.BlockAccessList()
+	if header.BlockAccessListHash != nil || blockAccessList != nil {
+		payload.Version = 4
+		if header.BlockAccessListHash != nil {
+			payload.BlockAccessListHash = gointerfaces.ConvertHashToH256(*header.BlockAccessListHash)
+		}
+		payload.BlockAccessList = types.ConvertBlockAccessListToTypesProto(blockAccessList)
+		if payload.BlockAccessList == nil {
+			payload.BlockAccessList = []*typesproto.BlockAccessListAccount{}
+		}
+	}
 
 	blockValue := blockValue(blockWithReceipts, baseFee)
 
diff --git a/execution/stagedsync/exec3_parallel.go b/execution/stagedsync/exec3_parallel.go
index df3a7f87762..ff5e0c1f4a3 100644
--- a/execution/stagedsync/exec3_parallel.go
+++ b/execution/stagedsync/exec3_parallel.go
@@ -216,16 +216,25 @@ func (pe *parallelExecutor) exec(ctx context.Context, execStage *StageState, u U
 			if pe.cfg.chainConfig.IsAmsterdam(applyResult.BlockTime) || pe.cfg.experimentalBAL {
 				bal := CreateBAL(applyResult.BlockNum, applyResult.TxIO, pe.cfg.dirs.DataDir)
 				log.Debug("bal", "blockNum", applyResult.BlockNum, "hash", bal.Hash(), "valid", bal.Validate() == nil)
-				if pe.cfg.chainConfig.IsAmsterdam(applyResult.BlockTime) {
-					headerBALHash := *lastHeader.BlockAccessListHash
-					if headerBALHash != b.BlockAccessList().Hash() {
-						log.Info(fmt.Sprintf("bal from block: %s", b.BlockAccessList().DebugString()))
-						return fmt.Errorf("block %d: invalid block access list, hash mismatch: got %s expected %s", applyResult.BlockNum, b.BlockAccessList().Hash(), headerBALHash)
+				if lastHeader.BlockAccessListHash == nil {
+					if pe.isBlockProduction {
+						hash := bal.Hash()
+						lastHeader.BlockAccessListHash = &hash
+					} else {
+						return fmt.Errorf("block %d: missing block access list hash", applyResult.BlockNum)
+					}
 				}
-				if headerBALHash != bal.Hash() {
-					log.Info(fmt.Sprintf("computed bal: %s", bal.DebugString()))
-					return fmt.Errorf("%w, block=%d: block access list mismatch: got %s expected %s", rules.ErrInvalidBlock, applyResult.BlockNum, bal.Hash(), headerBALHash)
+				headerBALHash := *lastHeader.BlockAccessListHash
+				if !pe.isBlockProduction {
+					if headerBALHash != b.BlockAccessList().Hash() {
+						log.Info(fmt.Sprintf("bal from block: %s", b.BlockAccessList().DebugString()))
+						return fmt.Errorf("block %d: invalid block access list, hash mismatch: got %s expected %s", applyResult.BlockNum, b.BlockAccessList().Hash(), headerBALHash)
+					}
+					if headerBALHash != bal.Hash() {
+						log.Info(fmt.Sprintf("computed bal: %s", bal.DebugString()))
+						return fmt.Errorf("%w, block=%d: block access list mismatch: got %s expected %s", rules.ErrInvalidBlock, applyResult.BlockNum, bal.Hash(), headerBALHash)
+					}
 				}
 			}
 		}
@@ -1715,3 +1724,38 @@ func mergeReadSets(a state.ReadSet, b state.ReadSet) state.ReadSet {
 	}
 	return out
 }
+
+func mergeVersionedWrites(prev, next state.VersionedWrites) state.VersionedWrites {
+	if len(prev) == 0 {
+		return next
+	}
+	if len(next) == 0 {
+		return prev
+	}
+	merged := state.WriteSet{}
+	for _, v := range prev {
+		merged.Set(*v)
+	}
+	for _, v := range next {
+		merged.Set(*v)
+	}
+	out := make(state.VersionedWrites, 0, merged.Len())
+	merged.Scan(func(v *state.VersionedWrite) bool {
+		out = append(out, v)
+		return true
+	})
+	return out
+}
+
+func mergeAccessedAddresses(dst, src map[accounts.Address]struct{}) map[accounts.Address]struct{} {
+	if len(src) == 0 {
+		return dst
+	}
+	if dst == nil {
+		dst = make(map[accounts.Address]struct{}, len(src))
+	}
+	for addr := range src {
+		dst[addr] = struct{}{}
+	}
+	return dst
+}
diff --git a/execution/stagedsync/stage_mining_create_block.go b/execution/stagedsync/stage_mining_create_block.go
index 3f0fd399fac..7002da99740 100644
--- a/execution/stagedsync/stage_mining_create_block.go
+++ b/execution/stagedsync/stage_mining_create_block.go
@@ -51,6 +51,7 @@ type MiningBlock struct {
 	Receipts    types.Receipts
 	Withdrawals []*types.Withdrawal
 	Requests    types.FlatRequests
+	BlockAccessList types.BlockAccessList
 
 	headerRlpSize      *int
 	withdrawalsRlpSize *int
diff --git a/execution/stagedsync/stage_mining_exec.go b/execution/stagedsync/stage_mining_exec.go
index a2dab61387f..4bcabcac81a 100644
--- a/execution/stagedsync/stage_mining_exec.go
+++ b/execution/stagedsync/stage_mining_exec.go
@@ -32,6 +32,7 @@ import (
 	"github.com/erigontech/erigon/common/metrics"
 	"github.com/erigontech/erigon/db/kv"
 	"github.com/erigontech/erigon/db/kv/membatchwithdb"
+	"github.com/erigontech/erigon/db/kv/temporal"
 	"github.com/erigontech/erigon/db/rawdb"
 	"github.com/erigontech/erigon/db/services"
 	"github.com/erigontech/erigon/db/state/execctx"
@@ -99,9 +100,18 @@ func SpawnMiningExecStage(ctx context.Context, s *StageState, sd *execctx.Shared
 	chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID)
 	logPrefix := s.LogPrefix()
 	current := cfg.miningState.MiningBlock
+	needBAL := execCfg.chainConfig.IsAmsterdam(current.Header.Time) || execCfg.experimentalBAL
 	stateReader := state.NewReaderV3(sd.AsGetter(tx))
 	ibs := state.New(stateReader)
+	var balIO *state.VersionedIO
+	var systemReads state.ReadSet
+	var systemWrites state.VersionedWrites
+	var systemAccess map[accounts.Address]struct{}
+	if needBAL {
+		ibs.SetVersionMap(state.NewVersionMap(nil))
+		balIO = &state.VersionedIO{}
+	}
 	// Clique consensus needs forced author in the evm context
 	//if cfg.chainConfig.Consensus == chain.CliqueConsensus {
 	//	execCfg.author = &cfg.miningState.MiningConfig.Etherbase
@@ -128,6 +138,12 @@ func SpawnMiningExecStage(ctx context.Context, s *StageState, sd *execctx.Shared
 	txNum := sd.TxNum()
 
 	protocol.InitializeBlockExecution(cfg.engine, chainReader, current.Header, cfg.chainConfig, ibs, &state.NoopWriter{}, logger, nil)
+	if needBAL {
+		systemReads = mergeReadSets(systemReads, ibs.VersionedReads())
+		systemWrites = mergeVersionedWrites(systemWrites, ibs.VersionedWrites(false))
+		systemAccess = mergeAccessedAddresses(systemAccess, ibs.AccessedAddresses())
+		ibs.ResetVersionedIO()
+	}
 
 	coinbase := accounts.InternAddress(cfg.miningState.MiningConfig.Etherbase)
 
@@ -149,7 +165,7 @@ func SpawnMiningExecStage(ctx context.Context, s *StageState, sd *execctx.Shared
 		}
 
 		if len(txns) > 0 {
-			logs, stop, err := addTransactionsToMiningBlock(ctx, logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, cfg.engine, txns, coinbase, ibs, interrupt, cfg.payloadId, logger)
+			logs, stop, err := addTransactionsToMiningBlock(ctx, logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, cfg.engine, txns, coinbase, ibs, balIO, interrupt, cfg.payloadId, logger)
 			if err != nil {
 				return err
 			}
@@ -189,49 +205,106 @@ func SpawnMiningExecStage(ctx context.Context, s *StageState, sd *execctx.Shared
 	}
 
 	var block *types.Block
+	if needBAL {
+		ibs.ResetVersionedIO()
+	}
 	block, current.Requests, err = protocol.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txns, current.Uncles, &state.NoopWriter{}, cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, chainReader, true, logger, nil)
 	if err != nil {
 		return fmt.Errorf("cannot finalize block execution: %s", err)
 	}
 
-	// Simulate the block execution to get the final state root
-	if err = rawdb.WriteHeader(tx, block.Header()); err != nil {
-		return fmt.Errorf("cannot write header: %s", err)
-	}
 	blockHeight := block.NumberU64()
-
-	if err = rawdb.WriteCanonicalHash(tx, block.Hash(), blockHeight); err != nil {
-		return fmt.Errorf("cannot write canonical hash: %s", err)
-	}
-	if err = rawdb.WriteHeadHeaderHash(tx, block.Hash()); err != nil {
-		return err
-	}
-	if _, err = rawdb.WriteRawBodyIfNotExists(tx, block.Hash(), blockHeight, block.RawBody()); err != nil {
-		return fmt.Errorf("cannot write body: %s", err)
-	}
-	if err = rawdb.AppendCanonicalTxNums(tx, blockHeight); err != nil {
-		return err
+	if needBAL {
+		systemReads = mergeReadSets(systemReads, ibs.VersionedReads())
+		systemWrites = mergeVersionedWrites(systemWrites, ibs.VersionedWrites(false))
+		systemAccess = mergeAccessedAddresses(systemAccess, ibs.AccessedAddresses())
+		ibs.ResetVersionedIO()
+
+		systemVersion := state.Version{BlockNum: blockHeight, TxIndex: -1}
+		balIO.RecordReads(systemVersion, systemReads)
+		balIO.RecordWrites(systemVersion, systemWrites)
+		balIO.RecordAccesses(systemVersion, systemAccess)
+		current.BlockAccessList = CreateBAL(blockHeight, balIO, execCfg.dirs.DataDir)
 	}
-	if err = stages.SaveStageProgress(tx, kv.Headers, blockHeight); err != nil {
-		return err
-	}
-	if err = stages.SaveStageProgress(tx, stages.Bodies, blockHeight); err != nil {
-		return err
+	writeBlockForExecution := func(rwTx kv.TemporalRwTx) error {
+		if err = rawdb.WriteHeader(rwTx, block.Header()); err != nil {
+			return fmt.Errorf("cannot write header: %s", err)
+		}
+		if err = rawdb.WriteCanonicalHash(rwTx, block.Hash(), blockHeight); err != nil {
+			return fmt.Errorf("cannot write canonical hash: %s", err)
+		}
+		if err = rawdb.WriteHeadHeaderHash(rwTx, block.Hash()); err != nil {
+			return err
+		}
+		if _, err = rawdb.WriteRawBodyIfNotExists(rwTx, block.Hash(), blockHeight, block.RawBody()); err != nil {
+			return fmt.Errorf("cannot write body: %s", err)
+		}
+		if err = rawdb.AppendCanonicalTxNums(rwTx, blockHeight); err != nil {
+			return err
+		}
+		if err = stages.SaveStageProgress(rwTx, kv.Headers, blockHeight); err != nil {
+			return err
+		}
+		if err = stages.SaveStageProgress(rwTx, stages.Bodies, blockHeight); err != nil {
+			return err
+		}
+		senderS := &StageState{state: s.state, ID: stages.Senders, BlockNumber: blockHeight - 1}
+		if err = SpawnRecoverSendersStage(sendersCfg, senderS, nil, rwTx, blockHeight, ctx, logger); err != nil {
+			return err
+		}
+		return nil
 	}
-	senderS := &StageState{state: s.state, ID: stages.Senders, BlockNumber: blockHeight - 1}
-	if err = SpawnRecoverSendersStage(sendersCfg, senderS, nil, tx, blockHeight, ctx, logger); err != nil {
+
+	// Simulate the block execution to get the final state root
+	if err = writeBlockForExecution(tx); err != nil {
 		return err
 	}
 
 	// This flag will skip checking the state root
 	execS := &StageState{state: s.state, ID: stages.Execution, BlockNumber: blockHeight - 1}
+	forceParallel := dbg.Exec3Parallel || cfg.chainConfig.IsAmsterdam(current.Header.Time)
+	execTx := tx
+	execSd := sd
+	var execCleanup func()
+	if forceParallel {
+		// get the underlying TemporalTx from MemoryMutation and create temporary SharedDomain
+		if _, ok := tx.(*temporal.RwTx); !ok {
+			type txUnwrapper interface {
+				UnderlyingTx() kv.TemporalTx
+			}
+			if unwrap, ok := tx.(txUnwrapper); ok {
+				if rwTx, ok := unwrap.UnderlyingTx().(kv.TemporalRwTx); ok {
+					tempSd, err := execctx.NewSharedDomains(ctx, rwTx, logger)
+					if err != nil {
+						return err
+					}
+					execTx = rwTx
+					execSd = tempSd
+					execCleanup = func() {
+						tempSd.Close()
+					}
+					if err = writeBlockForExecution(execTx); err != nil {
+						execCleanup()
+						return err
+					}
+				}
+			}
+		}
+		if _, ok := execTx.(*temporal.RwTx); !ok {
+			return fmt.Errorf("parallel execution requires *temporal.RwTx, got %T", execTx)
+		}
+	}
+	if execCleanup != nil {
+		defer execCleanup()
+	}
 
-	if err = ExecV3(ctx, execS, u, execCfg, sd, tx, dbg.Exec3Parallel, blockHeight, logger); err != nil {
+	if err = ExecV3(ctx, execS, u, execCfg, execSd, execTx, forceParallel, blockHeight, logger); err != nil {
 		logger.Error("cannot execute block execution", "err", err)
 		return err
 	}
 
-	rh, err := sd.ComputeCommitment(ctx, tx, true, blockHeight, txNum, s.LogPrefix(), nil)
+	commitmentTxNum := execSd.TxNum()
+	rh, err := execSd.ComputeCommitment(ctx, execTx, true, blockHeight, commitmentTxNum, s.LogPrefix(), nil)
 	if err != nil {
 		return fmt.Errorf("compute commitment failed: %w", err)
 	}
@@ -426,12 +499,13 @@ func addTransactionsToMiningBlock(
 	txns types.Transactions,
 	coinbase accounts.Address,
 	ibs *state.IntraBlockState,
+	balIO *state.VersionedIO,
 	interrupt *atomic.Bool,
 	payloadId uint64,
 	logger log.Logger,
 ) (types.Logs, bool, error) {
 	header := current.Header
-	txnIdx := ibs.TxnIndex() + 1
+	txnIdx := ibs.TxnIndex()
 	gasPool := new(protocol.GasPool).AddGas(header.GasLimit - header.GasUsed)
 	if header.BlobGasUsed != nil {
 		gasPool.AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(header.Time) - *header.BlobGasUsed)
 	}
@@ -440,6 +514,23 @@ func addTransactionsToMiningBlock(
 	var coalescedLogs types.Logs
 	noop := state.NewNoopWriter()
 
+	recordTxIO := func() {
+		if balIO == nil {
+			return
+		}
+		version := ibs.Version()
+		balIO.RecordReads(version, ibs.VersionedReads())
+		balIO.RecordWrites(version, ibs.VersionedWrites(false))
+		balIO.RecordAccesses(version, ibs.AccessedAddresses())
+		ibs.ResetVersionedIO()
+	}
+	clearTxIO := func() {
+		if balIO == nil {
+			return
+		}
+		ibs.AccessedAddresses()
+		ibs.ResetVersionedIO()
+	}
 	var miningCommitTx = func(txn types.Transaction, coinbase accounts.Address, vmConfig *vm.Config, chainConfig *chain.Config, ibs *state.IntraBlockState, current *MiningBlock) ([]*types.Log, error) {
 		ibs.SetTxContext(current.Header.Number.Uint64(), txnIdx)
@@ -552,6 +643,11 @@ LOOP:
 
 		// Start executing the transaction
 		logs, err := miningCommitTx(txn, coinbase, vmConfig, chainConfig, ibs, current)
+		if err == nil {
+			recordTxIO()
+		} else {
+			clearTxIO()
+		}
 		if errors.Is(err, protocol.ErrGasLimitReached) {
 			// Skip the env out-of-gas transaction
 			logger.Debug(fmt.Sprintf("[%s] Gas limit exceeded for env block", logPrefix), "hash", txn.Hash(), "sender", from)
diff --git a/execution/stagedsync/stage_mining_finish.go b/execution/stagedsync/stage_mining_finish.go
index 60fa682b93f..12fb40cb3e4 100644
--- a/execution/stagedsync/stage_mining_finish.go
+++ b/execution/stagedsync/stage_mining_finish.go
@@ -19,6 +19,7 @@ package stagedsync
 import (
 	"fmt"
 
+	"github.com/erigontech/erigon/common/empty"
 	"github.com/erigontech/erigon/common/log/v3"
 	"github.com/erigontech/erigon/db/kv"
 	"github.com/erigontech/erigon/db/services"
@@ -69,6 +70,13 @@ func SpawnMiningFinishStage(s *StageState, sd *execctx.SharedDomains, tx kv.Temp
 	//}
 
 	block := types.NewBlockForAsembling(current.Header, current.Txns, current.Uncles, current.Receipts, current.Withdrawals)
+	if current.BlockAccessList != nil {
+		block.SetBlockAccessList(current.BlockAccessList)
+		if block.BlockAccessListHash() == nil {
+			hash := empty.BlockAccessListHash
+			block.HeaderNoCopy().BlockAccessListHash = &hash
+		}
+	}
 	blockWithReceipts := &types.BlockWithReceipts{Block: block, Receipts: current.Receipts, Requests: current.Requests}
 
 	*current = MiningBlock{} // hack to clean global data
diff --git a/execution/state/intra_block_state.go b/execution/state/intra_block_state.go
index ff1a3d92eb7..e19fd791d6a 100644
--- a/execution/state/intra_block_state.go
+++ b/execution/state/intra_block_state.go
@@ -1959,6 +1959,12 @@ func (sdb *IntraBlockState) VersionedReads() ReadSet {
 	return sdb.versionedReads
 }
 
+func (sdb *IntraBlockState) ResetVersionedIO() {
+	sdb.versionedReads = nil
+	sdb.versionedWrites = nil
+	sdb.dep = UnknownDep
+}
+
 // VersionedWrites returns the current versioned write set if this block
 // checkDirty - is mainly for testing, for block processing this is called
 // after the block execution is completed and non dirty writes (due to reversions)
diff --git a/execution/tests/engine_api_bal_test.go b/execution/tests/engine_api_bal_test.go
new file mode 100644
index 00000000000..6d3b38c859f
--- /dev/null
+++ b/execution/tests/engine_api_bal_test.go
@@ -0,0 +1,131 @@
+// Copyright 2025 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package executiontests
+
+import (
+	"context"
+	"math/big"
+	"testing"
+
+	"github.com/holiman/uint256"
+	"github.com/stretchr/testify/require"
+
+	"github.com/erigontech/erigon/common"
+	"github.com/erigontech/erigon/common/crypto"
+	"github.com/erigontech/erigon/execution/types"
+	"github.com/erigontech/erigon/execution/types/accounts"
+	"github.com/erigontech/erigon/rpc"
+)
+
+func TestEngineApiGeneratedPayloadIncludesBlockAccessList(t *testing.T) {
+	eat := DefaultEngineApiTester(t)
+	receiver := common.HexToAddress("0x333")
+	eat.Run(t, func(ctx context.Context, t *testing.T, eat EngineApiTester) {
+		sender := crypto.PubkeyToAddress(eat.CoinbaseKey.PublicKey)
+		txn, err := eat.Transactor.SubmitSimpleTransfer(eat.CoinbaseKey, receiver, big.NewInt(1))
+		require.NoError(t, err)
+
+		payload, err := eat.MockCl.BuildCanonicalBlock(ctx)
+		require.NoError(t, err)
+		err = eat.TxnInclusionVerifier.VerifyTxnsInclusion(ctx, payload.ExecutionPayload, txn.Hash())
+		require.NoError(t, err)
+
+		balBytes := payload.ExecutionPayload.BlockAccessList
+		require.NotNil(t, balBytes)
+		require.NotEmpty(t, *balBytes)
+
+		bal, err := types.DecodeBlockAccessListBytes(*balBytes)
+		require.NoError(t, err)
+		require.NoError(t, bal.Validate())
+		require.NotEmpty(t, bal)
+
+		blockNumber := rpc.BlockNumber(payload.ExecutionPayload.BlockNumber)
+		block, err := eat.RpcApiClient.GetBlockByNumber(ctx, blockNumber, false)
+		require.NoError(t, err)
+		require.NotNil(t, block)
+		require.Equal(t, payload.ExecutionPayload.BlockHash, block.Hash)
+		require.NotNil(t, block.BlockAccessListHash)
+		require.Equal(t, bal.Hash(), *block.BlockAccessListHash)
+
+		senderChanges := findAccountChanges(bal, accounts.InternAddress(sender))
+		receiverChanges := findAccountChanges(bal, accounts.InternAddress(receiver))
+		require.NotNil(t, senderChanges)
+		require.NotNil(t, receiverChanges)
+
+		receipt, err := eat.RpcApiClient.GetTransactionReceipt(ctx, txn.Hash())
+		require.NoError(t, err)
+		require.NotNil(t, receipt)
+
+		balIndex := uint16(receipt.TransactionIndex + 1)
+
+		senderBalance, err := eat.RpcApiClient.GetBalance(sender, rpc.LatestBlock)
+		require.NoError(t, err)
+		receiverBalance, err := eat.RpcApiClient.GetBalance(receiver, rpc.LatestBlock)
+		require.NoError(t, err)
+		senderNonce, err := eat.RpcApiClient.GetTransactionCount(sender, rpc.LatestBlock)
+		require.NoError(t, err)
+
+		senderBalanceChange := findBalanceChange(senderChanges, balIndex)
+		require.NotNilf(t, senderBalanceChange, "missing sender balance change at index %d\n%s", balIndex, bal.DebugString())
+		expectedSenderBalance, overflow := uint256.FromBig(senderBalance)
+		require.False(t, overflow)
+		require.True(t, senderBalanceChange.Value.Eq(expectedSenderBalance))
+
+		receiverBalanceChange := findBalanceChange(receiverChanges, balIndex)
+		require.NotNilf(t, receiverBalanceChange, "missing receiver balance change at index %d\n%s", balIndex, bal.DebugString())
+		expectedReceiverBalance, overflow := uint256.FromBig(receiverBalance)
+		require.False(t, overflow)
+		require.True(t, receiverBalanceChange.Value.Eq(expectedReceiverBalance))
+
+		senderNonceChange := findNonceChange(senderChanges, balIndex)
+		require.NotNilf(t, senderNonceChange, "missing sender nonce change at index %d\n%s", balIndex, bal.DebugString())
+		require.Equal(t, senderNonce.Uint64(), senderNonceChange.Value)
+	})
+}
+
+func findAccountChanges(bal types.BlockAccessList, addr accounts.Address) *types.AccountChanges {
+	for _, ac := range bal {
+		if ac != nil && ac.Address == addr {
+			return ac
+		}
+	}
+	return nil
+}
+
+func findBalanceChange(ac *types.AccountChanges, index uint16) *types.BalanceChange {
+	if ac == nil {
+		return nil
+	}
+	for _, change := range ac.BalanceChanges {
+		if change != nil && change.Index == index {
+			return change
+		}
+	}
+	return nil
+}
+
+func findNonceChange(ac *types.AccountChanges, index uint16) *types.NonceChange {
+	if ac == nil {
+		return nil
+	}
+	for _, change := range ac.NonceChanges {
+		if change != nil && change.Index == index {
+			return change
+		}
+	}
+	return nil
+}
diff --git a/execution/tests/engine_api_reorg_test.go b/execution/tests/engine_api_reorg_test.go
index 33b38477607..219f6643d20 100644
--- a/execution/tests/engine_api_reorg_test.go
+++ b/execution/tests/engine_api_reorg_test.go
@@ -61,7 +61,8 @@ func TestEngineApiInvalidPayloadThenValidCanonicalFcuWithPayloadShouldSucceed(t
 	status, err := eat.MockCl.InsertNewPayload(ctx, b3Faulty)
 	require.NoError(t, err)
 	require.Equal(t, enginetypes.InvalidStatus, status.Status)
-	require.True(t, strings.Contains(status.ValidationError.Error().Error(), "wrong trie root"))
+	t.Log(status.ValidationError.Error().Error())
+	require.True(t, strings.Contains(status.ValidationError.Error().Error(), "invalid block hash"))
 	// build b4 on the canonical chain
 	txn, err = changer.Change(transactOpts)
 	require.NoError(t, err)
diff --git a/execution/tests/engine_api_tester.go b/execution/tests/engine_api_tester.go
index b8e51d58bcf..2478f1ad542 100644
--- a/execution/tests/engine_api_tester.go
+++ b/execution/tests/engine_api_tester.go
@@ -81,7 +81,6 @@ func DefaultEngineApiTesterGenesis(t *testing.T) (*types.Genesis, *ecdsa.Private
 	var chainConfig chain.Config
 	err = copier.CopyWithOption(&chainConfig, chain.AllProtocolChanges, copier.Option{DeepCopy: true})
 	require.NoError(t, err)
-	chainConfig.AmsterdamTime = nil // test uses osaka spec, unset amsterdam config to prevent "unsupported fork" error
 	genesis := &types.Genesis{
 		Config:   &chainConfig,
 		Coinbase: coinbaseAddr,
diff --git a/execution/tests/mock_cl.go b/execution/tests/mock_cl.go
index 50fe0b611d9..59ecd783b3f 100644
--- a/execution/tests/mock_cl.go
+++ b/execution/tests/mock_cl.go
@@ -160,7 +160,7 @@ func (cl *MockCl) BuildNewPayload(ctx context.Context, opts ...BlockBuildingOpti
 	// get the newly built block
 	newPayload, err := retryEngine(ctx, []enginetypes.EngineStatus{enginetypes.SyncingStatus}, []error{&engine_helpers.UnknownPayloadErr}, func() (*enginetypes.GetPayloadResponse, enginetypes.EngineStatus, error) {
-		r, err := cl.engineApiClient.GetPayloadV5(ctx, *fcuRes.PayloadId)
+		r, err := cl.engineApiClient.GetPayloadV6(ctx, *fcuRes.PayloadId)
 		if err != nil {
 			return nil, "", err
 		}
@@ -179,7 +179,7 @@ func (cl *MockCl) InsertNewPayload(ctx context.Context, p *MockClPayload) (*engi
 	clParentBlockRoot := p.ParentBeaconBlockRoot
 	return retryEngine(ctx, []enginetypes.EngineStatus{enginetypes.SyncingStatus}, nil, func() (*enginetypes.PayloadStatus, enginetypes.EngineStatus, error) {
-		r, err := cl.engineApiClient.NewPayloadV4(ctx, elPayload, []common.Hash{}, clParentBlockRoot, []hexutil.Bytes{})
+		r, err := cl.engineApiClient.NewPayloadV5(ctx, elPayload, []common.Hash{}, clParentBlockRoot, []hexutil.Bytes{})
 		if err != nil {
 			return nil, "", err
 		}
diff --git a/execution/types/block_access_list.go b/execution/types/block_access_list.go
index 190bd8af6a1..0b90484e1a8 100644
--- a/execution/types/block_access_list.go
+++ b/execution/types/block_access_list.go
@@ -1020,6 +1020,74 @@ func ConvertBlockAccessListFromTypesProto(protoList []*typesproto.BlockAccessLis
 	return &res
 }
 
+func ConvertBlockAccessListToTypesProto(bal BlockAccessList) []*typesproto.BlockAccessListAccount {
+	if bal == nil {
+		return nil
+	}
+	out := make([]*typesproto.BlockAccessListAccount, 0, len(bal))
+	for _, account := range bal {
+		if account == nil {
+			continue
+		}
+		balAccount := &typesproto.BlockAccessListAccount{
+			Address: gointerfaces.ConvertAddressToH160(account.Address.Value()),
+		}
+		for _, storageChange := range account.StorageChanges {
+			if storageChange == nil {
+				continue
+			}
+			slotChanges := &typesproto.BlockAccessListSlotChanges{
+				Slot: gointerfaces.ConvertHashToH256(storageChange.Slot.Value()),
+			}
+			for _, change := range storageChange.Changes {
+				if change == nil {
+					continue
+				}
+				slotChanges.Changes = append(slotChanges.Changes, &typesproto.BlockAccessListStorageChange{
+					Index: uint32(change.Index),
+					Value: gointerfaces.ConvertHashToH256(change.Value),
+				})
+			}
+			balAccount.StorageChanges = append(balAccount.StorageChanges, slotChanges)
+		}
+		for _, read := range account.StorageReads {
+			balAccount.StorageReads = append(balAccount.StorageReads, gointerfaces.ConvertHashToH256(read.Value()))
+		}
+		for _, balanceChange := range account.BalanceChanges {
+			if balanceChange == nil {
+				continue
+			}
+			val := balanceChange.Value
+			balAccount.BalanceChanges = append(balAccount.BalanceChanges, &typesproto.BlockAccessListBalanceChange{
+				Index: uint32(balanceChange.Index),
+				Value: gointerfaces.ConvertUint256IntToH256(&val),
+			})
+		}
+		for _, nonceChange := range account.NonceChanges {
+			if nonceChange == nil {
+				continue
+			}
+			balAccount.NonceChanges = append(balAccount.NonceChanges, &typesproto.BlockAccessListNonceChange{
+				Index: uint32(nonceChange.Index),
+				Value: nonceChange.Value,
+			})
+		}
+		for _, codeChange := range account.CodeChanges {
+			if codeChange == nil {
+				continue
+			}
+			data := make([]byte, len(codeChange.Data))
+			copy(data, codeChange.Data)
+			balAccount.CodeChanges = append(balAccount.CodeChanges, &typesproto.BlockAccessListCodeChange{
+				Index: uint32(codeChange.Index),
+				Data:  data,
+			})
+		}
+		out = append(out, balAccount)
+	}
+	return out
+}
+
 func ConvertBlockAccessListFromExecutionProto(protoList []*executionproto.BlockAccessListAccount) *hexutil.Bytes {
 	if protoList == nil {
 		return nil
diff --git a/rpc/ethapi/api.go b/rpc/ethapi/api.go
index 5c2c9146de2..257059fb937 100644
--- a/rpc/ethapi/api.go
+++ b/rpc/ethapi/api.go
@@ -462,6 +462,9 @@ func RPCMarshalHeader(head *types.Header) map[string]any {
 	if head.RequestsHash != nil {
 		result["requestsHash"] = head.RequestsHash
 	}
+	if head.BlockAccessListHash != nil {
+		result["blockAccessListHash"] = head.BlockAccessListHash
+	}
 
 	// For Gnosis only
 	if head.AuRaSeal != nil {