From a18e75ff4d22affefae372feaaa6e3e264886fbc Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 21 Nov 2025 14:28:52 +0100 Subject: [PATCH 01/15] feat: forced inclusion --- CHANGELOG.md | 6 + apps/evm/cmd/run.go | 33 +- apps/evm/go.mod | 9 +- apps/evm/go.sum | 14 +- apps/grpc/cmd/run.go | 31 +- apps/testapp/cmd/run.go | 101 +- apps/testapp/go.mod | 4 +- apps/testapp/go.sum | 3 +- block/components.go | 9 + block/components_test.go | 3 + block/internal/da/client.go | 67 +- block/internal/da/client_test.go | 85 +- .../internal/da/forced_inclusion_retriever.go | 177 ++++ .../da/forced_inclusion_retriever_test.go | 344 +++++++ block/internal/executing/executor.go | 69 +- .../internal/executing/executor_lazy_test.go | 8 + .../internal/executing/executor_logic_test.go | 6 + .../executing/executor_restart_test.go | 14 +- block/internal/syncing/syncer.go | 80 ++ .../syncing/syncer_forced_inclusion_test.go | 428 ++++++++ block/public.go | 34 +- core/execution/execution.go | 1 + core/sequencer/dummy.go | 10 + core/sequencer/sequencing.go | 13 +- .../adr/adr-019-forced-inclusion-mechanism.md | 920 +++++++++++------- go.mod | 4 +- go.sum | 5 +- pkg/cmd/run_node.go | 4 +- pkg/config/config.go | 55 +- pkg/config/config_test.go | 62 +- pkg/config/defaults.go | 11 +- pkg/genesis/genesis.go | 26 +- pkg/genesis/genesis_test.go | 45 +- pkg/genesis/io_test.go | 45 +- sequencers/based/sequencer.go | 185 ++++ sequencers/based/sequencer_test.go | 569 +++++++++++ sequencers/common/size_validation.go | 27 + sequencers/common/size_validation_test.go | 141 +++ sequencers/single/queue.go | 20 + sequencers/single/queue_test.go | 154 +++ sequencers/single/sequencer.go | 245 ++++- sequencers/single/sequencer_test.go | 410 +++++++- test/mocks/sequencer.go | 84 ++ types/CLAUDE.md | 11 +- types/epoch.go | 50 + types/epoch_test.go | 300 ++++++ types/state.go | 3 +- 47 files changed, 4351 insertions(+), 574 deletions(-) create mode 100644 block/internal/da/forced_inclusion_retriever.go create 
mode 100644 block/internal/da/forced_inclusion_retriever_test.go create mode 100644 block/internal/syncing/syncer_forced_inclusion_test.go create mode 100644 sequencers/based/sequencer.go create mode 100644 sequencers/based/sequencer_test.go create mode 100644 sequencers/common/size_validation.go create mode 100644 sequencers/common/size_validation_test.go create mode 100644 types/epoch.go create mode 100644 types/epoch_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 233422099..9d1f1f847 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Implement forced inclusion and based sequencing ([#2797](https://github.com/evstack/ev-node/pull/2797)) + This change requires adding a `da_epoch_forced_inclusion` field in the `genesis.json` file. + To enable this feature, set the forced inclusion namespace in the `evnode.yaml`. + ### Changed - Rename `evm-single` to `evm` and `grpc-single` to `evgrpc` for clarity. 
[#2839](https://github.com/evstack/ev-node/pull/2839) diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go index 8d7926404..5ce22e8e3 100644 --- a/apps/evm/cmd/run.go +++ b/apps/evm/cmd/run.go @@ -12,6 +12,7 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" @@ -25,6 +26,8 @@ import ( "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -55,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -101,6 +104,8 @@ func init() { } // createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. 
func createSequencer( ctx context.Context, logger zerolog.Logger, @@ -109,6 +114,25 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + singleMetrics, err := single.NopMetrics() if err != nil { return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) @@ -123,11 +147,18 @@ func createSequencer( nodeConfig.Node.BlockTime.Duration, singleMetrics, nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, ) if err != nil { return nil, fmt.Errorf("failed to create single sequencer: %w", err) } + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/evm/go.mod b/apps/evm/go.mod index 126891fed..3be04a51b 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -12,10 +12,10 @@ replace ( require ( github.com/celestiaorg/go-header v0.7.3 - github.com/ethereum/go-ethereum v1.16.5 + github.com/ethereum/go-ethereum v1.16.7 github.com/evstack/ev-node v1.0.0-beta.10 github.com/evstack/ev-node/core v1.0.0-beta.5 - github.com/evstack/ev-node/da v1.0.0-beta.6 + github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 github.com/ipfs/go-datastore v0.9.0 github.com/rs/zerolog v1.34.0 @@ -26,6 +26,7 @@ require ( connectrpc.com/connect v1.19.1 // indirect connectrpc.com/grpcreflect v1.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -33,7 +34,7 @@ require ( github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/consensys/gnark-crypto v0.18.1 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -44,7 +45,7 @@ require ( github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/ethereum/c-kzg-4844/v2 v2.1.3 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect 
github.com/filecoin-project/go-clock v0.1.0 // indirect diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 173fb3ba0..e2f2939d4 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -16,6 +16,8 @@ github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= @@ -55,8 +57,8 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= -github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -95,12 +97,12 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= -github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= -github.com/ethereum/go-ethereum v1.16.5 h1:GZI995PZkzP7ySCxEFaOPzS8+bd8NldE//1qvQDQpe0= -github.com/ethereum/go-ethereum v1.16.5/go.mod h1:kId9vOtlYg3PZk9VwKbGlQmSACB5ESPTBGT+M9zjmok= +github.com/ethereum/go-ethereum v1.16.7 h1:qeM4TvbrWK0UC0tgkZ7NiRsmBGwsjqc64BHo20U59UQ= +github.com/ethereum/go-ethereum v1.16.7/go.mod h1:Fs6QebQbavneQTYcA39PEKv2+zIjX7rPUZ14DER46wk= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 h1:xo0mZz3CJtntP1RPLFDBubBKpNkqStImt9H9N0xysj8= diff --git 
a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 484f51d7a..4439aee2e 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -9,6 +9,7 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" @@ -22,6 +23,8 @@ import ( "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -57,7 +60,7 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -118,6 +121,25 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). 
+ Msg("based sequencer initialized") + + return basedSeq, nil + } + singleMetrics, err := single.NopMetrics() if err != nil { return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) @@ -132,11 +154,18 @@ func createSequencer( nodeConfig.Node.BlockTime.Duration, singleMetrics, nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, ) if err != nil { return nil, fmt.Errorf("failed to create single sequencer: %w", err) } + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c72d220cd..dd3440b86 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -5,17 +5,24 @@ import ( "fmt" "path/filepath" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/da/jsonrpc" "github.com/evstack/ev-node/node" - rollcmd "github.com/evstack/ev-node/pkg/cmd" - genesispkg "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -23,16 +30,16 @@ var RunCmd = &cobra.Command{ Use: "start", Aliases: []string{"node", "run"}, Short: "Run the testapp node", - RunE: func(cmd *cobra.Command, args []string) error { - nodeConfig, err := rollcmd.ParseConfig(cmd) + RunE: func(command *cobra.Command, args []string) error { + nodeConfig, err := cmd.ParseConfig(command) 
if err != nil { return err } - logger := rollcmd.SetupLogger(nodeConfig.Log) + logger := cmd.SetupLogger(nodeConfig.Log) // Get KV endpoint flag - kvEndpoint, _ := cmd.Flags().GetString(flagKVEndpoint) + kvEndpoint, _ := command.Flags().GetString(flagKVEndpoint) if kvEndpoint == "" { logger.Info().Msg("KV endpoint flag not set, using default from http_server") } @@ -51,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -66,11 +73,6 @@ var RunCmd = &cobra.Command{ return err } - singleMetrics, err := single.NopMetrics() - if err != nil { - return err - } - // Start the KV executor HTTP server if kvEndpoint != "" { // Only start if endpoint is provided httpServer := kvexecutor.NewHTTPServer(executor, kvEndpoint) @@ -83,7 +85,7 @@ var RunCmd = &cobra.Command{ } genesisPath := filepath.Join(filepath.Dir(nodeConfig.ConfigPath()), "genesis.json") - genesis, err := genesispkg.LoadGenesis(genesisPath) + genesis, err := genesis.LoadGenesis(genesisPath) if err != nil { return fmt.Errorf("failed to load genesis: %w", err) } @@ -92,16 +94,8 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - sequencer, err := single.NewSequencer( - ctx, - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(ctx, logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -111,6 +105,65 @@ var RunCmd = &cobra.Command{ return 
err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return cmd.StartNode(logger, command, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } + +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + + return sequencer, nil +} diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 0e6ed2f89..ff077783c 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -16,6 +16,7 @@ require ( github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.11.1 ) @@ -79,7 +80,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -129,7 +130,6 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index eeafba1a8..c2e0e46a7 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -229,8 +229,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/block/components.go b/block/components.go index 546cda62c..bd29f9244 100644 --- a/block/components.go +++ b/block/components.go @@ -245,6 +245,15 @@ func NewAggregatorComponents( return nil, fmt.Errorf("failed to create reaper: %w", err) } + if config.Node.BasedSequencer { // no submissions needed for based sequencer + return &Components{ + Executor: executor, + Reaper: reaper, + Cache: cacheManager, + errorCh: errorCh, + }, nil + } + // Create DA client and submitter for aggregator nodes (with signer for submission) daClient := NewDAClient(da, config, logger) daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) diff --git a/block/components_test.go b/block/components_test.go index eadf45328..c288f5322 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -203,6 +203,9 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) { mockExec.On("InitChain", mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]byte("state-root"), uint64(1024), nil).Once() + // Mock SetDAHeight to be called during initialization + mockSeq.On("SetDAHeight", uint64(0)).Return().Once() + // Mock GetNextBatch to return empty batch mockSeq.On("GetNextBatch", mock.Anything, mock.Anything). 
Return(&coresequencer.GetNextBatchResponse{ diff --git a/block/internal/da/client.go b/block/internal/da/client.go index 571e5f765..01c5bf981 100644 --- a/block/internal/da/client.go +++ b/block/internal/da/client.go @@ -20,29 +20,35 @@ type Client interface { Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve + RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve GetHeaderNamespace() []byte GetDataNamespace() []byte + GetForcedInclusionNamespace() []byte + HasForcedInclusionNamespace() bool GetDA() coreda.DA } // client provides a reusable wrapper around the core DA interface // with common configuration for namespace handling and timeouts. type client struct { - da coreda.DA - logger zerolog.Logger - defaultTimeout time.Duration - namespaceBz []byte - namespaceDataBz []byte + da coreda.DA + logger zerolog.Logger + defaultTimeout time.Duration + namespaceBz []byte + namespaceDataBz []byte + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool } // Config contains configuration for the DA client. type Config struct { - DA coreda.DA - Logger zerolog.Logger - DefaultTimeout time.Duration - Namespace string - DataNamespace string + DA coreda.DA + Logger zerolog.Logger + DefaultTimeout time.Duration + Namespace string + DataNamespace string + ForcedInclusionNamespace string } // NewClient creates a new DA client with pre-calculated namespace bytes. 
@@ -51,12 +57,20 @@ func NewClient(cfg Config) *client { cfg.DefaultTimeout = 30 * time.Second } + hasForcedInclusionNs := cfg.ForcedInclusionNamespace != "" + var namespaceForcedInclusionBz []byte + if hasForcedInclusionNs { + namespaceForcedInclusionBz = coreda.NamespaceFromString(cfg.ForcedInclusionNamespace).Bytes() + } + return &client{ - da: cfg.DA, - logger: cfg.Logger.With().Str("component", "da_client").Logger(), - defaultTimeout: cfg.DefaultTimeout, - namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), + da: cfg.DA, + logger: cfg.Logger.With().Str("component", "da_client").Logger(), + defaultTimeout: cfg.DefaultTimeout, + namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), + namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), + namespaceForcedInclusionBz: namespaceForcedInclusionBz, + hasForcedInclusionNs: hasForcedInclusionNs, } } @@ -248,6 +262,19 @@ func (c *client) RetrieveData(ctx context.Context, height uint64) coreda.ResultR return c.Retrieve(ctx, height, c.namespaceDataBz) } +// RetrieveForcedInclusion retrieves blobs from the forced inclusion namespace at the specified height. +func (c *client) RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve { + if !c.hasForcedInclusionNs { + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "forced inclusion namespace not configured", + }, + } + } + return c.Retrieve(ctx, height, c.namespaceForcedInclusionBz) +} + // GetHeaderNamespace returns the header namespace bytes. func (c *client) GetHeaderNamespace() []byte { return c.namespaceBz @@ -258,6 +285,16 @@ func (c *client) GetDataNamespace() []byte { return c.namespaceDataBz } +// GetForcedInclusionNamespace returns the forced inclusion namespace bytes. 
+func (c *client) GetForcedInclusionNamespace() []byte { + return c.namespaceForcedInclusionBz +} + +// HasForcedInclusionNamespace returns whether forced inclusion namespace is configured. +func (c *client) HasForcedInclusionNamespace() bool { + return c.hasForcedInclusionNs +} + // GetDA returns the underlying DA interface for advanced usage. func (c *client) GetDA() coreda.DA { return c.da diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go index 788aab2b3..7bc7e972a 100644 --- a/block/internal/da/client_test.go +++ b/block/internal/da/client_test.go @@ -68,11 +68,12 @@ func TestNewClient(t *testing.T) { { name: "with all namespaces", cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - DefaultTimeout: 5 * time.Second, - Namespace: "test-ns", - DataNamespace: "test-data-ns", + DA: &mockDA{}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", }, }, { @@ -104,6 +105,13 @@ func TestNewClient(t *testing.T) { assert.Assert(t, len(client.namespaceBz) > 0) assert.Assert(t, len(client.namespaceDataBz) > 0) + if tt.cfg.ForcedInclusionNamespace != "" { + assert.Assert(t, client.hasForcedInclusionNs) + assert.Assert(t, len(client.namespaceForcedInclusionBz) > 0) + } else { + assert.Assert(t, !client.hasForcedInclusionNs) + } + expectedTimeout := tt.cfg.DefaultTimeout if expectedTimeout == 0 { expectedTimeout = 30 * time.Second @@ -113,12 +121,50 @@ func TestNewClient(t *testing.T) { } } +func TestClient_HasForcedInclusionNamespace(t *testing.T) { + tests := []struct { + name string + cfg Config + expected bool + }{ + { + name: "with forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }, + expected: true, + }, + { + name: "without forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: 
zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + assert.Equal(t, client.HasForcedInclusionNamespace(), tt.expected) + }) + } +} + func TestClient_GetNamespaces(t *testing.T) { cfg := Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - Namespace: "test-header", - DataNamespace: "test-data", + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-header", + DataNamespace: "test-data", + ForcedInclusionNamespace: "test-fi", } client := NewClient(cfg) @@ -129,8 +175,29 @@ func TestClient_GetNamespaces(t *testing.T) { dataNs := client.GetDataNamespace() assert.Assert(t, len(dataNs) > 0) + fiNs := client.GetForcedInclusionNamespace() + assert.Assert(t, len(fiNs) > 0) + // Namespaces should be different assert.Assert(t, string(headerNs) != string(dataNs)) + assert.Assert(t, string(headerNs) != string(fiNs)) + assert.Assert(t, string(dataNs) != string(fiNs)) +} + +func TestClient_RetrieveForcedInclusion_NotConfigured(t *testing.T) { + cfg := Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + } + + client := NewClient(cfg) + ctx := context.Background() + + result := client.RetrieveForcedInclusion(ctx, 100) + assert.Equal(t, result.Code, coreda.StatusError) + assert.Assert(t, result.Message != "") } func TestClient_GetDA(t *testing.T) { diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go new file mode 100644 index 000000000..5f5047338 --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever.go @@ -0,0 +1,177 @@ +package da + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/types" +) + +// ErrForceInclusionNotConfigured is returned when the forced 
inclusion namespace is not configured. +var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") + +// ForcedInclusionRetriever handles retrieval of forced inclusion transactions from DA. +type ForcedInclusionRetriever struct { + client Client + genesis genesis.Genesis + logger zerolog.Logger + daEpochSize uint64 +} + +// ForcedInclusionEvent contains forced inclusion transactions retrieved from DA. +type ForcedInclusionEvent struct { + StartDaHeight uint64 + EndDaHeight uint64 + Txs [][]byte +} + +// NewForcedInclusionRetriever creates a new forced inclusion retriever. +func NewForcedInclusionRetriever( + client Client, + genesis genesis.Genesis, + logger zerolog.Logger, +) *ForcedInclusionRetriever { + return &ForcedInclusionRetriever{ + client: client, + genesis: genesis, + logger: logger.With().Str("component", "forced_inclusion_retriever").Logger(), + daEpochSize: genesis.DAEpochForcedInclusion, + } +} + +// RetrieveForcedIncludedTxs retrieves forced inclusion transactions at the given DA height. +// It respects epoch boundaries and only fetches at epoch start. +func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + if !r.client.HasForcedInclusionNamespace() { + return nil, ErrForceInclusionNotConfigured + } + + epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + + if daHeight != epochStart { + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). 
+ Msg("not at epoch start - returning empty transactions") + + return &ForcedInclusionEvent{ + StartDaHeight: daHeight, + EndDaHeight: daHeight, + Txs: [][]byte{}, + }, nil + } + + // We're at epoch start - fetch transactions from DA + currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + + event := &ForcedInclusionEvent{ + StartDaHeight: epochStart, + Txs: [][]byte{}, + } + + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("epoch_num", currentEpochNumber). + Msg("retrieving forced included transactions from DA") + + epochStartResult := r.client.RetrieveForcedInclusion(ctx, epochStart) + if epochStartResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Msg("epoch start height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) + } + + epochEndResult := epochStartResult + if epochStart != epochEnd { + epochEndResult = r.client.RetrieveForcedInclusion(ctx, epochEnd) + if epochEndResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_end", epochEnd). + Msg("epoch end height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) + } + } + + lastProcessedHeight := epochStart + + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochStartResult, epochStart); err != nil { + return nil, err + } + + // Process heights between start and end (exclusive) + for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { + result := r.client.RetrieveForcedInclusion(ctx, epochHeight) + + // If any intermediate height is from future, break early + if result.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). 
+ Uint64("epoch_height", epochHeight). + Uint64("last_processed", lastProcessedHeight). + Msg("reached future DA height within epoch - stopping") + break + } + + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, result, epochHeight); err != nil { + return nil, err + } + } + + // Process epoch end (only if different from start) + if epochEnd != epochStart { + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochEndResult, epochEnd); err != nil { + return nil, err + } + } + + event.EndDaHeight = lastProcessedHeight + + r.logger.Info(). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", lastProcessedHeight). + Int("tx_count", len(event.Txs)). + Msg("retrieved forced inclusion transactions") + + return event, nil +} + +// processForcedInclusionBlobs processes blobs from a single DA height for forced inclusion. +func (r *ForcedInclusionRetriever) processForcedInclusionBlobs( + event *ForcedInclusionEvent, + lastProcessedHeight *uint64, + result coreda.ResultRetrieve, + height uint64, +) error { + if result.Code == coreda.StatusNotFound { + r.logger.Debug().Uint64("height", height).Msg("no forced inclusion blobs at height") + *lastProcessedHeight = height + return nil + } + + if result.Code != coreda.StatusSuccess { + return fmt.Errorf("failed to retrieve forced inclusion blobs at height %d: %s", height, result.Message) + } + + // Process each blob as a transaction + for _, blob := range result.Data { + if len(blob) > 0 { + event.Txs = append(event.Txs, blob) + } + } + + *lastProcessedHeight = height + + r.logger.Debug(). + Uint64("height", height). + Int("blob_count", len(result.Data)). 
+ Msg("processed forced inclusion blobs") + + return nil +} diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go new file mode 100644 index 000000000..e58612573 --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -0,0 +1,344 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "gotest.tools/v3/assert" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" +) + +func TestNewForcedInclusionRetriever(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + assert.Assert(t, retriever != nil) + assert.Equal(t, retriever.daEpochSize, uint64(10)) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoNamespace(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + // No forced inclusion namespace + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not configured") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := 
NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + // Height 105 is not an epoch start (100, 110, 120, etc. are epoch starts) + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 105) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(105)) + assert.Equal(t, event.EndDaHeight, uint64(105)) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t *testing.T) { + testBlobs := [][]byte{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return &coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + return testBlobs, nil + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + // Height 100 is an epoch start + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(100)) + assert.Equal(t, len(event.Txs), len(testBlobs)) + assert.DeepEqual(t, event.Txs[0], testBlobs[0]) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailable(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + 
return nil, coreda.ErrHeightFromFuture + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not yet available") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrBlobNotFound + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t *testing.T) { + callCount := 0 + testBlobsByHeight := map[uint64][][]byte{ + 100: {[]byte("tx1"), []byte("tx2")}, + 101: {[]byte("tx3")}, + 102: {[]byte("tx4"), []byte("tx5"), []byte("tx6")}, + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + callCount++ + blobs, exists := testBlobsByHeight[height] + if !exists { + return nil, coreda.ErrBlobNotFound + } + ids := make([]coreda.ID, len(blobs)) + for i := range blobs { + ids[i] = 
[]byte("id") + } + return &coreda.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + // Return blobs based on current call count + switch callCount { + case 1: + return testBlobsByHeight[100], nil + case 2: + return testBlobsByHeight[101], nil + case 3: + return testBlobsByHeight[102], nil + default: + return nil, errors.New("unexpected call") + } + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 3, // Epoch: 100-102 + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(102)) + + // Should have collected all txs from all heights + expectedTxCount := len(testBlobsByHeight[100]) + len(testBlobsByHeight[101]) + len(testBlobsByHeight[102]) + assert.Equal(t, len(event.Txs), expectedTxCount) +} + +func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + + tests := []struct { + name string + result coreda.ResultRetrieve + height uint64 + expectedTxCount int + expectedLastHeight uint64 + expectError bool + }{ + { + name: "success with blobs", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: 
coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "not found", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + }, + }, + height: 100, + expectedTxCount: 0, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "error status", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "test error", + }, + }, + height: 100, + expectError: true, + }, + { + name: "empty blobs are skipped", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), {}, []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &ForcedInclusionEvent{ + Txs: [][]byte{}, + } + lastHeight := uint64(0) + + err := retriever.processForcedInclusionBlobs(event, &lastHeight, tt.result, tt.height) + + if tt.expectError { + assert.Assert(t, err != nil) + } else { + assert.NilError(t, err) + assert.Equal(t, len(event.Txs), tt.expectedTxCount) + assert.Equal(t, lastHeight, tt.expectedLastHeight) + } + }) + } +} diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 5578fe043..856932ce2 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -67,6 +68,8 @@ type Executor struct { // - State transitions and validation // - P2P broadcasting of produced blocks // - DA submission of headers and data +// +// When BasedSequencer is enabled, signer can be nil as blocks are not signed. 
func NewExecutor( store store.Store, exec coreexecutor.Executor, @@ -82,17 +85,20 @@ func NewExecutor( options common.BlockOptions, errorCh chan<- error, ) (*Executor, error) { - if signer == nil { - return nil, errors.New("signer cannot be nil") - } + // For based sequencer, signer is optional as blocks are not signed + if !config.Node.BasedSequencer { + if signer == nil { + return nil, errors.New("signer cannot be nil") + } - addr, err := signer.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address: %w", err) - } + addr, err := signer.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address: %w", err) + } - if !bytes.Equal(addr, genesis.ProposerAddress) { - return nil, common.ErrNotProposer + if !bytes.Equal(addr, genesis.ProposerAddress) { + return nil, common.ErrNotProposer + } } return &Executor{ @@ -204,6 +210,7 @@ func (e *Executor) initializeState() error { } e.setLastState(state) + e.sequencer.SetDAHeight(state.DAHeight) // Initialize store height using batch for atomicity batch, err := e.store.NewBatch(e.ctx) @@ -379,8 +386,12 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to apply block: %w", err) } + // set the DA height in the sequencer + newState.DAHeight = e.sequencer.GetDAHeight() + // signing the header is done after applying the block // as for signing, the state of the block may be required by the signature payload provider. 
+ // For based sequencer, this will return an empty signature signature, err := e.signHeader(header.Header) if err != nil { return fmt.Errorf("failed to sign header: %w", err) @@ -440,8 +451,9 @@ func (e *Executor) produceBlock() error { // retrieveBatch gets the next batch of transactions from the sequencer func (e *Executor) retrieveBatch(ctx context.Context) (*BatchData, error) { req := coresequencer.GetNextBatchRequest{ - Id: []byte(e.genesis.ChainID), - MaxBytes: common.DefaultMaxBlobSize, + Id: []byte(e.genesis.ChainID), + MaxBytes: common.DefaultMaxBlobSize, + LastBatchData: [][]byte{}, // Can be populated if needed for sequencer context } res, err := e.sequencer.GetNextBatch(ctx, req) @@ -495,16 +507,28 @@ func (e *Executor) createBlock(ctx context.Context, height uint64, batchData *Ba lastSignature = *lastSignaturePtr } - // Get signer info - pubKey, err := e.signer.GetPublic() - if err != nil { - return nil, nil, fmt.Errorf("failed to get public key: %w", err) - } + // Get signer info and validator hash + var pubKey crypto.PubKey + var validatorHash types.Hash - // Get validator hash - validatorHash, err := e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) - if err != nil { - return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + if e.signer != nil { + var err error + pubKey, err = e.signer.GetPublic() + if err != nil { + return nil, nil, fmt.Errorf("failed to get public key: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } + } else { + // For based sequencer without signer, use nil pubkey and compute validator hash + var err error + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, nil) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } } // Create header @@ -586,6 +610,11 @@ func (e *Executor) 
applyBlock(ctx context.Context, header types.Header, data *ty // signHeader signs the block header func (e *Executor) signHeader(header types.Header) (types.Signature, error) { + // For based sequencer, return empty signature as there is no signer + if e.signer == nil { + return types.Signature{}, nil + } + bz, err := e.options.AggregatorNodeSignatureBytesProvider(&header) if err != nil { return nil, fmt.Errorf("failed to get signature payload: %w", err) diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index b72f0a856..a11cf6a1c 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -73,6 +73,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -91,6 +92,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // Direct call to produceBlock should work (this is what lazy timer does) err = exec.produceBlock() require.NoError(t, err) @@ -113,6 +116,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). 
Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) @@ -183,6 +188,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -201,6 +207,8 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 9aa79d0c4..6029186e8 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -95,6 +95,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() // initialize state (creates genesis block in store and sets state) require.NoError(t, exec.initializeState()) @@ -113,6 +114,8 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // produce one block err = exec.produceBlock() require.NoError(t, err) @@ -180,6 +183,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return([]byte("i0"), uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -196,6 +200,8 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), []byte("i0")). Return([]byte("i1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + require.NoError(t, exec.produceBlock()) h1, err := memStore.Height(context.Background()) require.NoError(t, err) diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 3f0e8b500..14daccddc 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -73,6 +73,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) // Set up context for first executor @@ -92,6 +93,8 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -189,6 +192,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { require.NoError(t, err) // Initialize state for second executor (should load existing state) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) // Set up context for second executor @@ -206,7 +210,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), currentState2.AppHash). Return([]byte("new_root_2"), uint64(1024), nil).Once() - // Note: mockSeq2 should NOT receive any calls because pending block should be used + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + + // Note: mockSeq2 should NOT receive GetNextBatch calls because pending block should be used err = exec2.produceBlock() require.NoError(t, err) @@ -289,6 +295,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) exec1.ctx, exec1.cancel = context.WithCancel(context.Background()) @@ -307,6 +314,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -338,6 +347,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { ) require.NoError(t, err) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) exec2.ctx, exec2.cancel = context.WithCancel(context.Background()) defer exec2.cancel() @@ -360,6 +370,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec2.produceBlock() require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 5aa4394c4..3185ff6e7 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -3,6 +3,8 @@ package syncing import ( "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" "fmt" "sync" @@ -57,6 +59,7 @@ type Syncer struct { // Handlers daRetriever DARetriever + fiRetriever *da.ForcedInclusionRetriever p2pHandler p2pHandler // Logging @@ -115,6 +118,7 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) + s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -452,6 +456,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { switch { case errors.Is(err, errInvalidBlock): // do not reschedule + case errors.Is(err, errMaliciousProposer): + 
s.sendCriticalError(fmt.Errorf("sequencer malicious. Restart the node with --node.aggregator --node.based_sequencer or keep the chain halted: %w", err)) case errors.Is(err, errInvalidState): s.sendCriticalError(fmt.Errorf("invalid state detected (block-height %d, state-height %d) "+ "- block references do not match local state. Manual intervention required: %w", event.Header.Height(), @@ -517,6 +523,16 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { return err } + // Verify forced inclusion transactions if configured + if err := s.verifyForcedInclusionTxs(currentState, data); err != nil { + s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") + if errors.Is(err, errMaliciousProposer) { + s.cache.RemoveHeaderDAIncluded(headerHash) + return err + } + } + + // Apply block newState, err := s.applyBlock(header.Header, data, currentState) if err != nil { return fmt.Errorf("failed to apply block: %w", err) @@ -639,6 +655,70 @@ func (s *Syncer) validateBlock(currState types.State, data *types.Data, header * return nil } +var errMaliciousProposer = errors.New("malicious proposer detected") + +// hashTx returns a hex-encoded SHA256 hash of the transaction. 
+func hashTx(tx []byte) string { + hash := sha256.Sum256(tx) + return hex.EncodeToString(hash[:]) +} + +// verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block +func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { + if s.fiRetriever == nil { + return nil + } + + // Retrieve forced inclusion transactions from DA + forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(s.ctx, currentState.DAHeight) + if err != nil { + if errors.Is(err, da.ErrForceInclusionNotConfigured) { + s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") + return nil + } + + return fmt.Errorf("failed to retrieve forced included txs from DA: %w", err) + } + + // If no forced inclusion transactions found, nothing to verify + if len(forcedIncludedTxsEvent.Txs) == 0 { + s.logger.Debug().Uint64("da_height", currentState.DAHeight).Msg("no forced inclusion transactions to verify") + return nil + } + + blockTxMap := make(map[string]struct{}) + for _, tx := range data.Txs { + blockTxMap[hashTx(tx)] = struct{}{} + } + + // Check if all forced inclusion transactions are present in the block + var missingTxs [][]byte + for _, forcedTx := range forcedIncludedTxsEvent.Txs { + if _, ok := blockTxMap[hashTx(forcedTx)]; !ok { + missingTxs = append(missingTxs, forcedTx) + } + } + + if len(missingTxs) > 0 { + s.logger.Error(). + Uint64("height", data.Height()). + Uint64("da_height", currentState.DAHeight). + Uint64("da_epoch_start", forcedIncludedTxsEvent.StartDaHeight). + Uint64("da_epoch_end", forcedIncludedTxsEvent.EndDaHeight). + Int("missing_count", len(missingTxs)). + Int("total_forced", len(forcedIncludedTxsEvent.Txs)). 
+ Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions missing from block") + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs))) + } + + s.logger.Debug(). + Uint64("height", data.Height()). + Int("forced_txs", len(forcedIncludedTxsEvent.Txs)). + Msg("all forced inclusion transactions verified in block") + + return nil +} + // sendCriticalError sends a critical error to the error channel without blocking func (s *Syncer) sendCriticalError(err error) { if s.errorCh != nil { diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go new file mode 100644 index 000000000..1948109d9 --- /dev/null +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -0,0 +1,428 @@ +package syncing + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/store" + testmocks "github.com/evstack/ev-node/test/mocks" + "github.com/evstack/ev-node/types" +) + +func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + 
DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + 
})).Return([][]byte{dataBin}, nil).Once() + + // Create block data that includes the forced transaction blob + data := makeData(gen.ChainID, 1, 1) + data.Txs[0] = types.Tx(dataBin) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since all forced txs are included + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that does NOT include the forced transaction blob + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] = types.Tx([]byte("regular_tx_1")) + data.Txs[1] = types.Tx([]byte("regular_tx_2")) + + currentState := 
s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since forced tx blob is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return two forced inclusion transaction blobs + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create two forced inclusion transaction blobs in DA + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2}, nil).Once() + + // Create block data that includes only one of the forced transaction blobs + data := makeData(gen.ChainID, 1, 2) + 
data.Txs[0] = types.Tx(dataBin1) + data.Txs[1] = types.Tx([]byte("regular_tx")) + // dataBin2 is missing + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since dataBin2 is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return no forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since no forced txs to verify + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: 
"tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + // Leave ForcedInclusionNamespace empty + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + // No ForcedInclusionNamespace - not configured + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since namespace not configured + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} diff --git a/block/public.go b/block/public.go index f084f2757..4f1a7417e 100644 --- a/block/public.go +++ b/block/public.go @@ -1,12 +1,14 @@ package block import ( + "context" "time" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" "github.com/rs/zerolog" ) @@ -41,10 +43,32 @@ func NewDAClient( logger zerolog.Logger, ) DAClient { return da.NewClient(da.Config{ - DA: daLayer, - Logger: logger, - 
DefaultTimeout: 10 * time.Second, - Namespace: config.DA.GetNamespace(), - DataNamespace: config.DA.GetDataNamespace(), + DA: daLayer, + Logger: logger, + DefaultTimeout: 10 * time.Second, + Namespace: config.DA.GetNamespace(), + DataNamespace: config.DA.GetDataNamespace(), + ForcedInclusionNamespace: config.DA.GetForcedInclusionNamespace(), }) } + +// ErrForceInclusionNotConfigured is returned when force inclusion is not configured. +// It is exported because sequencers need to check for this error. +var ErrForceInclusionNotConfigured = da.ErrForceInclusionNotConfigured + +// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA +type ForcedInclusionEvent = da.ForcedInclusionEvent + +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error) +} + +// NewForcedInclusionRetriever creates a new forced inclusion retriever +func NewForcedInclusionRetriever( + client DAClient, + genesis genesis.Genesis, + logger zerolog.Logger, +) ForcedInclusionRetriever { + return da.NewForcedInclusionRetriever(client, genesis, logger) +} diff --git a/core/execution/execution.go b/core/execution/execution.go index 896e2d65a..5085ebe57 100644 --- a/core/execution/execution.go +++ b/core/execution/execution.go @@ -52,6 +52,7 @@ type Executor interface { // Requirements: // - Must validate state transition against previous state root // - Must handle empty transaction list + // - Must gracefully handle gibberish transactions // - Must maintain deterministic execution // - Must respect context cancellation/timeout // - The rest of the rules are defined by the specific execution layer diff --git a/core/sequencer/dummy.go b/core/sequencer/dummy.go index 5f44dae2a..ef614173a 100644 --- a/core/sequencer/dummy.go +++ b/core/sequencer/dummy.go @@ -64,3 +64,13 @@ func (s 
*DummySequencer) VerifyBatch(ctx context.Context, req VerifyBatchRequest Status: true, }, nil } + +// SetDAHeight sets the current DA height for the sequencer +func (s *DummySequencer) SetDAHeight(height uint64) { + // No-op for dummy sequencer +} + +// GetDAHeight returns the current DA height for the sequencer +func (s *DummySequencer) GetDAHeight() uint64 { + return 0 +} diff --git a/core/sequencer/sequencing.go b/core/sequencer/sequencing.go index 211681589..e97ef93dd 100644 --- a/core/sequencer/sequencing.go +++ b/core/sequencer/sequencing.go @@ -7,15 +7,15 @@ import ( "time" ) -// Sequencer is a generic interface for a sequencer +// Sequencer defines the minimal sequencing interface used by the block executor. type Sequencer interface { - // SubmitBatchTxs submits a batch of transactions to the sequencer + // SubmitBatchTxs submits a batch of transactions from executor to sequencer // Id is the unique identifier for the target chain // Batch is the batch of transactions to submit // returns an error if any from the sequencer SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - // GetNextBatch returns the next batch of transactions from sequencer to + // GetNextBatch returns the next batch of transactions from the sequencer and from DA // Id is the unique identifier for the target chain // LastBatchHash is the cryptographic hash of the last batch received by the // MaxBytes is the maximum number of bytes to return in the batch @@ -27,6 +27,13 @@ type Sequencer interface { // BatchHash is the cryptographic hash of the batch to verify // returns a boolean indicating if the batch is valid and an error if any from the sequencer VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) + + // SetDAHeight sets the current DA height for the sequencer + // This allows the sequencer to track DA height for forced inclusion retrieval + SetDAHeight(height uint64) + + // GetDAHeight returns the 
current DA height for the sequencer + GetDAHeight() uint64 } // Batch is a collection of transactions diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 378dd9b17..51e1be63c 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -4,435 +4,697 @@ - 2025-03-24: Initial draft - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. +- 2025-11-10: Updated to reflect actual implementation ## Context -Evolve currently supports a single sequencer implementation as described in ADR-013. While this approach provides a simple and efficient solution, it introduces a single point of failure that can impact the liveness of the network. If the sequencer goes down or becomes unresponsive, the chain cannot progress. +In a single-sequencer rollup architecture, users depend entirely on the sequencer to include their transactions in blocks. This creates several problems: -To address this limitation and improve the liveness properties of applications built with Evolve, we propose implementing a forced inclusion mechanism. This mechanism will allow transactions to be included directly from the Data Availability (DA) layer when the sequencer is unresponsive, creating an "unstoppable" property for Evolve-based chains. +1. **Censorship Risk**: A malicious or coerced sequencer can selectively exclude transactions +2. **Liveness Failure**: If the sequencer goes offline, no new transactions can be processed +3. **Centralization**: Users must trust a single entity to behave honestly +4. **No Recourse**: Users have no alternative path to submit transactions if the sequencer refuses them -This enhancement aligns with the requirements defined in the [L2 Beat framework](https://forum.l2beat.com/t/the-stages-framework/291#p-516-stage-1-requirements-3) for Stage 1 L2s, advancing Evolve's capabilities as a robust sequencer library. 
+While eventual solutions like decentralized sequencer networks exist, they introduce significant complexity. We need a simpler mechanism that provides censorship resistance and liveness guarantees while maintaining the performance benefits of a single sequencer. ## Alternative Approaches ### Decentralized Sequencer -A fully decentralized sequencer could solve the liveness issue by distributing sequencing responsibilities across multiple nodes. However, this approach introduces significant complexity in terms of consensus, leader election, and coordination between nodes. It would require substantial development effort and resources, making it less suitable as an immediate solution. +A fully decentralized sequencer network would eliminate single points of failure but requires: + +- Complex consensus mechanisms +- Increased latency due to coordination +- More infrastructure and operational complexity ### Automatic Sequencer Failover -Another approach would be to implement an automatic failover mechanism where backup sequencers take over when the primary sequencer fails. While simpler than a fully decentralized solution, this approach still requires managing multiple sequencers and introduces complexity in coordination and state transfer between them. +Implementing automatic failover to backup sequencers when the primary goes down requires: -## Decision +- Complex monitoring and health checks +- Coordination between sequencers to prevent forks +- Does not solve censorship issues with a malicious sequencer -We will implement a forced inclusion mechanism for the Evolve single sequencer architecture that uses a time-based inclusion delay approach. This approach will: +## Decision -1. Track when transactions are first seen in terms of DA block time -2. Require a minimum number of DA blocks to pass before including a direct transaction -3. 
Let full nodes enforce inclusion within a fixed period of time window +We implement a **forced inclusion mechanism** that allows users to submit transactions directly to the Data Availability (DA) layer. This approach provides: -The mechanism will be designed to maintain backward compatibility with existing Evolve deployments while providing enhanced liveness guarantees. +1. **Censorship Resistance**: Users can always bypass the sequencer by posting to DA +2. **Verifiable Inclusion**: Full nodes verify that sequencers include all forced transactions +3. **Based Rollup Option**: A based sequencer mode for fully DA-driven transaction ordering +4. **Simplicity**: No complex timing mechanisms or fallback modes ### High-Level Architecture -The following diagram illustrates the high-level architecture of the forced inclusion mechanism: - -```mermaid -flowchart TB - subgraph DAL["Data Availability Layer"] - end - - subgraph SEQ["Single Sequencer"] - subgraph NO["Normal Operation"] - direction TB - process["Process user txs"] - create["Create batches"] - include["Include direct txs from DA"] - checkDelay["Check MinDADelay"] - end - end - - subgraph FN["Full Nodes"] - subgraph NormalOp["Normal Operation"] - follow["Follow sequencer produced blocks"] - validate["Validate time windows"] - validateDelay["Validate MinDADelay"] - end - - subgraph FallbackMode["Fallback Mode"] - detect["Detect sequencer down"] - scan["Scan DA for direct txs"] - createBlocks["Create deterministic blocks from direct txs"] - end - end - - SEQ -->|"Publish Batches"| DAL - DAL -->|"Direct Txs"| SEQ - DAL -->|"Direct Txs"| FN - SEQ -->|"Blocks"| FN - NormalOp <--> FallbackMode +``` +┌─────────────────────────────────────────────────────────────────┐ +│ User Actions │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ Normal Path: Forced Inclusion Path: │ +│ Submit tx to Sequencer ────► Submit tx directly to DA │ +│ (Fast) (Censorship-resistant) │ +│ │ 
+└──────────┬────────────────────────────────────┬─────────────────┘ + │ │ + ▼ ▼ + ┌─────────────┐ ┌──────────────────┐ + │ Sequencer │ │ DA Layer │ + │ (Mempool) │ │ (Forced Inc. NS) │ + └──────┬──────┘ └─────────┬────────┘ + │ │ + │ 1. Fetch forced inc. txs │ + │◄────────────────────────────────────┘ + │ + │ 2. Prepend forced txs to batch + │ + ▼ + ┌─────────────┐ + │ Block │ + │ Production │ + └──────┬──────┘ + │ + │ 3. Submit block to DA + │ + ▼ + ┌─────────────┐ + │ DA Layer │ + └──────┬──────┘ + │ + │ 4. Full nodes retrieve block + │ + ▼ + ┌─────────────────────┐ + │ Full Nodes │ + │ (Verification) │ + │ │ + │ 5. Verify forced │ + │ inc. txs are │ + │ included │ + └─────────────────────┘ ``` +### Key Components + +1. **Forced Inclusion Namespace**: A dedicated DA namespace where users can post transactions +2. **DA Retriever**: Fetches forced inclusion transactions from DA using epoch-based scanning +3. **Single Sequencer**: Enhanced to include forced transactions from DA in every batch +4. **Based Sequencer**: Alternative sequencer that ONLY retrieves transactions from DA +5. **Verification**: Full nodes validate that blocks include all forced transactions + ## Detailed Design ### User Requirements -- Developers need a mechanism to ensure their chains can progress even when the single sequencer is unavailable -- The system should maintain a deterministic and consistent state regardless of sequencer availability -- The transition between sequencer-led and forced inclusion modes should be seamless -- Transactions must be included within a fixed time window from when they are first seen -- Direct transactions must wait for a minimum number of DA blocks before inclusion +Users can submit transactions in two ways: -### Systems Affected +1. **Normal Path**: Submit to sequencer's mempool/RPC (fast, low cost) +2. 
**Forced Inclusion Path**: Submit directly to DA forced inclusion namespace (censorship-resistant) -The implementation of the forced inclusion mechanism will affect several components of the Evolve framework: +No additional requirements or monitoring needed from users. -1. **Single Sequencer**: Must be modified to track and include direct transactions from the DA layer within the time window and after minimum DA block delay -2. **Full Node**: Must be updated to recognize and validate blocks with forced inclusions -3. **Block Processing Logic**: Must implement the modified fork choice rule -4. **DA Client**: Must be enhanced to scan for direct transactions -5. **Transaction Validation**: Must validate both sequencer-batched and direct transactions +### Systems Affected + +1. **DA Layer**: New namespace for forced inclusion transactions +2. **Sequencer (Single)**: Fetches and includes forced transactions +3. **Sequencer (Based)**: New sequencer type that only uses DA transactions +4. **DA Retriever**: New component for fetching forced transactions +5. **Syncer**: Verifies forced transaction inclusion in blocks +6. 
**Configuration**: New fields for forced inclusion settings ### Data Structures -#### Direct Transaction Tracking +#### Forced Inclusion Event ```go -type ForcedInclusionConfig struct { - MaxInclusionDelay uint64 // Max inclusion time in DA block time units - MinDADelay uint64 // Minimum number of DA blocks before including a direct tx +type ForcedIncludedEvent struct { + Txs [][]byte // Forced inclusion transactions + StartDaHeight uint64 // Start of DA height range + EndDaHeight uint64 // End of DA height range } +``` + +#### DA Retriever Interface -type DirectTransaction struct { - TxHash common.Hash - FirstSeenAt uint64 // DA block time when the tx was seen - Included bool // Whether it has been included in a block - IncludedAt uint64 // Height at which it was included +```go +type DARetriever interface { + // Retrieve forced inclusion transactions from DA at specified height + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) } +``` -type DirectTxTracker struct { - txs map[common.Hash]DirectTransaction // Map of direct transactions - mu sync.RWMutex // Mutex for thread-safe access - latestSeenTime uint64 // Latest DA block time scanned - latestDAHeight uint64 // Latest DA block height +### APIs and Interfaces + +#### DA Retriever + +The DA Retriever component handles fetching forced inclusion transactions: + +```go +type daRetriever struct { + da coreda.DA + cache cache.CacheManager + genesis genesis.Genesis + logger zerolog.Logger + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool + daEpochSize uint64 } + +// RetrieveForcedIncludedTxsFromDA fetches forced inclusion transactions +// Only fetches at epoch boundaries to prevent redundant DA queries +func (r *daRetriever) RetrieveForcedIncludedTxsFromDA( + ctx context.Context, + daHeight uint64, +) (*ForcedIncludedEvent, error) ``` -#### Sequencer Status Tracking +#### Single Sequencer Extension + +The single sequencer is enhanced to fetch and include 
forced transactions: ```go -type SequencerStatus struct { - IsActive bool // Whether the sequencer is considered active - LastActiveTime uint64 // Last DA block time where sequencer posted a batch - InactiveTime uint64 // Time since last sequencer activity +type Sequencer struct { + // ... existing fields ... + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx + queue *BatchQueue +} + +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 +} + +func (s *Sequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + // 1. Fetch forced inclusion transactions from DA + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight.Load()) + + // 2. Process forced txs with size validation and pending queue + forcedTxs := s.processForcedInclusionTxs(forcedEvent, req.MaxBytes) + + // 3. Get batch from mempool queue + batch, err := s.queue.Next(ctx) + + // 4. Prepend forced txs and trim batch to fit MaxBytes + if len(forcedTxs) > 0 { + forcedTxsSize := calculateSize(forcedTxs) + remainingBytes := req.MaxBytes - forcedTxsSize + + // Trim batch transactions to fit + trimmedBatchTxs := trimToSize(batch.Transactions, remainingBytes) + + // Return excluded txs to front of queue + if len(trimmedBatchTxs) < len(batch.Transactions) { + excludedBatch := batch.Transactions[len(trimmedBatchTxs):] + s.queue.Prepend(ctx, Batch{Transactions: excludedBatch}) + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) 
+ } + + return &GetNextBatchResponse{Batch: batch} +} + +// processForcedInclusionTxs validates and queues forced txs +func (s *Sequencer) processForcedInclusionTxs(event *ForcedInclusionEvent, maxBytes uint64) [][]byte { + var validatedTxs [][]byte + var newPendingTxs []pendingForcedInclusionTx + currentSize := 0 + + // Process pending txs from previous epochs first + for _, pendingTx := range s.pendingForcedInclusionTxs { + if !ValidateBlobSize(pendingTx.Data) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(pendingTx.Data), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += len(pendingTx.Data) + } + + // Process new txs from this epoch + for _, tx := range event.Txs { + if !ValidateBlobSize(tx) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(tx), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + validatedTxs = append(validatedTxs, tx) + currentSize += len(tx) + } + + s.pendingForcedInclusionTxs = newPendingTxs + return validatedTxs } ``` -### APIs and Interfaces +#### Based Sequencer -#### Enhanced DA Client Interface +A new sequencer implementation that ONLY retrieves transactions from DA: ```go -type DAClient interface { - // Existing methods - // ... 
+type BasedSequencer struct { + fiRetriever ForcedInclusionRetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + mu sync.RWMutex + daHeight uint64 + txQueue [][]byte // Buffer for transactions exceeding batch size +} + +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + + + // Always fetch forced inclusion transactions from DA + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight) + if err != nil && !errors.Is(err, ErrHeightFromFuture) { + return nil, err + } - // New method for forced inclusion - GetDirectTransactions(ctx context.Context, fromTime, toTime uint64) ([][]byte, error) - // Note: SubmitDirectTransaction is removed as it's not a responsibility of the node + // Validate and add transactions to queue + for _, tx := range forcedEvent.Txs { + if ValidateBlobSize(tx) { + s.txQueue = append(s.txQueue, tx) + } + } + + // Create batch from queue respecting MaxBytes + batch := s.createBatchFromQueue(req.MaxBytes) + + return &GetNextBatchResponse{Batch: batch} +} + +// SubmitBatchTxs is a no-op for based sequencer +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) { + // Based sequencer ignores submitted transactions + return &SubmitBatchTxsResponse{}, nil } ``` -#### Sequencer Interface Extensions +#### Syncer Verification + +Full nodes verify forced inclusion in the sync process: ```go -// New methods added to the Sequencer interface -func (s *Sequencer) ScanDALayerForDirectTxs(ctx context.Context) error -func (s *Sequencer) IncludeDirectTransactions(ctx context.Context, batch *Batch) error +func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error { + // 1. 
Retrieve forced inclusion transactions from DA + forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) + if err != nil { + return err + } + + // 2. Build map of transactions in block + blockTxMap := make(map[string]struct{}) + for _, tx := range data.Txs { + blockTxMap[string(tx)] = struct{}{} + } + + // 3. Verify all forced transactions are included + for _, forcedTx := range forcedEvent.Txs { + if _, ok := blockTxMap[string(forcedTx)]; !ok { + return errMaliciousProposer + } + } + + return nil +} ``` -#### Full Node Interface Extensions +### Implementation Details + +#### Epoch-Based Fetching + +To avoid excessive DA queries, the DA Retriever uses epoch-based fetching: + +- **Epoch Size**: Configurable number of DA blocks (e.g., 10) +- **Epoch Boundaries**: Deterministically calculated based on `DAStartHeight` +- **Fetch Timing**: Only fetch at epoch start to prevent duplicate fetches ```go -// New methods added to the Node interface -func (n *Node) CheckSequencerStatus(ctx context.Context) (bool, error) -func (n *Node) ProcessDirectTransactions(ctx context.Context) error -func (n *Node) ValidateBlockTimeWindow(ctx context.Context, block *types.Block) error +// Calculate epoch boundaries +func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { + epochNum := r.calculateEpochNumber(daHeight) + start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize + end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 + return start, end +} + +// Only fetch at epoch start +if daHeight != epochStart { + return &ForcedIncludedEvent{Txs: [][]byte{}} +} + +// Fetch all heights in epoch range +for height := epochStart; height <= epochEnd; height++ { + // Fetch forced inclusion blobs from this DA height +} ``` -### Implementation Changes - -#### Single Sequencer Node Changes - -1. 
**DA Layer Scanner**: - - Implement a periodic scanner that queries the DA layer for direct transactions - - Track all direct transactions in the DirectTxTracker data structure - - Update the latest seen DA block time and height after each scan - -2. **Transaction Inclusion Logic**: - - Modify the batch creation process to include direct transactions from the DA layer - - Ensure all direct transactions are included within the MaxInclusionDelay time window - - Check that transactions have waited for MinDADelay DA blocks - - Track transaction inclusion times and enforce both delay constraints - -3. **Validation Rules**: - - Implement time window validation to ensure transactions are included within MaxInclusionDelay - - Implement DA block delay validation to ensure transactions wait for MinDADelay blocks - - Track both time-based and DA block-based delays for each transaction - -4. **Recovery Mechanism**: - - Add logic to detect when the sequencer comes back online after downtime - - Implement state synchronization to catch up with any forced inclusions that occurred during downtime - - Resume normal operation by building on top of the canonical chain tip - -#### Sequencer Operation Flow - -The following diagram illustrates the operation flow for the sequencer with forced inclusion: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Sequencer Operation Flow │ -└─────────────────┬───────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Process User Transactions │ │ 2. 
Periodic DA Layer Scanning │ -│ │ │ │ -│ - Accept transactions from users│ │ - Query DA layer for direct txs │ -│ - Validate and queue txs │ │ - Update DirectTxTracker │ -│ - Process queue based on policy │ │ - Track latest seen DA block time │ -└─────────────────┬───────────────┘ └────────────────────┬───────────────────┘ - │ │ - ▼ ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 3. Batch Creation │ │ 4. Direct Transaction Inclusion │ -│ │ │ │ -│ - Create batch of txs │◄─────┤ - Include unprocessed direct txs │ -│ - Apply ordering policy │ │ - Prioritize by first seen │ -│ - Calculate batch metadata │ │ - Mark included txs as processed │ -└─────────────────┬───────────────┘ └────────────────────────────────────────┘ - │ - ▼ -┌──────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 5. Time Window Validation │ │ 6. Block Production │ -│ │ │ │ -│ - Check transaction timestamps │ │ - Create block with batch │ -│ - Ensure within MaxInclusionDelay│─────►│ - Sign and publish block │ -│ - Track inclusion times │ │ │ -└──────────────────────────────────┘ └─────────────────┬──────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ 7. 
DA Batch Submission │ - │ │ - │ - Submit batch to DA layer │ - │ - Track submission status │ - │ - Handle retry on failure │ - └────────────────────────────────────────┘ +#### Height From Future Handling + +When DA height is not yet available: + +```go +if errors.Is(err, coreda.ErrHeightFromFuture) { + // Keep current DA height, return empty batch + // Retry same height on next call + return &ForcedIncludedEvent{Txs: [][]byte{}}, nil +} ``` -#### Full Node Operation Flow - -The following diagram illustrates the operation flow for full nodes with forced inclusion support: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Full Node Operation Flow │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Normal Operation Mode │ │ 2. Sequencer Status Monitoring │ -│ │ │ │ -│ - Receive blocks from sequencer │ │ - Monitor sequencer activity on DA │ -│ - Validate time windows │◄───►│ - Track time since last sequencer batch│ -│ - Apply state transitions │ │ - Check against downtime threshold │ -└─────────────────────────────────┘ └───────────────────┬────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ Is Sequencer Down? │ - │ (Based on configurable threshold) │ - └───────────┬───────────────┬────────────┘ - │ │ - │ Yes │ No - ▼ │ - ┌────────────────────────┐ │ - │ 3. Enter Fallback Mode │ │ - │ │ │ - │ - Switch to direct tx │ │ - │ processing │ │ - │ - Notify subsystems │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 4. DA Layer Scanning │ │ - │ │ │ - │ - Scan DA for direct │ │ - │ transactions │ │ - │ - Track latest seen │ │ - │ DA block time │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 5. 
Deterministic Block │ │ - │ Creation │ │ - │ │ │ - │ - Create blocks with │ │ - │ direct txs only │ │ - │ - Apply deterministic │ │ - │ ordering rules │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ 6. Block Processing and State Update │ -│ │ -│ - Execute transactions │ -│ - Update state │ -│ - Persist blocks and state │ -└─────────────────────────────────────────────────────────────────────────────────┘ +#### Size Validation and Max Bytes Handling + +Both sequencers enforce strict size limits to prevent DoS and ensure batches never exceed the DA layer's limits: + +```go +// Size validation utilities +const AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB DA layer limit + +// ValidateBlobSize checks against absolute DA layer limit +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks against per-batch limit +func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} ``` -### Fallback Mode Transition - -The following diagram illustrates the transition between normal operation and fallback mode: - -```mermaid -sequenceDiagram - participant DA as Data Availability Layer - participant S as Sequencer - participant R as Chain - - Note over S,R: Normal Operation - DA->>S: DA Block N - S->>R: Sequencer Block N - DA->>S: DA Block N+1 - S->>R: Sequencer Block N+1 - DA->>S: DA Block N+2 - S->>R: Sequencer Block N+2 - - Note over S,R: Sequencer Down - DA->>R: DA Block N+3 (Direct Txs) - Note over R: Fallback Mode Start - R->>R: Create Block from Direct Txs - DA->>R: DA Block N+4 (Direct Txs) - R->>R: Create Block from Direct Txs - DA->>R: DA Block N+5 (Direct Txs) - R->>R: Create Block from Direct Txs - - Note over S,R: Sequencer Back Online - DA->>S: DA Block N+6 - S->>R: Sequencer Block N+6 - DA->>S: DA Block N+7 - S->>R: 
Sequencer Block N+7 - - Note over R: Timeline shows: - Note over R: 1. Normal sequencer operation - Note over R: 2. Sequencer downtime & fallback - Note over R: 3. Sequencer recovery +**Key Behaviors**: + +- **Absolute validation**: Blobs exceeding 1.5MB are permanently rejected +- **Batch size limits**: `req.MaxBytes` is NEVER exceeded in any batch +- **Transaction preservation**: + - Single sequencer: Trimmed batch txs returned to queue via `Prepend()` + - Based sequencer: Excess txs remain in `txQueue` for next batch + - Forced txs that don't fit go to `pendingForcedInclusionTxs` (single) or stay in `txQueue` (based) + +#### Transaction Queue Management + +The based sequencer uses a simplified queue to handle transactions: + +```go +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { + var batch [][]byte + var totalBytes uint64 + + for i, tx := range s.txQueue { + txSize := uint64(len(tx)) + // Always respect maxBytes, even for first transaction + if totalBytes+txSize > maxBytes { + // Would exceed max bytes, keep remaining in queue + s.txQueue = s.txQueue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // Clear queue if we processed everything + if i == len(s.txQueue)-1 { + s.txQueue = s.txQueue[:0] + } + } + + return &Batch{Transactions: batch} +} ``` -### Configuration +**Note**: The based sequencer is simpler than the single sequencer - it doesn't need a separate pending queue because `txQueue` naturally handles all transaction buffering. 
-The forced inclusion mechanism will be configurable with the following parameters:
+### Configuration
 
 ```go
-type ForcedInclusionConfig struct {
-	Enabled bool // Whether forced inclusion is enabled
-	MaxInclusionDelay uint64 // Maximum time window for transaction inclusion
-	SequencerDownTime uint64 // Time after which the sequencer is considered down
-	MinDADelay uint64 // Minimum number of DA blocks before including a direct tx
+type Genesis struct {
+	ChainID string
+	StartTime time.Time
+	InitialHeight uint64
+	ProposerAddress []byte
+	DAStartHeight uint64
+	// Number of DA blocks to scan per forced inclusion fetch
+	// Higher values reduce DA queries but increase latency
+	// Lower values increase DA queries but improve responsiveness
+	DAEpochForcedInclusion uint64
+}
+
+type DAConfig struct {
+	// ... existing fields ...
+
+	// Namespace for forced inclusion transactions
+	ForcedInclusionNamespace string
+}
+
+type NodeConfig struct {
+	// ... existing fields ...
+
+	// Run node with based sequencer (requires aggregator mode)
+	BasedSequencer bool
 }
 ```
 
+### Configuration Examples
+
+#### Traditional Sequencer with Forced Inclusion
+
+```yaml
+# genesis.json
+{
+  "chain_id": "my-rollup",
+  "da_epoch_forced_inclusion": 10 # Scan 10 DA blocks at a time
+}
+
+# config.toml
+[da]
+forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564"
+
+[node]
+aggregator = true
+based_sequencer = false # Use traditional sequencer
+```
+
+#### Based Sequencer (DA-Only)
+
+```yaml
+# genesis.json
+{
+  "chain_id": "my-rollup",
+  "da_epoch_forced_inclusion": 5 # Scan 5 DA blocks at a time
+}
+
+# config.toml
+[da]
+forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564"
+
+[node]
+aggregator = true
+based_sequencer = true # Use based sequencer
+```
+
+### Sequencer Operation Flows
+
+#### Single Sequencer Flow
+
+```
+1. Timer triggers GetNextBatch
+2.
Fetch forced inclusion txs from DA (via DA Retriever) + - Only at epoch boundaries + - Scan epoch range for forced transactions +3. Get batch from mempool queue +4. Prepend forced txs to batch +5. Return batch for block production +``` + +#### Based Sequencer Flow + +``` +1. Timer triggers GetNextBatch +2. Check transaction queue for buffered txs +3. If queue empty or epoch boundary: + - Fetch forced inclusion txs from DA + - Add to queue +4. Create batch from queue (respecting MaxBytes) +5. Return batch for block production +``` + +### Full Node Verification Flow + +``` +1. Receive block from DA or P2P +2. Before applying block: + a. Fetch forced inclusion txs from DA at block's DA height + b. Build map of transactions in block + c. Verify all forced txs are in block + d. If missing: reject block, flag malicious proposer +3. Apply block if verification passes +``` + ### Efficiency Considerations -- DA layer scanning is integrated into the core block processing pipeline for continuous monitoring -- Direct transactions are indexed by hash for quick lookups -- The sequencer status is tracked by DA block time rather than block heights -- Time-based tracking simplifies the implementation and reduces overhead -- DA block height tracking adds minimal overhead to existing block processing +1. **Epoch-Based Fetching**: Reduces DA queries by batching multiple DA heights +2. **Deterministic Epochs**: All nodes calculate same epoch boundaries +3. **Fetch at Epoch Start**: Prevents duplicate fetches as DA height progresses +4. **Transaction Queue**: Buffers excess transactions across multiple blocks +5. **Conditional Fetching**: Only when forced inclusion namespace is configured +6. **Size Pre-validation**: Invalid blobs rejected early, before batch construction +7. 
**Efficient Queue Operations**: + - Single sequencer: `Prepend()` reuses space before head position + - Based sequencer: Simple slice operations for queue management + +**DA Query Frequency**: + +Every `DAEpochForcedInclusion` DA blocks ### Security Considerations -- The mechanism ensures that only valid direct transactions can be included in the chain -- Time window validation prevents delayed inclusion of transactions -- The configurable time threshold prevents premature switching to fallback mode due to temporary sequencer issues -- All transactions, whether sequencer-batched or direct, undergo the same validation rules -- MinDADelay provides protection against DA layer censorship by requiring multiple block proposers to collude -- Block-based delay prevents single block proposer censorship by ensuring transactions must be visible across multiple DA layer blocks -- The delay mechanism is inspired by the "Based Sequencing with Soft Confirmations" design from [Sovereign SDK #408](https://github.com/Sovereign-Labs/sovereign-sdk/issues/408), which uses deferred execution to prevent DA layer block proposers from censoring transactions +1. **Malicious Proposer Detection**: Full nodes reject blocks missing forced transactions +2. **No Timing Attacks**: Epoch boundaries are deterministic, no time-based logic +3. **Blob Size Limits**: Two-tier size validation prevents DoS + - Absolute limit (1.5MB): Blobs exceeding this are permanently rejected + - Batch limit (`MaxBytes`): Ensures no batch exceeds DA submission limits +4. **Graceful Degradation**: Continues operation if forced inclusion not configured +5. **Height Validation**: Handles "height from future" errors without state corruption +6. **Transaction Preservation**: No valid transactions are lost due to size constraints +7. 
**Strict MaxBytes Enforcement**: Batches NEVER exceed `req.MaxBytes`, preventing DA layer rejections -### Privacy Considerations +**Attack Vectors**: -- Direct transactions posted to the DA layer are publicly visible, just like sequencer-batched transactions -- No additional privacy concerns are introduced beyond the existing model +- **Censorship**: Mitigated by forced inclusion verification +- **DA Spam**: Limited by DA layer's native spam protection and two-tier blob size limits +- **Block Withholding**: Full nodes can fetch and verify from DA independently +- **Oversized Batches**: Prevented by strict size validation at multiple levels ### Testing Strategy -1. **Unit Tests**: - - Test individual components of the forced inclusion mechanism - - Verify time window validation logic - - Test the DA scanner functionality - - Test transaction inclusion timing constraints - - Test MinDADelay validation - -2. **Integration Tests**: - - Test the interaction between the sequencer and the DA layer - - Verify correct inclusion of direct transactions within time windows - - Test DA block delay validation - - Verify both time and block delay constraints - -3. **End-to-End Tests**: - - Simulate sequencer downtime and verify chain progression - - Test the transition between normal and fallback modes - - Verify the sequencer's recovery process after downtime - - Test transaction inclusion with various delay configurations - -4. **Performance Testing**: - - Measure the overhead introduced by the DA scanner - - Benchmark the system's performance in fallback mode - - Evaluate the impact of time-based tracking - - Measure the performance impact of DA block delay validation +#### Unit Tests + +1. **DA Retriever**: + - Epoch boundary calculations + - Height from future handling + - Blob size validation + - Empty epoch handling + +2. 
**Size Validation**: + - Individual blob size validation (absolute limit) + - Cumulative size checking (batch limit) + - Edge cases (empty blobs, exact limits, exceeding limits) + +3. **Single Sequencer**: + - Forced transaction prepending with size constraints + - Batch trimming when forced + batch exceeds MaxBytes + - Trimmed transactions returned to queue via Prepend + - Pending forced inclusion queue management + - DA height tracking + - Error handling + +4. **BatchQueue**: + - Prepend operation (empty queue, with items, after consuming) + - Multiple prepends (LIFO ordering) + - Space reuse before head position + +5. **Based Sequencer**: + - Queue management with size validation + - Batch size limits strictly enforced + - Transaction buffering across batches + - DA-only operation + - Always checking for new forced txs + +6. **Syncer Verification**: + - All forced txs included (pass) + - Missing forced txs (fail) + - No forced txs (pass) + +#### Integration Tests + +1. **Single Sequencer Integration**: + - Submit to mempool and forced inclusion + - Verify both included in block + - Forced txs appear first + +2. **Based Sequencer Integration**: + - Submit only to DA forced inclusion + - Verify block production + - Mempool submissions ignored + +3. **Verification Flow**: + - Full node rejects block missing forced tx + - Full node accepts block with all forced txs + +#### End-to-End Tests + +1. **User Flow**: + - User submits tx to forced inclusion namespace + - Sequencer includes tx in next epoch + - Full nodes verify inclusion + +2. **Based Rollup**: + - Start network with based sequencer + - Submit transactions to DA + - Verify block production and finalization + +3. 
**Censorship Resistance**: + - Sequencer ignores specific transaction + - User submits to forced inclusion + - Transaction included in next epoch + - Attempting to exclude causes block rejection ### Breaking Changes -This enhancement introduces no breaking changes to the existing API or data structures. It extends the current functionality by implementing time-based transaction tracking and inclusion rules, along with DA block-based delay validation, without modifying the core interfaces that developers interact with. +1. **Sequencer Initialization**: Requires `DARetriever` and `Genesis` parameters +2. **Configuration**: New fields in `DAConfig` and `NodeConfig` +3. **Syncer**: New verification step in block processing + +**Migration Path**: + +- Forced inclusion is optional (enabled when namespace configured) +- Existing deployments work without configuration changes +- Can enable incrementally per network ## Status -Proposed +Accepted and Implemented ## Consequences ### Positive -- Improves the liveness guarantees of Evolve-based chains -- Provides a path for Evolve to meet Stage 1 L2 requirements per the L2 Beat framework -- Creates an "unstoppable" property for applications, enhancing their reliability -- Maintains a deterministic chain state regardless of sequencer availability -- More predictable deadlines in DA time -- Easier to reason about for users and developers -- Prevents DA layer censorship by requiring multiple block proposers to collude +1. **Censorship Resistance**: Users have guaranteed path to include transactions +2. **Verifiable**: Full nodes enforce forced inclusion, detecting malicious sequencers +3. **Simple Design**: No complex timing mechanisms or fallback modes +4. **Based Rollup Option**: Fully DA-driven transaction ordering available (simplified implementation) +5. **Optional**: Forced inclusion can be disabled for permissioned deployments +6. **Efficient**: Epoch-based fetching minimizes DA queries +7. 
**Flexible**: Configurable epoch size allows tuning latency vs efficiency +8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections +9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost +10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures ### Negative -- Adds complexity to the block processing and validation logic -- Introduces overhead from scanning the DA layer for direct transactions -- Could potentially slow block production during fallback mode -- May need careful tuning of time window parameters -- Could be affected by variations in block production rate -- Additional complexity from tracking DA block heights for delay validation +1. **Increased Latency**: Forced transactions subject to epoch boundaries +2. **DA Dependency**: Requires DA layer to support multiple namespaces +3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion +4. **Additional Complexity**: New component (DA Retriever) and verification logic +5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) ### Neutral -- Requires application developers to consider both sequencer-batched and direct transaction flows -- Introduces configuration options that developers need to understand and set appropriately -- Changes the mental model of how the chain progresses, from purely sequencer-driven to a hybrid approach -- Users will need to use external tools or services to submit direct transactions to the DA layer during sequencer downtime +1. **Two Sequencer Types**: Choice between single (hybrid) and based (DA-only) +2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path +3. **Monitoring**: Operators should monitor forced inclusion namespace usage +4. **Documentation**: Users need guidance on when to use forced inclusion +5. 
**Genesis Parameter**: `DAEpochForcedInclusion` is a consensus parameter fixed at genesis ## References diff --git a/go.mod b/go.mod index e1495d2ec..81a458d74 100644 --- a/go.mod +++ b/go.mod @@ -84,7 +84,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -163,3 +163,5 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) + +replace github.com/evstack/ev-node/core => ./core diff --git a/go.sum b/go.sum index 120f8e51d..91a7b5f72 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.5 h1:lgxE8XiF3U9pcFgh7xuKMgsOGvLBGRyd9kc9MR4WL0o= -github.com/evstack/ev-node/core v1.0.0-beta.5/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -229,8 +227,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index 30f848982..4dbc87687 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -26,8 +26,6 @@ import ( "github.com/evstack/ev-node/pkg/signer/file" ) -const DefaultMaxBlobSize = 2 * 1024 * 1024 // 2MB - // ParseConfig is an helpers that loads the node configuration and validates it. 
func ParseConfig(cmd *cobra.Command) (rollconf.Config, error) { nodeConfig, err := rollconf.Load(cmd) @@ -93,7 +91,7 @@ func StartNode( // create a new remote signer var signer signer.Signer - if nodeConfig.Signer.SignerType == "file" && nodeConfig.Node.Aggregator { + if nodeConfig.Signer.SignerType == "file" && (nodeConfig.Node.Aggregator && !nodeConfig.Node.BasedSequencer) { // Get passphrase file path passphraseFile, err := cmd.Flags().GetString(rollconf.FlagSignerPassphraseFile) if err != nil { diff --git a/pkg/config/config.go b/pkg/config/config.go index d6b1f1553..48e2ba11e 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -33,6 +33,8 @@ const ( // FlagAggregator is a flag for running node in aggregator mode FlagAggregator = FlagPrefixEvnode + "node.aggregator" + // FlagBasedSequencer is a flag for enabling based sequencer mode (requires aggregator mode) + FlagBasedSequencer = FlagPrefixEvnode + "node.based_sequencer" // FlagLight is a flag for running the node in light mode FlagLight = FlagPrefixEvnode + "node.light" // FlagBlockTime is a flag for specifying the block time @@ -62,6 +64,8 @@ const ( FlagDANamespace = FlagPrefixEvnode + "da.namespace" // FlagDADataNamespace is a flag for specifying the DA data namespace ID FlagDADataNamespace = FlagPrefixEvnode + "da.data_namespace" + // FlagDAForcedInclusionNamespace is a flag for specifying the DA forced inclusion namespace ID + FlagDAForcedInclusionNamespace = FlagPrefixEvnode + "da.forced_inclusion_namespace" // FlagDASubmitOptions is a flag for data availability submit options FlagDASubmitOptions = FlagPrefixEvnode + "da.submit_options" // FlagDASigningAddresses is a flag for specifying multiple DA signing addresses @@ -153,15 +157,16 @@ type Config struct { // DAConfig contains all Data Availability configuration parameters type DAConfig struct { - Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). 
This is the endpoint where Rollkit will connect to submit and retrieve data."` - AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` - SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` - SigningAddresses []string `mapstructure:"signing_addresses" yaml:"signing_addresses" comment:"List of addresses to use for DA submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches. Useful for high-throughput chains."` - Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` - DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` - BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` - MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` - MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. 
Higher values provide more resilience but can delay error reporting."` + Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` + AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` + SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` + SigningAddresses []string `mapstructure:"signing_addresses" yaml:"signing_addresses" comment:"List of addresses to use for DA submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches. Useful for high-throughput chains."` + Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` + DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` + ForcedInclusionNamespace string `mapstructure:"forced_inclusion_namespace" yaml:"forced_inclusion_namespace" comment:"Namespace ID for forced inclusion transactions on the DA layer."` + BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. 
Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` + MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` + MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` } // GetNamespace returns the namespace for header submissions. @@ -178,11 +183,17 @@ func (d *DAConfig) GetDataNamespace() string { return d.GetNamespace() } +// GetForcedInclusionNamespace returns the namespace for forced inclusion transactions +func (d *DAConfig) GetForcedInclusionNamespace() string { + return d.ForcedInclusionNamespace +} + // NodeConfig contains all Rollkit specific configuration parameters type NodeConfig struct { // Node mode configuration - Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` - Light bool `yaml:"light" comment:"Run node in light mode"` + Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` + BasedSequencer bool `yaml:"based_sequencer" comment:"Run node with based sequencer (fetches transactions only from DA forced inclusion namespace). Requires aggregator mode to be enabled."` + Light bool `yaml:"light" comment:"Run node in light mode"` // Block management configuration BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Block time (duration). 
Examples: \"500ms\", \"1s\", \"5s\", \"1m\", \"2m30s\", \"10m\"."` @@ -234,6 +245,11 @@ func (c *Config) Validate() error { return fmt.Errorf("could not create directory %q: %w", fullDir, err) } + // Validate based sequencer requires aggregator mode + if c.Node.BasedSequencer && !c.Node.Aggregator { + return fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + // Validate namespaces if err := validateNamespace(c.DA.GetNamespace()); err != nil { return fmt.Errorf("could not validate namespace (%s): %w", c.DA.GetNamespace(), err) @@ -245,6 +261,14 @@ func (c *Config) Validate() error { } } + if len(c.DA.GetForcedInclusionNamespace()) > 0 { + // if err := validateNamespace(c.DA.GetForcedInclusionNamespace()); err != nil { + // return fmt.Errorf("could not validate forced inclusion namespace (%s): %w", c.DA.GetForcedInclusionNamespace(), err) + // } + return fmt.Errorf("forced inclusion is not yet live") + + } + // Validate lazy mode configuration if c.Node.LazyMode && c.Node.LazyBlockInterval.Duration <= c.Node.BlockTime.Duration { return fmt.Errorf("LazyBlockInterval (%v) must be greater than BlockTime (%v) in lazy mode", @@ -301,8 +325,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Bool(FlagClearCache, def.ClearCache, "clear the cache") // Node configuration flags - cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node in aggregator mode") - cmd.Flags().Bool(FlagLight, def.Node.Light, "run light client") + cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node as an aggregator") + cmd.Flags().Bool(FlagBasedSequencer, def.Node.BasedSequencer, "run node with based sequencer (requires aggregator mode)") + cmd.Flags().Bool(FlagLight, def.Node.Light, "run node in light mode") cmd.Flags().Duration(FlagBlockTime, def.Node.BlockTime.Duration, "block time (for aggregator mode)") cmd.Flags().Bool(FlagLazyAggregator, def.Node.LazyMode, "produce blocks only when transactions are available or after lazy block time") 
cmd.Flags().Uint64(FlagMaxPendingHeadersAndData, def.Node.MaxPendingHeadersAndData, "maximum headers or data pending DA confirmation before pausing block production (0 for no limit)") @@ -316,6 +341,7 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Duration(FlagDABlockTime, def.DA.BlockTime.Duration, "DA chain block time (for syncing)") cmd.Flags().String(FlagDANamespace, def.DA.Namespace, "DA namespace for header (or blob) submissions") cmd.Flags().String(FlagDADataNamespace, def.DA.DataNamespace, "DA namespace for data submissions") + cmd.Flags().String(FlagDAForcedInclusionNamespace, def.DA.ForcedInclusionNamespace, "DA namespace for forced inclusion transactions") cmd.Flags().String(FlagDASubmitOptions, def.DA.SubmitOptions, "DA submit options") cmd.Flags().StringSlice(FlagDASigningAddresses, def.DA.SigningAddresses, "Comma-separated list of addresses for DA submissions (used in round-robin)") cmd.Flags().Uint64(FlagDAMempoolTTL, def.DA.MempoolTTL, "number of DA blocks until transaction is dropped from the mempool") @@ -343,6 +369,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().String(FlagSignerType, def.Signer.SignerType, "type of signer to use (file, grpc)") cmd.Flags().String(FlagSignerPath, def.Signer.SignerPath, "path to the signer file or address") cmd.Flags().String(FlagSignerPassphraseFile, "", "path to file containing the signer passphrase (required for file signer and if aggregator is enabled)") + + // flag constraints + cmd.MarkFlagsMutuallyExclusive(FlagLight, FlagAggregator) } // Load loads the node configuration in the following order of precedence: diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 7834e42aa..d58c3348b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -50,9 +50,11 @@ func TestAddFlags(t *testing.T) { // Test specific flags assertFlagValue(t, flags, FlagDBPath, DefaultConfig().DBPath) + assertFlagValue(t, flags, FlagClearCache, DefaultConfig().ClearCache) // Node flags 
assertFlagValue(t, flags, FlagAggregator, DefaultConfig().Node.Aggregator) + assertFlagValue(t, flags, FlagBasedSequencer, DefaultConfig().Node.BasedSequencer) assertFlagValue(t, flags, FlagLight, DefaultConfig().Node.Light) assertFlagValue(t, flags, FlagBlockTime, DefaultConfig().Node.BlockTime.Duration) assertFlagValue(t, flags, FlagLazyAggregator, DefaultConfig().Node.LazyMode) @@ -66,6 +68,8 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagDAAuthToken, DefaultConfig().DA.AuthToken) assertFlagValue(t, flags, FlagDABlockTime, DefaultConfig().DA.BlockTime.Duration) assertFlagValue(t, flags, FlagDANamespace, DefaultConfig().DA.Namespace) + assertFlagValue(t, flags, FlagDADataNamespace, DefaultConfig().DA.DataNamespace) + assertFlagValue(t, flags, FlagDAForcedInclusionNamespace, DefaultConfig().DA.ForcedInclusionNamespace) assertFlagValue(t, flags, FlagDASubmitOptions, DefaultConfig().DA.SubmitOptions) assertFlagValue(t, flags, FlagDASigningAddresses, DefaultConfig().DA.SigningAddresses) assertFlagValue(t, flags, FlagDAMempoolTTL, DefaultConfig().DA.MempoolTTL) @@ -89,6 +93,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, persistentFlags, FlagLogLevel, DefaultConfig().Log.Level) assertFlagValue(t, persistentFlags, FlagLogFormat, "text") assertFlagValue(t, persistentFlags, FlagLogTrace, false) + assertFlagValue(t, persistentFlags, FlagRootDir, DefaultRootDirWithName("test")) // Signer flags assertFlagValue(t, flags, FlagSignerPassphraseFile, "") @@ -97,9 +102,10 @@ func TestAddFlags(t *testing.T) { // RPC flags assertFlagValue(t, flags, FlagRPCAddress, DefaultConfig().RPC.Address) + assertFlagValue(t, flags, FlagRPCEnableDAVisualization, DefaultConfig().RPC.EnableDAVisualization) // Count the number of flags we're explicitly checking - expectedFlagCount := 37 // Update this number if you add more flag checks above + expectedFlagCount := 43 // Update this number if you add more flag checks above // Get the actual number of flags (both 
regular and persistent) actualFlagCount := 0 @@ -368,3 +374,57 @@ func assertFlagValue(t *testing.T, flags *pflag.FlagSet, name string, expectedVa } } } + +func TestBasedSequencerValidation(t *testing.T) { + tests := []struct { + name string + aggregator bool + basedSeq bool + expectError bool + errorMsg string + }{ + { + name: "based sequencer without aggregator should fail", + aggregator: false, + basedSeq: true, + expectError: true, + errorMsg: "based sequencer mode requires aggregator mode to be enabled", + }, + { + name: "based sequencer with aggregator should pass", + aggregator: true, + basedSeq: true, + expectError: false, + }, + { + name: "aggregator without based sequencer should pass", + aggregator: true, + basedSeq: false, + expectError: false, + }, + { + name: "neither aggregator nor based sequencer should pass", + aggregator: false, + basedSeq: false, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := DefaultConfig() + cfg.RootDir = t.TempDir() + cfg.Node.Aggregator = tt.aggregator + cfg.Node.BasedSequencer = tt.basedSeq + + err := cfg.Validate() + + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 6a6f813a3..d31d2acb8 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -70,11 +70,12 @@ func DefaultConfig() Config { ReadinessMaxBlocksBehind: calculateReadinessMaxBlocksBehind(defaultBlockTime.Duration, defaultReadinessWindowSeconds), }, DA: DAConfig{ - Address: "http://localhost:7980", - BlockTime: DurationWrapper{6 * time.Second}, - MaxSubmitAttempts: 30, - Namespace: randString(10), - DataNamespace: "", + Address: "http://localhost:7980", + BlockTime: DurationWrapper{6 * time.Second}, + MaxSubmitAttempts: 30, + Namespace: randString(10), + DataNamespace: "", + ForcedInclusionNamespace: "", }, Instrumentation: 
DefaultInstrumentationConfig(), Log: LogConfig{ diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 1fae14643..65cbed173 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -11,11 +11,12 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - DAStartHeight uint64 `json:"da_start_height"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + DAStartHeight uint64 `json:"da_start_height"` + DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` } // NewGenesis creates a new Genesis instance. @@ -26,11 +27,12 @@ func NewGenesis( proposerAddress []byte, ) Genesis { genesis := Genesis{ - ChainID: chainID, - StartTime: startTime, - InitialHeight: initialHeight, - ProposerAddress: proposerAddress, - DAStartHeight: 0, + ChainID: chainID, + StartTime: startTime, + InitialHeight: initialHeight, + ProposerAddress: proposerAddress, + DAStartHeight: 0, + DAEpochForcedInclusion: 50, // Default epoch size } return genesis @@ -54,5 +56,9 @@ func (g Genesis) Validate() error { return fmt.Errorf("proposer_address cannot be nil") } + if g.DAEpochForcedInclusion < 1 { + return fmt.Errorf("da_epoch_forced_inclusion must be at least 1, got %d", g.DAEpochForcedInclusion) + } + return nil } diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index a5c1d280d..da3cc14b1 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -72,50 +72,55 @@ func TestGenesis_Validate(t *testing.T) { { name: "valid genesis - chain ID can contain any character", genesis: Genesis{ - ChainID: "test@chain#123!", - 
StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test@chain#123!", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid - empty chain_id", genesis: Genesis{ - ChainID: "", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 0, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 0, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero time DA start height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: time.Time{}, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: time.Time{}, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - nil proposer address", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: nil, + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: nil, + DAEpochForcedInclusion: 1, }, wantErr: true, }, diff --git a/pkg/genesis/io_test.go b/pkg/genesis/io_test.go index fb6f22307..7c8b882a6 100644 --- a/pkg/genesis/io_test.go +++ b/pkg/genesis/io_test.go @@ -30,40 +30,44 @@ func TestLoadAndSaveGenesis(t *testing.T) { { name: "valid genesis", genesis: Genesis{ - ChainID: "test-chain-1", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-1", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + 
DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "valid genesis - minimal", genesis: Genesis{ - ChainID: "test-chain-2", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-2", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid genesis - empty chain ID", genesis: Genesis{ - ChainID: "", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid genesis - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - InitialHeight: 0, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 0, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, @@ -177,10 +181,11 @@ func TestSaveGenesis_InvalidPath(t *testing.T) { } genesis := Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: time.Now().UTC(), - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().UTC(), + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, } err := genesis.Save(tc.path) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go new file mode 100644 index 000000000..763629200 --- /dev/null +++ b/sequencers/based/sequencer.go @@ -0,0 +1,185 @@ +package based + +import ( + "context" + "errors" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + seqcommon 
"github.com/evstack/ev-node/sequencers/common" +) + +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) +} + +var _ coresequencer.Sequencer = (*BasedSequencer)(nil) + +// BasedSequencer is a sequencer that only retrieves transactions from the DA layer +// via the forced inclusion mechanism. It does not accept transactions from the reaper. +type BasedSequencer struct { + fiRetriever ForcedInclusionRetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + + daHeight atomic.Uint64 + txQueue [][]byte +} + +// NewBasedSequencer creates a new based sequencer instance +func NewBasedSequencer( + fiRetriever ForcedInclusionRetriever, + da coreda.DA, + config config.Config, + genesis genesis.Genesis, + logger zerolog.Logger, +) *BasedSequencer { + bs := &BasedSequencer{ + fiRetriever: fiRetriever, + da: da, + config: config, + genesis: genesis, + logger: logger.With().Str("component", "based_sequencer").Logger(), + txQueue: make([][]byte, 0), + } + bs.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor + + return bs +} + +// SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA +// This satisfies the Sequencer interface but transactions submitted here are ignored +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.SubmitBatchTxsRequest) (*coresequencer.SubmitBatchTxsResponse, error) { + s.logger.Debug().Msg("based sequencer ignores submitted transactions - only DA transactions are processed") + return &coresequencer.SubmitBatchTxsResponse{}, nil +} + +// GetNextBatch retrieves the next batch of transactions from the DA layer +// It fetches forced inclusion transactions and returns them as the next batch +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req 
coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { + currentDAHeight := s.daHeight.Load() + + s.logger.Debug().Uint64("da_height", currentDAHeight).Msg("fetching forced inclusion transactions from DA") + + forcedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + if err != nil { + // Check if forced inclusion is not configured + if errors.Is(err, block.ErrForceInclusionNotConfigured) { + s.logger.Error().Msg("forced inclusion not configured, returning empty batch") + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: nil}, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } else if errors.Is(err, coreda.ErrHeightFromFuture) { + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block + s.logger.Debug(). + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + } else { + s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + return nil, err + } + } + + // Update DA height based on the retrieved event + if forcedTxsEvent.EndDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.EndDaHeight) + } else if forcedTxsEvent.StartDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.StartDaHeight) + } + + // Add forced inclusion transactions to the queue with validation + validTxs := 0 + skippedTxs := 0 + for _, tx := range forcedTxsEvent.Txs { + // Validate blob size against absolute maximum + if !seqcommon.ValidateBlobSize(tx) { + s.logger.Warn(). + Uint64("da_height", forcedTxsEvent.StartDaHeight). + Int("blob_size", len(tx)). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + skippedTxs++ + continue + } + s.txQueue = append(s.txQueue, tx) + validTxs++ + } + + s.logger.Info(). 
+ Int("valid_tx_count", validTxs). + Int("skipped_tx_count", skippedTxs). + Int("queue_size", len(s.txQueue)). + Uint64("da_height_start", forcedTxsEvent.StartDaHeight). + Uint64("da_height_end", forcedTxsEvent.EndDaHeight). + Msg("processed forced inclusion transactions from DA") + + batch := s.createBatchFromQueue(req.MaxBytes) + + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil +} + +// createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch { + if len(s.txQueue) == 0 { + return &coresequencer.Batch{Transactions: nil} + } + + var batch [][]byte + var totalBytes uint64 + + for i, tx := range s.txQueue { + txSize := uint64(len(tx)) + // Always respect maxBytes, even for the first transaction + if totalBytes+txSize > maxBytes { + // Would exceed max bytes, stop here + s.txQueue = s.txQueue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // If this is the last transaction, clear the queue + if i == len(s.txQueue)-1 { + s.txQueue = s.txQueue[:0] + } + } + + return &coresequencer.Batch{Transactions: batch} +} + +// VerifyBatch verifies a batch of transactions +// For a based sequencer, we always return true as all transactions come from DA +func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBatchRequest) (*coresequencer.VerifyBatchResponse, error) { + return &coresequencer.VerifyBatchResponse{ + Status: true, + }, nil +} + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (c *BasedSequencer) SetDAHeight(height uint64) { + c.daHeight.Store(height) + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (c *BasedSequencer) GetDAHeight() uint64 { + return 
c.daHeight.Load() +} diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go new file mode 100644 index 000000000..5bb5acd2f --- /dev/null +++ b/sequencers/based/sequencer_test.go @@ -0,0 +1,569 @@ +package based + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" +) + +// MockDA is a mock implementation of DA for testing +type MockDA struct { + mock.Mock +} + +func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace, options) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + args := m.Called(ctx, height, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*coreda.GetIDsResult), args.Error(1) +} + +func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { + 
args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]coreda.Proof), args.Error(1) +} + +func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + args := m.Called(ctx, ids, proofs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]bool), args.Error(1) +} + +func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func TestNewBasedSequencer(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + require.NotNil(t, seq) + assert.Equal(t, uint64(100), seq.daHeight.Load()) + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Submit should succeed but be ignored + req := coresequencer.SubmitBatchTxsRequest{ + Id: 
[]byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + } + + resp, err := seq.SubmitBatchTxs(context.Background(), req) + + require.NoError(t, err) + require.NotNil(t, resp) + // Transactions should not be added to queue for based sequencer + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { + testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} + + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil) + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1]) + + // DA height should be updated + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: 
"test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + // Create config without forced inclusion namespace + cfgNoFI := config.DefaultConfig() + cfgNoFI.DA.ForcedInclusionNamespace = "" + daClient := block.NewDAClient(mockDA, cfgNoFI, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfgNoFI, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) +} + +func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { + testBlobs := [][]byte{ + make([]byte, 50), // 50 bytes + make([]byte, 60), // 60 bytes + make([]byte, 100), // 100 bytes + } + + mockDA := new(MockDA) + // First call returns forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: 
[]coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil).Once() + + // Subsequent calls should return no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with max 100 bytes - should get first 2 txs (50 + 60 = 110, but logic allows if batch has content) + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + // Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100) + assert.Equal(t, 1, len(resp.Batch.Transactions)) + assert.Equal(t, 2, len(seq.txQueue)) // 2 remaining in queue + + // Second call should get next tx from queue + resp2, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions)) + assert.Equal(t, 1, len(seq.txQueue)) // 1 remaining in queue + + // Third call with larger maxBytes to get the 100-byte tx + req3 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + resp3, err := seq.GetNextBatch(context.Background(), req3) + require.NoError(t, err) + require.NotNil(t, resp3) + require.NotNil(t, resp3.Batch) + 
assert.Equal(t, 1, len(resp3.Batch.Transactions)) + assert.Equal(t, 0, len(seq.txQueue)) // Queue should be empty + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, mock.Anything, mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Pre-populate the queue + seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) + + // Queue should be empty now + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + mockDA := new(MockDA) + + // First call: return a forced tx that will be added to queue + forcedTx := make([]byte, 150) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once() + + // Second call: no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), 
mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 100 + // Forced tx (150 bytes) is added to queue, but batch will be empty since it exceeds maxBytes + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes") + + // Verify forced tx is in queue + assert.Equal(t, 1, len(seq.txQueue), "Forced tx should be in queue") + + // Second call with larger maxBytes = 200 + // Should process tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include tx from queue") + assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T) { + mockDA := new(MockDA) + + // Return forced txs where combined they exceed maxBytes + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) + mockDA.On("GetIDs", mock.Anything, uint64(100), 
mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once() + + // Second call + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 120 + // Should get only first forced tx (100 bytes), second stays in queue + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 1, len(resp1.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp1.Batch.Transactions[0])) + + // Verify second tx is still in queue + assert.Equal(t, 1, len(seq.txQueue), "Second tx should be in queue") + + // Second call - should get the second tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include second tx from queue") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(seq.txQueue), "Queue should be 
empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_VerifyBatch(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.VerifyBatchRequest{ + Id: []byte("test-chain"), + BatchData: [][]byte{[]byte("tx1")}, + } + + resp, err := seq.VerifyBatch(context.Background(), req) + require.NoError(t, err) + assert.True(t, resp.Status) +} + +func TestBasedSequencer_SetDAHeight(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + seq.SetDAHeight(200) + assert.Equal(t, uint64(200), seq.GetDAHeight()) +} + +func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, errors.New("DA connection error")) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := 
block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + _, err := seq.GetNextBatch(context.Background(), req) + require.Error(t, err) + + mockDA.AssertExpectations(t) +} diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go new file mode 100644 index 000000000..7484d3a54 --- /dev/null +++ b/sequencers/common/size_validation.go @@ -0,0 +1,27 @@ +package common + +// TODO(@julienrbrt): technically we may need to check for block gas as well + +const ( + // AbsoluteMaxBlobSize is the absolute maximum size for a single blob (DA layer limit). + // Blobs exceeding this size are invalid and should be rejected permanently. + AbsoluteMaxBlobSize = 2 * 1024 * 1024 // 2MB +) + +// ValidateBlobSize checks if a single blob exceeds the absolute maximum allowed size. +// This checks against the DA layer limit, not the per-batch limit. +// Returns true if the blob is within the absolute size limit, false otherwise. +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks if adding a blob would exceed the cumulative size limit for a batch. +// Returns true if adding the blob would exceed the limit, false otherwise. +func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} + +// GetBlobSize returns the size of a blob in bytes. 
+func GetBlobSize(blob []byte) int { + return len(blob) +} diff --git a/sequencers/common/size_validation_test.go b/sequencers/common/size_validation_test.go new file mode 100644 index 000000000..103c66d8b --- /dev/null +++ b/sequencers/common/size_validation_test.go @@ -0,0 +1,141 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want bool + }{ + { + name: "empty blob", + blobSize: 0, + want: true, + }, + { + name: "small blob", + blobSize: 100, + want: true, + }, + { + name: "exactly at limit", + blobSize: int(AbsoluteMaxBlobSize), + want: true, + }, + { + name: "one byte over limit", + blobSize: int(AbsoluteMaxBlobSize) + 1, + want: false, + }, + { + name: "far exceeds limit", + blobSize: int(AbsoluteMaxBlobSize) * 2, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := ValidateBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestWouldExceedCumulativeSize(t *testing.T) { + tests := []struct { + name string + currentSize int + blobSize int + maxBytes uint64 + want bool + }{ + { + name: "empty batch, small blob", + currentSize: 0, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would fit exactly", + currentSize: 50, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would exceed by one byte", + currentSize: 50, + blobSize: 51, + maxBytes: 100, + want: true, + }, + { + name: "far exceeds", + currentSize: 80, + blobSize: 100, + maxBytes: 100, + want: true, + }, + { + name: "zero max bytes", + currentSize: 0, + blobSize: 1, + maxBytes: 0, + want: true, + }, + { + name: "current already at limit", + currentSize: 100, + blobSize: 1, + maxBytes: 100, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := WouldExceedCumulativeSize(tt.currentSize, tt.blobSize, 
tt.maxBytes) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want int + }{ + { + name: "empty blob", + blobSize: 0, + want: 0, + }, + { + name: "small blob", + blobSize: 42, + want: 42, + }, + { + name: "large blob", + blobSize: 1024 * 1024, + want: 1024 * 1024, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := GetBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/sequencers/single/queue.go b/sequencers/single/queue.go index dd69c26a2..d992535ea 100644 --- a/sequencers/single/queue.go +++ b/sequencers/single/queue.go @@ -83,6 +83,26 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch coresequencer.Batch) e return nil } +// Prepend adds a batch to the front of the queue (before head position). +// This is used to return transactions that couldn't fit in the current batch. +// The batch is NOT persisted to the DB since these are transactions that were +// already in the queue or were just processed. +func (bq *BatchQueue) Prepend(ctx context.Context, batch coresequencer.Batch) error { + bq.mu.Lock() + defer bq.mu.Unlock() + + // If we have room before head, use it + if bq.head > 0 { + bq.head-- + bq.queue[bq.head] = batch + } else { + // Need to expand the queue at the front + bq.queue = append([]coresequencer.Batch{batch}, bq.queue...) 
+ } + + return nil +} + // Next extracts a batch of transactions from the queue and marks it as processed in the WAL func (bq *BatchQueue) Next(ctx context.Context) (*coresequencer.Batch, error) { bq.mu.Lock() diff --git a/sequencers/single/queue_test.go b/sequencers/single/queue_test.go index 0ede59a90..b7665ee67 100644 --- a/sequencers/single/queue_test.go +++ b/sequencers/single/queue_test.go @@ -12,6 +12,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" dssync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -567,3 +568,156 @@ func TestBatchQueue_QueueLimit_Concurrency(t *testing.T) { t.Logf("Successfully added %d batches, rejected %d due to queue being full", addedCount, errorCount) } + +func TestBatchQueue_Prepend(t *testing.T) { + ctx := context.Background() + db := ds.NewMapDatastore() + + t.Run("prepend to empty queue", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-empty", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + batch := coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + } + + err = queue.Prepend(ctx, batch) + require.NoError(t, err) + + assert.Equal(t, 1, queue.Size()) + + // Next should return the prepended batch + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 2, len(nextBatch.Transactions)) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + }) + + t.Run("prepend to queue with items", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-with-items", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add some batches first + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + + 
assert.Equal(t, 2, queue.Size()) + + // Prepend a batch + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Next should return the prepended batch first + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 1, len(nextBatch.Transactions)) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + // Then the original batches + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + }) + + t.Run("prepend after consuming some items", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-after-consume", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add batches + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + batch3 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx3")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch3) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Consume first batch + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + assert.Equal(t, 2, queue.Size()) + + // Prepend - should reuse the head position + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Should get prepended, then tx2, then tx3 + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + 
nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx3"), nextBatch.Transactions[0]) + + assert.Equal(t, 0, queue.Size()) + }) + + t.Run("multiple prepends", func(t *testing.T) { + queue := NewBatchQueue(db, "test-multiple-prepends", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add a batch + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + + // Prepend multiple batches + prepend1 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend1")}} + prepend2 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend2")}} + prepend3 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend3")}} + + err = queue.Prepend(ctx, prepend1) + require.NoError(t, err) + err = queue.Prepend(ctx, prepend2) + require.NoError(t, err) + err = queue.Prepend(ctx, prepend3) + require.NoError(t, err) + + assert.Equal(t, 4, queue.Size()) + + // Should get in reverse order of prepending (LIFO for prepended items) + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend3"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend2"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend1"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + }) +} diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index dbc5bc567..e97d7a157 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -5,21 +5,36 @@ import ( "context" "errors" "fmt" + "sync/atomic" "time" ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" 
+ "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" + seqcommon "github.com/evstack/ev-node/sequencers/common" ) -// ErrInvalidId is returned when the chain id is invalid var ( + // ErrInvalidId is returned when the chain id is invalid ErrInvalidId = errors.New("invalid chain id") ) -var _ coresequencer.Sequencer = &Sequencer{} +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) +} + +// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 +} + +var _ coresequencer.Sequencer = (*Sequencer)(nil) // Sequencer implements core sequencing interface type Sequencer struct { @@ -35,6 +50,12 @@ type Sequencer struct { queue *BatchQueue // single queue for immediate availability metrics *Metrics + + // Forced inclusion support + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx } // NewSequencer creates a new Single Sequencer @@ -47,31 +68,23 @@ func NewSequencer( batchTime time.Duration, metrics *Metrics, proposer bool, -) (*Sequencer, error) { - return NewSequencerWithQueueSize(ctx, logger, db, da, id, batchTime, metrics, proposer, 1000) -} - -// NewSequencerWithQueueSize creates a new Single Sequencer with configurable queue size -func NewSequencerWithQueueSize( - ctx context.Context, - logger zerolog.Logger, - db ds.Batching, - da coreda.DA, - id []byte, - batchTime time.Duration, - metrics *Metrics, - proposer bool, maxQueueSize int, + fiRetriever ForcedInclusionRetriever, + gen genesis.Genesis, ) (*Sequencer, error) { s := 
&Sequencer{ - logger: logger, - da: da, - batchTime: batchTime, - Id: id, - queue: NewBatchQueue(db, "batches", maxQueueSize), - metrics: metrics, - proposer: proposer, + logger: logger, + da: da, + batchTime: batchTime, + Id: id, + queue: NewBatchQueue(db, "batches", maxQueueSize), + metrics: metrics, + proposer: proposer, + fiRetriever: fiRetriever, + genesis: gen, + pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), } + s.SetDAHeight(gen.DAStartHeight) // will be overridden by the executor loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -117,14 +130,97 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB return nil, ErrInvalidId } + currentDAHeight := c.daHeight.Load() + + forcedEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + if err != nil { + // Continue without forced txs. Add logging for clarity. + + if errors.Is(err, coreda.ErrHeightFromFuture) { + c.logger.Debug(). + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + } else if !errors.Is(err, block.ErrForceInclusionNotConfigured) { + c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + } + + // Still create an empty forced inclusion event + forcedEvent = &block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: currentDAHeight, + EndDaHeight: currentDAHeight, + } + } + + // Always try to process forced inclusion transactions (including pending from previous epochs) + forcedTxs := c.processForcedInclusionTxs(forcedEvent, req.MaxBytes) + if forcedEvent.EndDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.EndDaHeight) + } else if forcedEvent.StartDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.StartDaHeight) + } + + c.logger.Debug(). + Int("tx_count", len(forcedTxs)). + Uint64("da_height_start", forcedEvent.StartDaHeight). 
+ Uint64("da_height_end", forcedEvent.EndDaHeight). + Msg("retrieved forced inclusion transactions from DA") + + // Calculate size used by forced inclusion transactions + forcedTxsSize := 0 + for _, tx := range forcedTxs { + forcedTxsSize += len(tx) + } + batch, err := c.queue.Next(ctx) if err != nil { return nil, err } + // Prepend forced inclusion transactions to the batch + // and ensure total size doesn't exceed maxBytes + if len(forcedTxs) > 0 { + // Trim batch transactions to fit within maxBytes + remainingBytes := int(req.MaxBytes) - forcedTxsSize + trimmedBatchTxs := make([][]byte, 0, len(batch.Transactions)) + currentBatchSize := 0 + + for i, tx := range batch.Transactions { + txSize := len(tx) + if currentBatchSize+txSize > remainingBytes { + // Would exceed limit, return remaining txs to the front of the queue + excludedBatch := coresequencer.Batch{Transactions: batch.Transactions[i:]} + if err := c.queue.Prepend(ctx, excludedBatch); err != nil { + c.logger.Error().Err(err). + Int("excluded_count", len(batch.Transactions)-i). + Msg("failed to prepend excluded transactions back to queue") + } else { + c.logger.Debug(). + Int("excluded_count", len(batch.Transactions)-i). + Msg("returned excluded batch transactions to front of queue") + } + break + } + trimmedBatchTxs = append(trimmedBatchTxs, tx) + currentBatchSize += txSize + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) + + c.logger.Debug(). + Int("forced_tx_count", len(forcedTxs)). + Int("forced_txs_size", forcedTxsSize). + Int("batch_tx_count", len(trimmedBatchTxs)). + Int("batch_size", currentBatchSize). + Int("total_tx_count", len(batch.Transactions)). + Int("total_size", forcedTxsSize+currentBatchSize). 
+ Msg("combined forced inclusion and batch transactions") + } + return &coresequencer.GetNextBatchResponse{ Batch: batch, Timestamp: time.Now(), + BatchData: req.LastBatchData, }, nil } @@ -171,3 +267,106 @@ func (c *Sequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBat func (c *Sequencer) isValid(Id []byte) bool { return bytes.Equal(c.Id, Id) } + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (c *Sequencer) SetDAHeight(height uint64) { + c.daHeight.Store(height) + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (c *Sequencer) GetDAHeight() uint64 { + return c.daHeight.Load() +} + +// processForcedInclusionTxs processes forced inclusion transactions with size validation and pending queue management +func (c *Sequencer) processForcedInclusionTxs(event *block.ForcedInclusionEvent, maxBytes uint64) [][]byte { + currentSize := 0 + var newPendingTxs []pendingForcedInclusionTx + var validatedTxs [][]byte + + // First, process any pending transactions from previous epochs + for _, pendingTx := range c.pendingForcedInclusionTxs { + txSize := seqcommon.GetBlobSize(pendingTx.Data) + + if !seqcommon.ValidateBlobSize(pendingTx.Data) { + c.logger.Warn(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Msg("pending forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). + Msg("pending blob would exceed max size for this epoch - deferring again") + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += txSize + + c.logger.Debug(). 
+ Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed pending forced inclusion transaction") + } + + // Now process new transactions from this epoch + for _, tx := range event.Txs { + txSize := seqcommon.GetBlobSize(tx) + + if !seqcommon.ValidateBlobSize(tx) { + c.logger.Warn(). + Uint64("da_height", event.StartDaHeight). + Int("blob_size", txSize). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("da_height", event.StartDaHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). + Msg("blob would exceed max size for this epoch - deferring to pending queue") + + // Store for next call + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + + validatedTxs = append(validatedTxs, tx) + currentSize += txSize + + c.logger.Debug(). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed forced inclusion transaction") + } + + // Update pending queue + c.pendingForcedInclusionTxs = newPendingTxs + if len(newPendingTxs) > 0 { + c.logger.Info(). + Int("new_pending_count", len(newPendingTxs)). + Msg("stored pending forced inclusion transactions for next epoch") + } + + c.logger.Info(). + Int("processed_tx_count", len(validatedTxs)). + Int("pending_tx_count", len(newPendingTxs)). + Int("current_size", currentSize). 
+ Msg("completed processing forced inclusion transactions") + + return validatedTxs +} diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 5362b4904..90da6fb3a 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,11 +13,26 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" damocks "github.com/evstack/ev-node/test/mocks" ) +// MockForcedInclusionRetriever is a mock implementation of DARetriever for testing +type MockForcedInclusionRetriever struct { + mock.Mock +} + +func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) { + args := m.Called(ctx, daHeight) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) +} + func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) @@ -26,7 +41,10 @@ func TestNewSequencer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -59,7 +77,10 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -112,7 +133,10 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) require.NoError(t, err, "Failed to create sequencer") defer func() { err := db.Close() @@ -152,10 +176,14 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), + logger: logger, + queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test + Id: []byte("test"), + fiRetriever: mockRetriever, } defer func() { err := db.Close() @@ -188,10 +216,14 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), + logger: logger, + queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test + Id: []byte("test"), + fiRetriever: mockRetriever, } defer func() { err := db.Close() @@ -247,13 +279,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Proposer Mode", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: true, - da: mockDA, - queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test + logger: logger, + Id: Id, + proposer: true, + da: mockDA, + queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test + fiRetriever: mockRetriever, } res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: seq.Id, BatchData: batchData}) @@ -269,12 +305,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Valid Proofs", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "valid_proofs_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "valid_proofs_queue", 0), + fiRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -290,12 +330,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid Proof", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_proof_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "invalid_proof_queue", 0), + fiRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -311,12 +355,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("GetProofs Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "getproofs_err_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "getproofs_err_queue", 0), + fiRetriever: mockRetriever, } expectedErr := errors.New("get proofs failed") @@ -333,12 +381,16 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Validate Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "validate_err_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "validate_err_queue", 0), + fiRetriever: mockRetriever, } expectedErr := errors.New("validate failed") @@ -355,13 +407,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid ID", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "invalid_queue", 0), + fiRetriever: mockRetriever, } invalidId := []byte("invalid") @@ -385,7 +441,10 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -431,6 +490,254 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { mockDA.AssertExpectations(t) } +func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + // Create in-memory datastore + db := ds.NewMapDatastore() + + // Create mock forced inclusion retriever with txs that are 50 bytes each + mockFI := &MockForcedInclusionRetriever{} + forcedTx1 := make([]byte, 50) + forcedTx2 := make([]byte, 60) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{forcedTx1, forcedTx2}, // Total 110 bytes + StartDaHeight: 100, + EndDaHeight: 100, + }, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Submit batch txs that are 40 bytes each + batchTx1 := make([]byte, 40) + batchTx2 := make([]byte, 40) + batchTx3 := make([]byte, 40) + + submitReq := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{batchTx1, batchTx2, batchTx3}, // Total 120 bytes + }, + } + + _, err = seq.SubmitBatchTxs(ctx, submitReq) + require.NoError(t, err) + + // Request batch with maxBytes = 150 + // Forced inclusion: 110 bytes (50 + 60) + // Batch txs: 120 bytes (40 + 40 + 40) + // Combined would be 230 bytes, exceeds 150 + // Should return forced txs + only 1 batch tx (110 + 40 = 150) + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 
150, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + + // Should have forced txs (2) + partial batch txs + // Total size should not exceed 150 bytes + totalSize := 0 + for _, tx := range resp.Batch.Transactions { + totalSize += len(tx) + } + assert.LessOrEqual(t, totalSize, 150, "Total batch size should not exceed maxBytes") + + // First 2 txs should be forced inclusion txs + assert.GreaterOrEqual(t, len(resp.Batch.Transactions), 2, "Should have at least forced inclusion txs") + assert.Equal(t, forcedTx1, resp.Batch.Transactions[0]) + assert.Equal(t, forcedTx2, resp.Batch.Transactions[1]) + + mockFI.AssertExpectations(t) +} + +func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + db := ds.NewMapDatastore() + + // Create forced inclusion txs where combined they exceed maxBytes + mockFI := &MockForcedInclusionRetriever{} + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) // This would be deferred + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{forcedTx1, forcedTx2}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second call should process pending tx + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Request batch with maxBytes = 120 + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 120, + LastBatchData: nil, + } + + // First call - should get only first 
forced tx (100 bytes) + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + assert.Equal(t, 1, len(resp.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp.Batch.Transactions[0])) + + // Verify pending tx is stored + assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Second tx should be pending") + + // Second call - should get the pending forced tx + resp2, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Pending queue should now be empty + assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + + mockFI.AssertExpectations(t) +} + +func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + db := ds.NewMapDatastore() + + mockFI := &MockForcedInclusionRetriever{} + + // First call returns a large forced tx that gets deferred + largeForcedTx := make([]byte, 150) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{largeForcedTx}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second call returns no new forced txs, but pending should still be processed + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Submit a batch tx + batchTx := make([]byte, 50) + submitReq := 
coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{batchTx}, + }, + } + _, err = seq.SubmitBatchTxs(ctx, submitReq) + require.NoError(t, err) + + // First call with maxBytes = 100 + // Large forced tx (150 bytes) won't fit, gets deferred + // Batch tx (50 bytes) should be returned + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + assert.Equal(t, 1, len(resp.Batch.Transactions), "Should have batch tx only") + assert.Equal(t, 50, len(resp.Batch.Transactions[0])) + + // Verify pending forced tx is stored + assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Large forced tx should be pending") + + // Second call with larger maxBytes = 200 + // Should process pending forced tx first + getReq2 := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(ctx, getReq2) + require.NoError(t, err) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") + assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) + + // Pending queue should now be empty + assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + + mockFI.AssertExpectations(t) +} + // TestSequencer_RecordMetrics tests the RecordMetrics method to ensure it properly updates metrics. func TestSequencer_RecordMetrics(t *testing.T) { t.Run("With Metrics", func(t *testing.T) { @@ -523,16 +830,20 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) { defer db.Close() mockDA := &damocks.MockDA{} + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() // Create a sequencer with a small queue limit for testing logger := zerolog.Nop() seq := &Sequencer{ - logger: logger, - da: mockDA, - batchTime: time.Second, - Id: []byte("test"), - queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing - proposer: true, + logger: logger, + da: mockDA, + batchTime: time.Second, + Id: []byte("test"), + queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing + proposer: true, + fiRetriever: mockRetriever, } ctx := context.Background() @@ -641,7 +952,10 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { // Create sequencer with small queue size to trigger throttling quickly queueSize := 3 // Small for testing logger := zerolog.Nop() - seq, err := NewSequencerWithQueueSize( + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer( context.Background(), logger, db, @@ -651,6 +965,8 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { nil, // metrics true, // proposer queueSize, + mockRetriever, // fiRetriever + genesis.Genesis{}, // genesis ) require.NoError(t, err) diff --git a/test/mocks/sequencer.go b/test/mocks/sequencer.go index c3894f846..e1ef0afb4 100644 --- a/test/mocks/sequencer.go +++ b/test/mocks/sequencer.go @@ -38,6 +38,50 @@ func (_m *MockSequencer) EXPECT() *MockSequencer_Expecter { return &MockSequencer_Expecter{mock: &_m.Mock} } +// GetDAHeight provides a mock function for the type MockSequencer +func (_mock *MockSequencer) GetDAHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for GetDAHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// 
MockSequencer_GetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDAHeight' +type MockSequencer_GetDAHeight_Call struct { + *mock.Call +} + +// GetDAHeight is a helper method to define mock.On call +func (_e *MockSequencer_Expecter) GetDAHeight() *MockSequencer_GetDAHeight_Call { + return &MockSequencer_GetDAHeight_Call{Call: _e.mock.On("GetDAHeight")} +} + +func (_c *MockSequencer_GetDAHeight_Call) Run(run func()) *MockSequencer_GetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSequencer_GetDAHeight_Call) Return(v uint64) *MockSequencer_GetDAHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *MockSequencer_GetDAHeight_Call) RunAndReturn(run func() uint64) *MockSequencer_GetDAHeight_Call { + _c.Call.Return(run) + return _c +} + // GetNextBatch provides a mock function for the type MockSequencer func (_mock *MockSequencer) GetNextBatch(ctx context.Context, req sequencer.GetNextBatchRequest) (*sequencer.GetNextBatchResponse, error) { ret := _mock.Called(ctx, req) @@ -106,6 +150,46 @@ func (_c *MockSequencer_GetNextBatch_Call) RunAndReturn(run func(ctx context.Con return _c } +// SetDAHeight provides a mock function for the type MockSequencer +func (_mock *MockSequencer) SetDAHeight(height uint64) { + _mock.Called(height) + return +} + +// MockSequencer_SetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDAHeight' +type MockSequencer_SetDAHeight_Call struct { + *mock.Call +} + +// SetDAHeight is a helper method to define mock.On call +// - height uint64 +func (_e *MockSequencer_Expecter) SetDAHeight(height interface{}) *MockSequencer_SetDAHeight_Call { + return &MockSequencer_SetDAHeight_Call{Call: _e.mock.On("SetDAHeight", height)} +} + +func (_c *MockSequencer_SetDAHeight_Call) Run(run func(height uint64)) *MockSequencer_SetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 
uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockSequencer_SetDAHeight_Call) Return() *MockSequencer_SetDAHeight_Call { + _c.Call.Return() + return _c +} + +func (_c *MockSequencer_SetDAHeight_Call) RunAndReturn(run func(height uint64)) *MockSequencer_SetDAHeight_Call { + _c.Run(run) + return _c +} + // SubmitBatchTxs provides a mock function for the type MockSequencer func (_mock *MockSequencer) SubmitBatchTxs(ctx context.Context, req sequencer.SubmitBatchTxsRequest) (*sequencer.SubmitBatchTxsResponse, error) { ret := _mock.Called(ctx, req) diff --git a/types/CLAUDE.md b/types/CLAUDE.md index 9cd5496e5..aafdd289a 100644 --- a/types/CLAUDE.md +++ b/types/CLAUDE.md @@ -77,17 +77,16 @@ The types package defines the core data structures and types used throughout ev- - Signature verification - Identity validation -### DA Integration (`da.go`, `da_test.go`) +### DA Integration -- **Purpose**: Data Availability layer helpers -- **Key Functions**: - - `SubmitWithHelpers`: DA submission with error handling +- **Purpose**: Data Availability layer helpers moved to `block/internal/da` package +- **See**: `block/internal/da/client.go` for DA submission and retrieval logic - **Key Features**: - - Error mapping to status codes + - Error mapping to status codes (in DA Client) - Namespace support - Gas price configuration - Submission options handling -- **Status Codes**: +- **Status Codes** (defined in `core/da`): - `StatusContextCanceled`: Submission canceled - `StatusNotIncludedInBlock`: Transaction timeout - `StatusAlreadyInMempool`: Duplicate transaction diff --git a/types/epoch.go b/types/epoch.go new file mode 100644 index 000000000..75d43e804 --- /dev/null +++ b/types/epoch.go @@ -0,0 +1,50 @@ +package types + +// CalculateEpochNumber returns the deterministic epoch number for a given DA height. +// Epoch 1 starts at daStartHeight. 
+// +// Parameters: +// - daHeight: The DA height to calculate the epoch for +// - daStartHeight: The genesis DA start height +// - daEpochSize: The number of DA blocks per epoch (0 means all blocks in epoch 1) +// +// Returns: +// - Epoch number (0 if before daStartHeight, 1+ otherwise) +func CalculateEpochNumber(daHeight, daStartHeight, daEpochSize uint64) uint64 { + if daHeight < daStartHeight { + return 0 + } + + if daEpochSize == 0 { + return 1 + } + + return ((daHeight - daStartHeight) / daEpochSize) + 1 +} + +// CalculateEpochBoundaries returns the start and end DA heights for the epoch +// containing the given DA height. The boundaries are inclusive. +// +// Parameters: +// - daHeight: The DA height to calculate boundaries for +// - daStartHeight: The genesis DA start height +// - daEpochSize: The number of DA blocks per epoch (0 means single epoch) +// +// Returns: +// - start: The first DA height in the epoch (inclusive) +// - end: The last DA height in the epoch (inclusive) +func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end uint64) { + if daEpochSize == 0 { + return daStartHeight, daStartHeight + } + + if daHeight < daStartHeight { + return daStartHeight, daStartHeight + daEpochSize - 1 + } + + epochNum := CalculateEpochNumber(daHeight, daStartHeight, daEpochSize) + start = daStartHeight + (epochNum-1)*daEpochSize + end = daStartHeight + epochNum*daEpochSize - 1 + + return start, end +} diff --git a/types/epoch_test.go b/types/epoch_test.go new file mode 100644 index 000000000..295395d7b --- /dev/null +++ b/types/epoch_test.go @@ -0,0 +1,300 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCalculateEpochNumber(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedEpoch uint64 + }{ + { + name: "first epoch - start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 100, + expectedEpoch: 1, + 
}, + { + name: "first epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedEpoch: 1, + }, + { + name: "first epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 109, + expectedEpoch: 1, + }, + { + name: "second epoch - start", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedEpoch: 2, + }, + { + name: "second epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 115, + expectedEpoch: 2, + }, + { + name: "tenth epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 195, + expectedEpoch: 10, + }, + { + name: "before start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedEpoch: 0, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedEpoch: 1, + }, + { + name: "large epoch size", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 2500, + expectedEpoch: 2, + }, + { + name: "start height zero", + daStartHeight: 0, + daEpochSize: 5, + daHeight: 10, + expectedEpoch: 3, + }, + { + name: "epoch size one", + daStartHeight: 100, + daEpochSize: 1, + daHeight: 105, + expectedEpoch: 6, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + epoch := CalculateEpochNumber(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, tt.expectedEpoch, epoch) + }) + } +} + +func TestCalculateEpochBoundaries(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedStart uint64 + expectedEnd uint64 + }{ + { + name: "first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "second epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedStart: 110, + expectedEnd: 119, + }, + { + name: "third epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 129, + expectedStart: 120, + expectedEnd: 129, + }, + { + name: 
"before start height returns first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "before start height with zero epoch size", + daStartHeight: 2, + daEpochSize: 0, + daHeight: 1, + expectedStart: 2, + expectedEnd: 2, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedStart: 100, + expectedEnd: 100, + }, + { + name: "large epoch", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 1500, + expectedStart: 1000, + expectedEnd: 1999, + }, + { + name: "epoch boundary exact start", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 100, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact end of first epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 149, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact start of second epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 150, + expectedStart: 150, + expectedEnd: 199, + }, + { + name: "start height zero", + daStartHeight: 0, + daEpochSize: 5, + daHeight: 10, + expectedStart: 10, + expectedEnd: 14, + }, + { + name: "epoch size one", + daStartHeight: 100, + daEpochSize: 1, + daHeight: 105, + expectedStart: 105, + expectedEnd: 105, + }, + { + name: "very large numbers", + daStartHeight: 1000000, + daEpochSize: 100000, + daHeight: 5500000, + expectedStart: 5500000, + expectedEnd: 5599999, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + start, end := CalculateEpochBoundaries(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, tt.expectedStart, start, "start height mismatch") + assert.Equal(t, tt.expectedEnd, end, "end height mismatch") + }) + } +} + +func TestEpochConsistency(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + }{ + { + name: "standard epoch", + daStartHeight: 100, + daEpochSize: 10, + }, + { + name: "large 
epoch", + daStartHeight: 1000, + daEpochSize: 1000, + }, + { + name: "small epoch", + daStartHeight: 0, + daEpochSize: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test that all heights in an epoch return the same epoch number + // and boundaries + for epoch := uint64(1); epoch <= 10; epoch++ { + // Calculate expected boundaries for this epoch + expectedStart := tt.daStartHeight + (epoch-1)*tt.daEpochSize + expectedEnd := tt.daStartHeight + epoch*tt.daEpochSize - 1 + + // Test every height in the epoch + for h := expectedStart; h <= expectedEnd; h++ { + epochNum := CalculateEpochNumber(h, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, epoch, epochNum, "height %d should be in epoch %d", h, epoch) + + start, end := CalculateEpochBoundaries(h, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, expectedStart, start, "height %d should have start %d", h, expectedStart) + assert.Equal(t, expectedEnd, end, "height %d should have end %d", h, expectedEnd) + } + } + }) + } +} + +func TestEpochBoundaryTransitions(t *testing.T) { + daStartHeight := uint64(100) + daEpochSize := uint64(10) + + // Test that epoch boundaries are correctly calculated at transitions + transitions := []struct { + height uint64 + expectedEpoch uint64 + expectedStart uint64 + expectedEnd uint64 + }{ + {100, 1, 100, 109}, // First height of epoch 1 + {109, 1, 100, 109}, // Last height of epoch 1 + {110, 2, 110, 119}, // First height of epoch 2 + {119, 2, 110, 119}, // Last height of epoch 2 + {120, 3, 120, 129}, // First height of epoch 3 + } + + for _, tr := range transitions { + epoch := CalculateEpochNumber(tr.height, daStartHeight, daEpochSize) + assert.Equal(t, tr.expectedEpoch, epoch, "height %d epoch mismatch", tr.height) + + start, end := CalculateEpochBoundaries(tr.height, daStartHeight, daEpochSize) + assert.Equal(t, tr.expectedStart, start, "height %d start mismatch", tr.height) + assert.Equal(t, tr.expectedEnd, end, "height %d end mismatch", 
tr.height) + } +} diff --git a/types/state.go b/types/state.go index a439f6c34..4b87dc6b5 100644 --- a/types/state.go +++ b/types/state.go @@ -30,7 +30,8 @@ type State struct { // LastHeaderHash is the hash of the header of the last block LastHeaderHash Hash - // DAHeight identifies DA block containing the latest applied Evolve block. + // DAHeight identifies DA block containing the latest applied Evolve block for a syncing node. + // In the case of an aggregator, this corresponds as the last fetched DA block height for forced inclused transactions. DAHeight uint64 // the latest AppHash we've received from calling abci.Commit() From 46087a4f7b602e8c9a9b4a1e18250270339c4f7e Mon Sep 17 00:00:00 2001 From: julienrbrt Date: Mon, 24 Nov 2025 14:30:25 +0100 Subject: [PATCH 02/15] Apply suggestions from code review Co-authored-by: Marko --- docs/adr/adr-019-forced-inclusion-mechanism.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 51e1be63c..019e3269c 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -394,7 +394,7 @@ func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) b **Key Behaviors**: -- **Absolute validation**: Blobs exceeding 1.5MB are permanently rejected +- **Absolute validation**: Blobs exceeding 2MB are permanently rejected - **Batch size limits**: `req.MaxBytes` is NEVER exceeded in any batch - **Transaction preservation**: - Single sequencer: Trimmed batch txs returned to queue via `Prepend()` From a928c93577bf1b7f77f8651b2b1a8512356abaa9 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 26 Nov 2025 14:26:40 +0100 Subject: [PATCH 03/15] docs: add description about da epoch --- pkg/genesis/genesis.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 
65cbed173..b551e2310 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -11,12 +11,16 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - DAStartHeight uint64 `json:"da_start_height"` - DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + // DAStartHeight corresponds to the height at which the first DA header/data has been published. + // This value is meant to be updated after genesis and shared to all syncing nodes for speeding up syncing via DA. + DAStartHeight uint64 `json:"da_start_height"` + // DaEpochForcedInclusion corresponds to the amount of DA blocks are considered an epochs + // When forced inclusion is enabled, the epoch size determines at what frequency the forced included transactions are executed by the application. + DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` } // NewGenesis creates a new Genesis instance. 
From fcf4b08488ab9fb7eef0df3a1a7ae4071180d9be Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 26 Nov 2025 15:12:39 +0100 Subject: [PATCH 04/15] add smoothing checks --- block/internal/syncing/syncer.go | 140 +++++-- .../syncing/syncer_forced_inclusion_test.go | 343 +++++++++++++++++- 2 files changed, 448 insertions(+), 35 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 8d97da6b4..e2ba206a8 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -42,6 +42,7 @@ type Syncer struct { config config.Config genesis genesis.Genesis options common.BlockOptions + logger zerolog.Logger // State management lastState *atomic.Pointer[types.State] @@ -63,8 +64,8 @@ type Syncer struct { fiRetriever *da.ForcedInclusionRetriever p2pHandler p2pHandler - // Logging - logger zerolog.Logger + // Forced inclusion tracking + pendingForcedInclusionTxs sync.Map // map[string]pendingForcedInclusionTx // Lifecycle ctx context.Context @@ -75,6 +76,14 @@ type Syncer struct { p2pWaitState atomic.Value // stores p2pWaitState } +// pendingForcedInclusionTx represents a forced inclusion transaction that hasn't been included yet +type pendingForcedInclusionTx struct { + Data []byte + EpochStart uint64 + EpochEnd uint64 + TxHash string +} + // NewSyncer creates a new block syncer func NewSyncer( store store.Store, @@ -90,20 +99,23 @@ func NewSyncer( options common.BlockOptions, errorCh chan<- error, ) *Syncer { + daRetrieverHeight := &atomic.Uint64{} + daRetrieverHeight.Store(genesis.DAStartHeight) + return &Syncer{ store: store, exec: exec, - daClient: daClient, cache: cache, metrics: metrics, config: config, genesis: genesis, options: options, + lastState: &atomic.Pointer[types.State]{}, + daClient: daClient, + daRetrieverHeight: daRetrieverHeight, headerStore: headerStore, dataStore: dataStore, - lastState: &atomic.Pointer[types.State]{}, - daRetrieverHeight: &atomic.Uint64{}, - heightInCh: make(chan 
common.DAHeightEvent, 1_000), + heightInCh: make(chan common.DAHeightEvent, 100), errorCh: errorCh, logger: logger.With().Str("component", "syncer").Logger(), } @@ -665,13 +677,16 @@ func hashTx(tx []byte) string { return hex.EncodeToString(hash[:]) } -// verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block +// verifyForcedInclusionTxs verifies that forced inclusion transactions from DA are properly handled. +// Note: Due to block size constraints (MaxBytes), sequencers may defer forced inclusion transactions +// to future blocks (smoothing). This is legitimate behavior within an epoch. +// However, ALL forced inclusion txs from an epoch MUST be included before the next epoch begins. func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { if s.fiRetriever == nil { return nil } - // Retrieve forced inclusion transactions from DA + // Retrieve forced inclusion transactions from DA for current epoch forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(s.ctx, currentState.DAHeight) if err != nil { if errors.Is(err, da.ErrForceInclusionNotConfigured) { @@ -682,42 +697,105 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. 
return fmt.Errorf("failed to retrieve forced included txs from DA: %w", err) } - // If no forced inclusion transactions found, nothing to verify - if len(forcedIncludedTxsEvent.Txs) == 0 { - s.logger.Debug().Uint64("da_height", currentState.DAHeight).Msg("no forced inclusion transactions to verify") - return nil - } - + // Build map of transactions in current block blockTxMap := make(map[string]struct{}) for _, tx := range data.Txs { blockTxMap[hashTx(tx)] = struct{}{} } - // Check if all forced inclusion transactions are present in the block - var missingTxs [][]byte + // Check if any pending forced inclusion txs from previous epochs are included + var stillPending []pendingForcedInclusionTx + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + pending := value.(pendingForcedInclusionTx) + if _, ok := blockTxMap[pending.TxHash]; ok { + s.logger.Debug(). + Uint64("height", data.Height()). + Uint64("epoch_start", pending.EpochStart). + Uint64("epoch_end", pending.EpochEnd). + Str("tx_hash", pending.TxHash[:16]). 
+ Msg("pending forced inclusion transaction included in block") + s.pendingForcedInclusionTxs.Delete(key) + } else { + stillPending = append(stillPending, pending) + } + return true + }) + + // Add new forced inclusion transactions from current epoch + var newPendingCount, includedCount int for _, forcedTx := range forcedIncludedTxsEvent.Txs { - if _, ok := blockTxMap[hashTx(forcedTx)]; !ok { - missingTxs = append(missingTxs, forcedTx) + txHash := hashTx(forcedTx) + if _, ok := blockTxMap[txHash]; ok { + // Transaction is included in this block + includedCount++ + } else { + // Transaction not included, add to pending + stillPending = append(stillPending, pendingForcedInclusionTx{ + Data: forcedTx, + EpochStart: forcedIncludedTxsEvent.StartDaHeight, + EpochEnd: forcedIncludedTxsEvent.EndDaHeight, + TxHash: txHash, + }) + newPendingCount++ } } - if len(missingTxs) > 0 { + // Check if we've moved past any epoch boundaries with pending txs + var maliciousTxs, remainingPending []pendingForcedInclusionTx + for _, pending := range stillPending { + // If current DA height is past this epoch's end, these txs should have been included + if currentState.DAHeight > pending.EpochEnd { + maliciousTxs = append(maliciousTxs, pending) + } else { + remainingPending = append(remainingPending, pending) + } + } + + // Update pending map - clear old entries and store only remaining pending + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Delete(key) + return true + }) + for _, pending := range remainingPending { + s.pendingForcedInclusionTxs.Store(pending.TxHash, pending) + } + + // If there are transactions from past epochs that weren't included, sequencer is malicious + if len(maliciousTxs) > 0 { s.logger.Error(). Uint64("height", data.Height()). - Uint64("da_height", currentState.DAHeight). - Uint64("da_epoch_start", forcedIncludedTxsEvent.StartDaHeight). - Uint64("da_epoch_end", forcedIncludedTxsEvent.EndDaHeight). 
- Int("missing_count", len(missingTxs)). - Int("total_forced", len(forcedIncludedTxsEvent.Txs)). - Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions missing from block") - return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs))) + Uint64("current_da_height", currentState.DAHeight). + Int("malicious_count", len(maliciousTxs)). + Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions from past epoch(s) not included") + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions from past epoch(s) not included", len(maliciousTxs))) + } + + // Log current state + if len(forcedIncludedTxsEvent.Txs) > 0 { + if newPendingCount > 0 { + totalPending := 0 + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + totalPending++ + return true + }) + + s.logger.Info(). + Uint64("height", data.Height()). + Uint64("da_height", currentState.DAHeight). + Uint64("epoch_start", forcedIncludedTxsEvent.StartDaHeight). + Uint64("epoch_end", forcedIncludedTxsEvent.EndDaHeight). + Int("included_count", includedCount). + Int("deferred_count", newPendingCount). + Int("total_pending", totalPending). + Msg("forced inclusion transactions processed - some deferred due to block size constraints") + } else { + s.logger.Debug(). + Uint64("height", data.Height()). + Int("forced_txs", len(forcedIncludedTxsEvent.Txs)). + Msg("all forced inclusion transactions included in block") + } } - s.logger.Debug(). - Uint64("height", data.Height()). - Int("forced_txs", len(forcedIncludedTxsEvent.Txs)). 
- Msg("all forced inclusion transactions verified in block") - return nil } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 1948109d9..a72dc39f9 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -189,11 +189,24 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { currentState := s.GetLastState() currentState.DAHeight = 0 - // Verify - should fail since forced tx blob is missing + // Verify - should pass since forced tx blob may be legitimately deferred within the epoch err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) + + // Mock DA for next epoch to return no forced inclusion transactions + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Now simulate moving to next epoch - should fail if tx still not included + currentState.DAHeight = 1 // Move past epoch end (epoch was [0, 0]) + data2 := makeData(gen.ChainID, 2, 1) + data2.Txs[0] = types.Tx([]byte("regular_tx_3")) + + err = s.verifyForcedInclusionTxs(currentState, data2) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "1 forced inclusion transactions not included") + require.Contains(t, err.Error(), "forced inclusion transactions from past epoch(s) not included") } func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { @@ -279,11 +292,24 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { currentState := s.GetLastState() currentState.DAHeight = 0 - // Verify - should fail since dataBin2 is missing + // Verify - should pass since dataBin2 may be legitimately deferred within the epoch err = 
s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) + + // Mock DA for next epoch to return no forced inclusion transactions + mockDA.EXPECT().GetIDs(mock.Anything, uint64(1), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Now simulate moving to next epoch - should fail if dataBin2 still not included + currentState.DAHeight = 1 // Move past epoch end (epoch was [0, 0]) + data2 := makeData(gen.ChainID, 2, 1) + data2.Txs[0] = types.Tx([]byte("regular_tx_3")) + + err = s.verifyForcedInclusionTxs(currentState, data2) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "1 forced inclusion transactions not included") + require.Contains(t, err.Error(), "forced inclusion transactions from past epoch(s) not included") } func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { @@ -426,3 +452,312 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { err = s.verifyForcedInclusionTxs(currentState, data) require.NoError(t, err) } + +// TestVerifyForcedInclusionTxs_DeferralWithinEpoch tests that forced inclusion transactions +// can be legitimately deferred to a later block within the same epoch due to block size constraints +func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 100, + DAEpochForcedInclusion: 5, // Epoch spans 5 DA blocks + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + 
mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blobs + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + + // Mock DA retrieval for first block at DA height 100 + // Epoch boundaries: [100, 104] (epoch size is 5) + // The retriever will fetch all heights in the epoch: 100, 101, 102, 103, 104 + + // Height 100 (epoch start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2}, nil).Once() + + // Heights 101, 102, 103 
(intermediate heights in epoch) + for height := uint64(101); height <= 103; height++ { + mockDA.EXPECT().GetIDs(mock.Anything, height, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + } + + // Height 104 (epoch end) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(104), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // First block only includes dataBin1 (dataBin2 deferred due to size constraints) + data1 := makeData(gen.ChainID, 1, 2) + data1.Txs[0] = types.Tx(dataBin1) + data1.Txs[1] = types.Tx([]byte("regular_tx_1")) + + currentState := s.GetLastState() + currentState.DAHeight = 100 + + // Verify - should pass since dataBin2 can be deferred within epoch + err = s.verifyForcedInclusionTxs(currentState, data1) + require.NoError(t, err) + + // Verify that dataBin2 is now tracked as pending + pendingCount := 0 + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + pendingCount++ + return true + }) + require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") + + // Mock DA for second verification at same epoch (height 100) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2}, nil).Once() + + for height := uint64(101); height <= 103; height++ { + mockDA.EXPECT().GetIDs(mock.Anything, height, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + 
})).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + } + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(104), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Second block includes BOTH the previously included dataBin1 AND the deferred dataBin2 + // This simulates the block containing both forced inclusion txs + data2 := makeData(gen.ChainID, 2, 2) + data2.Txs[0] = types.Tx(dataBin1) // Already included, but that's ok + data2.Txs[1] = types.Tx(dataBin2) // The deferred one we're waiting for + + // Verify - should pass since dataBin2 is now included and clears pending + err = s.verifyForcedInclusionTxs(currentState, data2) + require.NoError(t, err) + + // Verify that pending queue is now empty (dataBin2 was included) + pendingCount = 0 + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + pendingCount++ + return true + }) + require.Equal(t, 0, pendingCount, "should have no pending forced inclusion txs") +} + +// TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd tests that missing forced inclusion +// transactions are detected as malicious when the epoch ends without them being included +func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 100, + DAEpochForcedInclusion: 3, // Epoch spans 3 DA blocks + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, 
mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // Mock DA retrieval for DA height 100 + // Epoch boundaries: [100, 102] (epoch size is 3) + // The retriever will fetch heights 100, 101, 102 + + // Height 100 (epoch start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Height 101 (intermediate) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(101), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + 
// Height 102 (epoch end) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(102), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // First block doesn't include the forced inclusion tx + data1 := makeData(gen.ChainID, 1, 1) + data1.Txs[0] = types.Tx([]byte("regular_tx_1")) + + currentState := s.GetLastState() + currentState.DAHeight = 100 + + // Verify - should pass, tx can be deferred within epoch + err = s.verifyForcedInclusionTxs(currentState, data1) + require.NoError(t, err) + + // Verify that the forced tx is tracked as pending + pendingCount := 0 + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + pendingCount++ + return true + }) + require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") + + // Process another block within same epoch - forced tx still not included + // Mock DA for second verification at same epoch (height 100) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(101), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(102), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + data2 := makeData(gen.ChainID, 2, 1) + data2.Txs[0] = 
types.Tx([]byte("regular_tx_2")) + + // Still at epoch 100, should still pass + err = s.verifyForcedInclusionTxs(currentState, data2) + require.NoError(t, err) + + // Mock DA retrieval for next epoch (DA height 103) + // Epoch boundaries: [103, 105] + // The retriever will fetch heights 103, 104, 105 + + // Height 103 (epoch start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(103), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Height 104 (intermediate) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(104), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Height 105 (epoch end) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(105), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Third block is in the next epoch (past 102) without including the forced tx + data3 := makeData(gen.ChainID, 3, 1) + data3.Txs[0] = types.Tx([]byte("regular_tx_3")) + + currentState.DAHeight = 103 // Past epoch end [100, 102] + + // Verify - should FAIL since forced tx from previous epoch was never included + err = s.verifyForcedInclusionTxs(currentState, data3) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "forced inclusion transactions from past epoch(s) not included") +} From d7875757b90c8e9d852920ebaafd2be15a2a41e8 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 26 Nov 2025 18:28:51 +0100 Subject: [PATCH 05/15] add docs --- docs/.vitepress/config.ts | 1 + docs/learn/sequencing/based.md | 72 ++++++++++++++++++++++++++ docs/learn/sequencing/overview.md | 13 ++--- docs/learn/sequencing/single.md | 86 
+++++++++++++++++++++++++++++-- 4 files changed, 163 insertions(+), 9 deletions(-) create mode 100644 docs/learn/sequencing/based.md diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 399a567c1..6e85cc62d 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -194,6 +194,7 @@ function sidebarHome() { items: [ { text: "Overview", link: "/learn/sequencing/overview" }, { text: "Single", link: "/learn/sequencing/single" }, + { text: "Based", link: "/learn/sequencing/based" }, ], }, { diff --git a/docs/learn/sequencing/based.md b/docs/learn/sequencing/based.md new file mode 100644 index 000000000..cf9c18c31 --- /dev/null +++ b/docs/learn/sequencing/based.md @@ -0,0 +1,72 @@ +# Based Sequencing + +Based sequencing is a decentralized sequencing model where transaction ordering is determined by the base layer (Celestia) rather than a centralized sequencer. In this model, **every full node acts as its own proposer** by independently and deterministically deriving the next batch of transactions directly from the base layer. + +## How Based Sequencing Works + +### Transaction Submission + +Users submit transactions to the base layer's forced inclusion namespace. These transactions are posted as blobs to the DA layer, where they become part of the canonical transaction ordering. + +```text +User → Base Layer (DA) → Full Nodes retrieve and execute +``` + +### Deterministic Batch Construction + +All full nodes independently construct identical batches by: + +1. **Retrieving forced inclusion transactions** from the base layer at epoch boundaries +2. **Applying forkchoice rules** to determine batch composition: + - `MaxBytes`: Maximum byte size per batch (respects block size limits) + - DA epoch boundaries +3. **Smoothing large transactions** across multiple blocks when necessary + +### Epoch-Based Processing + +Forced inclusion transactions are retrieved in epochs defined by `DAEpochForcedInclusion`. 
For example, with an epoch size of 10: + +- DA heights 100-109 form one epoch +- DA heights 110-119 form the next epoch +- Transactions from each epoch must be included before the epoch ends + +## Block Smoothing + +When forced inclusion transactions exceed the `MaxBytes` limit for a single block, they can be "smoothed" across multiple blocks within the same epoch. This ensures that: + +- Large transactions don't block the chain +- All transactions are eventually included +- The system remains censorship-resistant + +### Example + +```text +Epoch [100, 104]: + - Block 1: Includes 1.5 MB of forced inclusion txs (partial) + - Block 2: Includes remaining 0.5 MB + new regular txs + - All epoch transactions included before DA height 105 +``` + +## Trust Assumptions + +Based sequencing minimizes trust assumptions: + +- **No trusted sequencer** - ordering comes from the base layer +- **No proposer selection** - every full node derives blocks independently +- **Deterministic consensus** - all honest nodes converge on the same chain +- **Base layer security** - inherits the security guarantees of the DA layer +- **No malicious actor concern** - invalid blocks are automatically rejected by validation rules + +## Comparison with Single Sequencer + +| Feature | Based Sequencing | Single Sequencer | +| --------------------- | --------------------------- | --------------------------- | +| Decentralization | ✅ Fully decentralized | ❌ Single point of control | +| Censorship Resistance | ✅ Guaranteed by base layer | ⚠️ Guaranteed by base layer | +| Latency | ⚠️ Depends on DA layer | ✅ Low latency | +| Trust Assumptions | ✅ Minimal (only DA layer) | ❌ Trust the sequencer | + +## Further Reading + +- [Data Availability](../data-availability.md) - Understanding the DA layer +- [Transaction Flow](../transaction-flow.md) - How transactions move through the system diff --git a/docs/learn/sequencing/overview.md b/docs/learn/sequencing/overview.md index fbda85b7a..4e22e0952 100644 --- 
a/docs/learn/sequencing/overview.md +++ b/docs/learn/sequencing/overview.md @@ -1,6 +1,6 @@ # Sequencing - Sequencing is the essential first step for handling your transactions. Think of it as an organizer that takes all incoming transactions, puts them in a clear order, and then groups them into batches. This process is vital for keeping everything consistent and making the chain run. Evolve uses a "Sequencing Interface" with key functions like submitting, retrieving, and verifying these transaction batches, ensuring smooth communication between the chain and the sequencing mechanism, which often acts as a bridge to the underlying network. +Sequencing is the essential first step for handling your transactions. Think of it as an organizer that takes all incoming transactions, puts them in a clear order, and then groups them into batches. This process is vital for keeping everything consistent and making the chain run. Evolve uses a "Sequencing Interface" with key functions like submitting, retrieving, and verifying these transaction batches, ensuring smooth communication between the chain and the sequencing mechanism, which often acts as a bridge to the underlying network. ## Sequencing Interface {#sequencing-interface} @@ -32,13 +32,14 @@ type Sequencer interface { It mainly consists of: -* `SubmitBatchTxs` relays the chain transactions from Evolve chain to the sequencing network -* `GetNextBatch` returns the next batch of transactions along with a deterministic timestamp -* `VerifyBatch` validates the sequenced batch +- `SubmitBatchTxs` relays the chain transactions from Evolve chain to the sequencing network +- `GetNextBatch` returns the next batch of transactions along with a deterministic timestamp +- `VerifyBatch` validates the sequenced batch ## Sequencing Implementations {#sequencing-implementations} An implementation of the sequencing interface mainly acts as a middleware that connects Evolve chain and the sequencing layer. 
It implements the sequencing interface functions described above. -There are several implementations of the sequencer but for now only one is available in Evolve. +There are several implementations of the sequencer available in Evolve: -* [single-sequencer](./single.md) - The simplest and most widely used sequencing model, where a single node (the sequencer) is responsible for ordering transactions and producing blocks. +- [single-sequencer](./single.md) - The simplest and most widely used sequencing model, where a single node (the sequencer) is responsible for ordering transactions and producing blocks. +- [based-sequencer](./based.md) - A decentralized sequencing model where transaction ordering is determined by the base layer, and every full node acts as its own proposer. diff --git a/docs/learn/sequencing/single.md b/docs/learn/sequencing/single.md index 107b2d53f..38494af3e 100644 --- a/docs/learn/sequencing/single.md +++ b/docs/learn/sequencing/single.md @@ -11,10 +11,10 @@ A single sequencer is the simplest sequencing architecture for an Evolve-based c - The sequencer requests a batch of transactions from the execution environment to be included in the next block. 3. **Block Production:** - **Without lazy mode:** the sequencer produces new blocks at fixed intervals. - - **With lazy mode:** the sequencer produces a block once either - - enough transactions are collected + - **With lazy mode:** the sequencer produces a block once either + - enough transactions are collected - the lazy-mode block interval elapses - More info [here](../config.md#lazy-mode-lazy-aggregator). + More info in the [lazy mode configuration guide](../config.md#lazy-mode-lazy-aggregator). - Each block contains a batch of ordered transactions and metadata. 4. 
**Data Availability Posting:** @@ -41,8 +41,88 @@ sequenceDiagram ExecutionEnv->>User: State/query response ``` +## Forced Inclusion + +While the single sequencer controls transaction ordering, the system provides a censorship-resistance mechanism called **forced inclusion**. This ensures users can always include their transactions even if the sequencer refuses to process them. + +### How Forced Inclusion Works + +1. **Direct DA Submission:** + - Users can submit transactions directly to the DA layer's forced inclusion namespace + - These transactions bypass the sequencer entirely + +2. **Epoch-Based Retrieval:** + - The sequencer retrieves forced inclusion transactions from the DA layer at epoch boundaries + - Epochs are defined by `DAEpochForcedInclusion` in the genesis configuration + +3. **Mandatory Inclusion:** + - The sequencer MUST include all forced inclusion transactions from an epoch before the epoch ends + - Full nodes verify that forced inclusion transactions are properly included + +4. **Smoothing:** + - If forced inclusion transactions exceed block size limits (`MaxBytes`), they can be spread across multiple blocks within the same epoch + - All transactions must be included before moving to the next epoch + +### Example + +```text +Epoch [100, 109] (epoch size = 10): + - User submits tx directly to DA at height 102 + - Sequencer retrieves forced txs at epoch start (height 100) + - Sequencer includes forced tx in blocks before height 110 +``` + +See [Based Sequencing](./based.md) for a fully decentralized alternative that relies entirely on forced inclusion. + +## Detecting Malicious Sequencer Behavior + +Full nodes continuously monitor the sequencer to ensure it follows consensus rules, particularly around forced inclusion: + +### Censorship Detection + +If a sequencer fails to include forced inclusion transactions past their epoch boundary, full nodes will: + +1. **Detect the violation** - missing transactions from past epochs +2. 
**Reject invalid blocks** - do not build on top of censoring blocks +3. **Log the violation** with transaction hashes and epoch details +4. **Halt consensus** - the chain cannot progress with a malicious sequencer + +### Recovery from Malicious Sequencer + +When a malicious sequencer is detected (censoring forced inclusion transactions): + +**All nodes must restart the chain in based sequencing mode:** + +```bash +# Restart with based sequencing enabled +./evnode start --node.aggregator --node.based_sequencer +``` + +**In based sequencing mode:** + +- No single sequencer controls transaction ordering +- Every full node derives blocks independently from the DA layer +- Forced inclusion becomes the primary (and only) transaction submission method +- Censorship becomes impossible as ordering comes from the DA layer + +**Important considerations:** + +- All full nodes should coordinate the switch to based mode +- The chain continues from the last valid state +- Users submit transactions directly to the DA layer going forward +- This is a one-way transition - moving back to single sequencer requires social consensus + +See [Based Sequencing documentation](./based.md) for details on operating in this mode. + ## Advantages - **Simplicity:** Easy to set up and operate, making it ideal for development, testing, and small-scale deployments compared to other more complex sequencers. - **Low Latency:** Fast block production and transaction inclusion, since there is no consensus overhead among multiple sequencers. - **Independence from DA block time:** The sequencer can produce blocks on its own schedule, without being tied to the block time of the DA layer, enabling more flexible transaction processing than DA-timed sequencers. +- **Forced inclusion fallback:** Users can always submit transactions via the DA layer if the sequencer is unresponsive or censoring. 
+ +## Disadvantages + +- **Single point of failure:** If the sequencer goes offline, block production stops (though the chain can transition to based mode). +- **Trust requirement:** Users must trust the sequencer to include their transactions in a timely manner (mitigated by forced inclusion). +- **Censorship risk:** A malicious sequencer can temporarily censor transactions until forced inclusion activates or the chain transitions to based mode. From f6c7b0116082b495f26a1d3bc9e88e09e5624ee0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 27 Nov 2025 15:21:53 +0100 Subject: [PATCH 06/15] ai docs --- docs/adr/adr-012-based-sequencing.md | 337 ++++++++++-------- .../adr/adr-019-forced-inclusion-mechanism.md | 81 ++++- docs/learn/sequencing/based.md | 16 +- 3 files changed, 279 insertions(+), 155 deletions(-) diff --git a/docs/adr/adr-012-based-sequencing.md b/docs/adr/adr-012-based-sequencing.md index 33c822943..2e3456a60 100644 --- a/docs/adr/adr-012-based-sequencing.md +++ b/docs/adr/adr-012-based-sequencing.md @@ -5,21 +5,18 @@ - 2025-04-09: Initial draft - 2025-04-09: Added optional UX optimization where full nodes can relay user transactions to base layer - 2025-04-09: Added rationale for VerifyBatch utility in a based setup -- 2025-04-09: Reworded forkchoice rule to use maxHeightDrift instead of time-based maxLatency - 2025-04-10: Added Relaying Costs and Fee Compensation via EVM +- 2025-11-27: Updated to reflect actual implementation with epoch-based forced inclusion ## Context -Most chains today rely on single sequencers to form batches of user transactions, despite the availability of base layers (like Celestia) that provide data availability and canonical ordering guarantees. A Single sequencer introduces liveness and censorship risks, as well as complexity in proposer election, fault tolerance, and bridge security. 
+Most chains today rely on single sequencers to form batches of user transactions, despite the availability of base layers (like Celestia) that provide data availability and canonical ordering guarantees. A single sequencer introduces liveness and censorship risks, as well as complexity in proposer election, fault tolerance, and bridge security. -Based sequencing eliminates this reliance by having the base layer determine transaction ordering. However, previous implementations still assumed the existence of a proposer to prepare batches. +Based sequencing eliminates this reliance by having the base layer determine transaction ordering. This ADR describes the **epoch-based forced inclusion** implementation where **every full node acts as its own proposer** by independently: -This ADR proposes a **based sequencing model** in which **every full node acts as its own proposer** by independently: - -- Reading chain-specific blobs from the base layer -- Applying a deterministic forkchoice rule -- Constructing blocks -- Executing batches to compute state updates +- Reading forced inclusion transactions from the base layer at epoch boundaries +- Applying deterministic batching rules +- Executing transactions to compute state updates This approach ensures consistency, removes the need for trusted intermediaries, and improves decentralization and resilience. @@ -35,235 +32,291 @@ This approach ensures consistency, removes the need for trusted intermediaries, - Some nodes are elected to act as proposers for efficiency. - Still introduces trust assumptions, coordination complexity, and MEV-related risks. -### Trusted Light Client Commitments +### Continuous DA Polling -- Blocks are committed to L1 (e.g., Ethereum) and verified by a light client. -- Adds delay and dependency on L1 finality, and often still relies on centralized proposers. +- Full nodes continuously poll DA and form batches based on size thresholds. 
+- More complex coordination and can lead to inconsistent batch boundaries across nodes. -None of these provide the decentralization and self-sovereignty enabled by a fully deterministic, proposerless, based sequencing model. +The epoch-based approach provides deterministic batch boundaries while minimizing DA queries and ensuring all honest nodes derive identical blocks. ## Decision -We adopt a based sequencing model where **every full node in the network acts as its own proposer** by deterministically deriving the next batch using only: +We adopt a based sequencing model where every full node in the network acts as its own proposer using an epoch-based forced inclusion mechanism: -- Base-layer data (e.g., Celestia blobs tagged by ChainID) -- A forkchoice rule: MaxBytes + Bounded L1 Height Drift (maxHeightDrift) -- Local execution (e.g., EVM via reth) +### Core Principles -This model removes the need for: +1. **Epoch Boundaries**: Transactions are retrieved from DA in epochs defined by `DAEpochForcedInclusion` +2. **Deterministic Batch Formation**: All nodes apply the same rules to form batches from queued transactions +3. **MaxBytes Enforcement**: Individual blocks respect a maximum byte limit (2MB default) +4. **Transaction Smoothing**: Large transaction sets can be smoothed across multiple blocks within an epoch +5. **No Trusted Sequencer**: All ordering comes from the base layer -- A designated sequencer -- Coordination mechanisms -- Sequencer signatures +### Sequencing Model -The canonical chain state becomes a **function of the base layer** and a well-defined forkchoice rule. +The `BasedSequencer` implementation: -Additionally, to improve UX for users who do not operate a base layer client or wallet, **full nodes may optionally relay user-submitted transactions to the base layer**. This maintains decentralization while improving accessibility. 
+- **Only retrieves transactions from DA** via forced inclusion namespace +- **Ignores transactions submitted via `SubmitBatchTxs`** (no mempool) +- **Fetches at epoch boundaries** to minimize DA queries +- **Queues transactions** and creates batches respecting `MaxBytes` +- **Validates blob sizes** against absolute maximum to prevent oversized submissions -The following sequence diagram demonstrates two full nodes independently preparing batches and chain states that are identical using the user transactions that are directly submitted to the base layer. +### Transaction Flow ```mermaid sequenceDiagram - participant L1 as Base Layer (e.g., Celestia) + participant User + participant DA as Base Layer (Celestia) participant NodeA as Full Node A participant NodeB as Full Node B participant ExecA as Execution Engine A participant ExecB as Execution Engine B - Note over L1: Users post transactions as blobs tagged with ChainID + Note over User: User posts transaction to DA
forced inclusion namespace + + User->>DA: Submit blob to forced inclusion namespace + + Note over NodeA,NodeB: At epoch start (e.g., DA height 100, 110, 120...) + + NodeA->>DA: RetrieveForcedIncludedTxs(epochStart) + NodeB->>DA: RetrieveForcedIncludedTxs(epochStart) - NodeA->>L1: Retrieve new blobs since last DA height - NodeB->>L1: Retrieve new blobs since last DA height + DA-->>NodeA: Txs from epoch [100-109] + DA-->>NodeB: Txs from epoch [100-109] - NodeA->>NodeA: Apply forkchoice rule
MaxBytes or MaxHeightDrift met? - NodeB->>NodeB: Apply forkchoice rule
MaxBytes or MaxHeightDrift met? + Note over NodeA,NodeB: Queue transactions and create batches
respecting MaxBytes - NodeA->>ExecA: ExecuteTxs(blobs) - NodeB->>ExecB: ExecuteTxs(blobs) + NodeA->>NodeA: createBatchFromQueue(MaxBytes) + NodeB->>NodeB: createBatchFromQueue(MaxBytes) - ExecA-->>NodeA: Updated state root - ExecB-->>NodeB: Updated state root + NodeA->>ExecA: ExecuteTxs(batch) + NodeB->>ExecB: ExecuteTxs(batch) - NodeA->>NodeA: Build block with batch + state root - NodeB->>NodeB: Build block with batch + state root + ExecA-->>NodeA: State root + ExecB-->>NodeB: State root - Note over NodeA, NodeB: Both nodes independently reach the same block & state + Note over NodeA,NodeB: Both nodes produce identical blocks ``` -The following sequence diagram shows the case where the user utilizes the full node to relay the transaction to the base layer and the light client in action. +## Detailed Design -```mermaid -sequenceDiagram - participant User as User - participant Node as Full Node - participant Base as Base Layer (e.g., Celestia) - participant Exec as Execution Engine - participant LightClient as Light Client +### Epoch-Based Retrieval - Note over User: User submits transaction +**Epoch Calculation**: - User->>Node: SubmitTx(tx) - Node->>Base: Post tx blob (DA blob with ChainID) +- Epoch number: `((daHeight - daStartHeight) / daEpochSize) + 1` +- Epoch boundaries: `[start, end]` where transactions must be included - Note over Node: Node continuously scans base layer +**Example with `DAEpochForcedInclusion = 10`**: - Node->>Base: Retrieve blobs since last height - Node->>Node: Apply forkchoice rule (MaxBytes or MaxHeightDrift) - Node->>Exec: ExecuteTxs(blobs) - Exec-->>Node: Updated state root - Node->>Node: Build block +- DA heights 100-109 = Epoch 1 +- DA heights 110-119 = Epoch 2 +- DA heights 120-129 = Epoch 3 - Note over LightClient: Re-executes forkchoice & verifies state +**Retrieval Logic** (`ForcedInclusionRetriever`): - LightClient->>Base: Retrieve blobs - LightClient->>LightClient: Apply forkchoice & re-execute - LightClient->>LightClient: 
Verify state root & inclusion -``` +1. Check if DA height is at epoch start +2. If not at epoch start, return empty transaction set +3. If at epoch start, fetch all blobs from forced inclusion namespace for entire epoch +4. Return `ForcedInclusionEvent` with transactions and DA height range -## Detailed Design +### Batch Formation -### User Requirements +**BasedSequencer Queue Management**: -- Users submit transactions by: - - Posting them directly to the base layer in tagged blobs, **or** - - Sending them to any full node's RPC endpoint, which will relay them to the base layer on their behalf -- Users can verify finality by checking light clients or DA inclusion +```go +// On GetNextBatch: +1. Retrieve forced inclusion transactions for current epoch +2. Validate blob sizes (skip oversized blobs) +3. Add valid transactions to internal queue +4. Create batch from queue respecting MaxBytes +5. Return batch (may be partial if queue exceeds MaxBytes) +``` -### Systems Affected +**Batch Creation** (`createBatchFromQueue`): -- Full nodes -- Light clients -- Batch building and execution logic +- Iterate through queued transactions +- Accumulate until `totalBytes + txSize > MaxBytes` +- Stop at limit and preserve remaining transactions for next block +- Clear queue when all transactions consumed -### Forkchoice Rule +### Block Production -A batch is constructed when: +**Executor Flow** (`block/internal/executing/executor.go`): -1. The accumulated size of base-layer blobs >= `MAX_BYTES` -2. OR the L1 height difference since the last batch exceeds `MAX_HEIGHT_DRIFT` +1. **Retrieve Batch**: Call `sequencer.GetNextBatch(MaxBytes: 2MB)` +2. **Handle Empty Batch**: Skip block production if no transactions +3. **Create Block**: Form block header and data with batch transactions +4. **Execute**: Apply transactions via execution engine +5. **Update State**: Store DA height from sequencer in state +6. **Sign Header**: Based sequencer returns empty signature +7. 
**Persist**: Save block to store +8. **Broadcast**: Propagate header and data to P2P network -All full nodes: +### Transaction Smoothing -- Track base-layer heights and timestamps -- Fetch all chain-specific tagged blobs -- Apply the rule deterministically -- Execute batches to update state +When forced inclusion transactions exceed `MaxBytes`: -Without forkchoice parameters, full nodes cannot independently produce identical blocks (i.e., matching state roots or headers), as they wouldn’t know how to consistently form batches—specifically, how many transactions to include per batch. The maxHeightDrift parameter addresses this by enabling progress when the maxBytes threshold isn’t met, without relying on global time synchronization. Relying on timestamps could lead to inconsistencies due to clock drift between nodes, so using L1-based timestamps or heights provides a reliable and deterministic reference for batching. +**Block 1**: -#### Configurable Forkchoice rule +``` +Epoch [100-109] contains 3MB of transactions +Block at DA height 100: 2MB (partial) +Remaining in queue: 1MB +``` -By default, the based sequencing supports max bytes along with max height drift as the forkchoice rule; however, this can be made configurable to support different forkchoice strategies, such as prioritizing highest to lowest fee-paying transactions, earliest submitted transactions, application-specific prioritization, or even hybrid strategies that balance throughput, latency, and economic incentives, allowing chain operators to customize batch construction policies according to their needs. +**Block 2**: -### Light Clients +``` +Block at DA height 101: 1MB (remainder) + new regular txs +Queue cleared +``` -Light clients (once implemented) are not expected to re-execute transactions to derive headers. Instead, they will perform verification only. 
These clients will typically receive headers either: +This ensures all epoch transactions are eventually included while respecting block size limits. -- via the p2p network along with accompanying proofs, or -- from a connected full node, in which case they will still require validity proofs for the received headers. +### Forced Inclusion Verification -This design ensures that light clients remain lightweight and efficient, relying on cryptographic proofs rather than execution to validate the chain state. +Full nodes verify that batches include all required forced inclusion transactions via `Syncer.verifyForcedInclusionTxs`: -### Data Structures +1. Retrieve forced inclusion transactions for current DA height +2. Check all forced txs are present in block +3. Allow deferral within epoch boundaries +4. Reject blocks that: + - Censor forced inclusion transactions after epoch end + - Skip forced transactions without valid reason -- Blob index: to track chain-specific blobs by height and timestamp -- Batch metadata: includes L1 timestamps, blob IDs, and state roots +### Data Structures -### APIs +**ForcedInclusionEvent**: -- `GetNextBatch(lastBatchData, maxBytes, maxHeightDrift)`: deterministically builds batch, `maxHeightDrift` can be configured locally instead of passing on every call. -- `VerifyBatch(batchData)`: re-derives and checks state -- `SubmitBatchTxs(batch [][]byte)`: relays a user transaction(s) to the base layer +```go +type ForcedInclusionEvent struct { + StartDaHeight uint64 // Epoch start DA height + EndDaHeight uint64 // Last processed DA height in epoch + Txs [][]byte // All transactions from epoch +} +``` -In based sequencing, full nodes do not need VerifyBatch to participate in consensus or build the canonical chain state — because they derive everything deterministically from L1. However, VerifyBatch may still be useful in sync, light clients, testing, or cross-domain verification. 
+**BasedSequencer State**: -- Light Clients: L1 or cross-domain light clients can use VerifyBatch to validate that a given chain state root or message was derived according to the forkchoice rule and execution logic. +```go +type BasedSequencer struct { + daHeight atomic.Uint64 // Current DA height + txQueue [][]byte // Queued transactions awaiting inclusion +} +``` -- Syncing Peers: During peer synchronization or state catch-up, nodes may download batches and use VerifyBatch to confirm correctness before trusting the result. +### Configuration -- Auditing / Indexers: Off-chain services may verify batches as part of building state snapshots, fraud monitoring, or historical replays. +**Genesis Configuration**: -- Testing: Developers and test frameworks can validate batch formation correctness and execution determinism using VerifyBatch. +- `DAStartHeight`: Starting DA height for the chain +- `DAEpochForcedInclusion`: Number of DA blocks per epoch (e.g., 10) -In all of these cases, VerifyBatch acts as a stateless, replayable re-computation check using base-layer data and chain rules. +**Constants**: -### Relaying Costs and Fee Compensation via EVM +- `DefaultMaxBlobSize`: 2MB per batch/block +- Enforced both at submission and retrieval -In a based sequencing architecture, users may choose to submit their transactions directly to the base layer (e.g., Celestia) or rely on full nodes to relay the transactions on their behalf. When full nodes act as relayers, they are responsible for covering the base layer data availability (DA) fees. To make this economically viable, the protocol must include a mechanism to compensate these full nodes for their relaying service—ideally without modifying the EVM or execution engine. +### Systems Affected -To achieve this, we leverage existing EVM transaction fee mechanisms and Engine API standards. Specifically, we utilize the suggestedFeeRecipient field in the engine_forkchoiceUpdatedV3 call. 
This field is included in the PayloadAttributes sent by the consensus client (Rollkit) to the execution client (reth) when proposing a new block payload. By setting suggestedFeeRecipient to the full node’s address, we instruct the execution engine to assign the transaction priority fees (tip) and base fees to the relaying full node when the payload is created. +- **BasedSequencer**: Implements epoch-based transaction retrieval +- **ForcedInclusionRetriever**: Fetches transactions from DA at epochs +- **Executor**: Drives block production using sequencer batches +- **Syncer**: Verifies forced inclusion compliance +- **DA Client**: Must support forced inclusion namespace -The user transaction itself includes a maxFeePerGas and maxPriorityFeePerGas—standard EIP-1559 fields. These fees are used as usual by the execution engine during payload construction. Since the full node is named as the fee recipient, it directly receives the gas fees when the transaction is executed, effectively covering its cost of DA submission on the user’s behalf. This approach requires no changes to the EVM engine, remains backward compatible with Ethereum infrastructure, and aligns incentives for honest full nodes to participate in relaying and batching. +### APIs -This design ensures: +**Sequencer Interface**: -- Fee accountability: Users pay for DA inclusion via standard gas fees. -- Node neutrality: Any full node can relay a transaction and get compensated. -- No execution-layer changes: Works with a modified reth clients. -- Security: Users retain flexibility to either self-submit or rely on decentralized relayers. +```go +// Returns empty response - based sequencer ignores submissions +SubmitBatchTxs(ctx, req) (*SubmitBatchTxsResponse, error) -Additional enhancements like dynamic fee markets, relayer reputation, or chain-native incentives can be layered atop this base mechanism in the future. 
+// Retrieves next batch from forced inclusion queue +GetNextBatch(ctx, req) (*GetNextBatchResponse, error) -### Efficiency +// Always returns true for based sequencer +VerifyBatch(ctx, req) (*VerifyBatchResponse, error) +``` -- Deterministic block production without overhead of consensus -- Bound latency ensures timely progress even with low traffic +**Forced Inclusion Retrieval**: -### Observability +```go +// Retrieves forced inclusion txs at DA height (epoch start) +RetrieveForcedIncludedTxs(ctx, daHeight) (*ForcedInclusionEvent, error) +``` -- Each node can log forkchoice decisions, skipped blobs, and batch triggers +### Block Time Characteristics -### Security +- **Block time is a function of DA layer block time** +- With `DAEpochForcedInclusion = 10` and Celestia ~12s block time: + - Minimum block time: ~12s (if transactions present) + - Maximum epoch duration: ~120s (10 blocks) +- **Lazy mode has no effect** - based sequencing inherently follows DA timing +- **No headers are published to DA** - only forced inclusion blobs -- No sequencer key or proposer trust required -- Replayable from public data (DA layer) -- Optional transaction relay must not allow censorship or injection +### Security Considerations -### Privacy +**Trust Model**: -- No privacy regressions; same as base-layer visibility +- No trusted sequencer required +- All nodes derive identical state from DA +- Invalid blocks are automatically rejected by execution rules -### Testing +**Attack Vectors**: -- Unit tests for forkchoice implementation -- End-to-end replay tests against base-layer data -- Mocked relayer tests for SubmitTx +- Invalid State: Rejected by execution engine during `ExecuteTxs` +- Blob Spam: Limited by DA namespace fees and size validation +- Incorrect Batch: Each node independently derives batches, inconsistent nodes fall out of sync -### Deployment +### Efficiency -- No breaking changes to existing based chain logic -- Can be rolled out by disabling proposer logic -- 
Optional relayer logic gated by config flag +- Minimal DA Queries: Only fetch at epoch boundaries +- Bounded Latency: Epoch duration provides upper bound +- Transaction Queuing: Smooth large batches across multiple blocks ## Status -Proposed +Implemented ## Consequences ### Positive -- Adds an alternate to single sequencer -- Fully deterministic and transparent -- Enables trustless bridges and light clients -- Optional relayer support improves UX for walletless or mobile users +- **Eliminates single sequencer dependency** - fully decentralized ordering +- **Deterministic consensus** - all nodes converge on same state +- **Censorship resistance** - forced inclusion verified by all nodes +- **Simplified architecture** - no proposer election or coordination +- **Economic sustainability** - fee recipient mechanism enables relay compensation ### Negative -- Slight increase in complexity in forkchoice validation -- Must standardize timestamp and blob access for determinism -- Must prevent relayer misuse or spam +- **Block time tied to DA layer** - cannot be independently configured +- **Minimum latency** - at least one DA block time +- **Epoch-based batching** - cannot include transactions mid-epoch ### Neutral -- Shifts latency tuning from proposer logic to forkchoice parameters +- **No mempool in based sequencer** - transactions only via forced inclusion +- **Queue management required** - full nodes maintain transaction queues +- **DA namespace dependency** - requires forced inclusion namespace support + +## Future Enhancements + +1. **Transaction Relaying**: Implement full node RPC endpoints to accept and relay user transactions to DA +2. **Dynamic Epochs**: Adjust epoch size based on transaction volume or network conditions +3. **Priority Mechanisms**: Support application-specific transaction ordering within epochs +4. **Light Client Integration**: Implement header verification without full re-execution +5. 
**Cross-Chain Inclusion**: Enable forced inclusion from multiple DA layers ## References -- [EthResearch: Based Chains](https://ethresear.ch/t/based-rollups-superpowers-from-l1-sequencing/15016) -- [Taiko](https://taiko.mirror.xyz/7dfMydX1FqEx9_sOvhRt3V8hJksKSIWjzhCVu7FyMZU) - -- [Surge](https://www.surge.wtf/) + +- [EthResearch: Based Rollups](https://ethresear.ch/t/based-rollups-superpowers-from-l1-sequencing/15016) +- [Taiko: Based Sequencing](https://taiko.mirror.xyz/7dfMydX1FqEx9_sOvhRt3V8hJksKSIWjzhCVu7FyMZU) +- [Surge Rollup](https://www.surge.wtf/) - [Spire](https://www.spire.dev/) -- [Unifi from Puffer](https://www.puffer.fi/unifi) +- [Puffer UniFi](https://www.puffer.fi/unifi) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 019e3269c..1095289e0 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -304,33 +304,100 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsR #### Syncer Verification -Full nodes verify forced inclusion in the sync process: +Full nodes verify forced inclusion in the sync process with support for transaction smoothing across multiple blocks: ```go func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error { - // 1. Retrieve forced inclusion transactions from DA + // 1. Retrieve forced inclusion transactions from DA for current epoch forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) if err != nil { return err } - // 2. Build map of transactions in block + // 2. Build map of transactions in current block blockTxMap := make(map[string]struct{}) for _, tx := range data.Txs { - blockTxMap[string(tx)] = struct{}{} + blockTxMap[hashTx(tx)] = struct{}{} } - // 3. Verify all forced transactions are included + // 3. 
Check if any pending forced inclusion txs from previous epochs are included + var stillPending []pendingForcedInclusionTx + s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + pending := value.(pendingForcedInclusionTx) + if _, ok := blockTxMap[pending.TxHash]; ok { + // Transaction was included - remove from pending + s.pendingForcedInclusionTxs.Delete(key) + } else { + stillPending = append(stillPending, pending) + } + return true + }) + + // 4. Process new forced inclusion transactions from current epoch for _, forcedTx := range forcedEvent.Txs { - if _, ok := blockTxMap[string(forcedTx)]; !ok { - return errMaliciousProposer + txHash := hashTx(forcedTx) + if _, ok := blockTxMap[txHash]; !ok { + // Transaction not included yet - add to pending for deferral within epoch + stillPending = append(stillPending, pendingForcedInclusionTx{ + Data: forcedTx, + EpochStart: forcedEvent.StartDaHeight, + EpochEnd: forcedEvent.EndDaHeight, + TxHash: txHash, + }) + } + } + + // 5. Check for malicious behavior: pending txs past their epoch boundary + var maliciousTxs, remainingPending []pendingForcedInclusionTx + for _, pending := range stillPending { + // If current DA height is past this epoch's end, these txs MUST have been included + if currentState.DAHeight > pending.EpochEnd { + maliciousTxs = append(maliciousTxs, pending) + } else { + remainingPending = append(remainingPending, pending) } } + // 6. Update pending map with only remaining valid pending txs + pendingForcedInclusionTxs = remainingPending + + // 7. Reject block if sequencer censored forced txs past epoch boundary + if len(maliciousTxs) > 0 { + return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions from past epoch(s) not included", len(maliciousTxs)) + } + return nil } ``` +**Key Verification Features**: + +1. **Pending Transaction Tracking**: Maintains a map of forced inclusion transactions that haven't been included yet +2. 
**Epoch-Based Deferral**: Allows transactions to be deferred (smoothed) across multiple blocks within the same epoch +3. **Strict Epoch Boundary Enforcement**: Once `currentState.DAHeight > pending.EpochEnd`, all pending transactions from that epoch MUST have been included +4. **Censorship Detection**: Identifies malicious sequencers that fail to include forced transactions after epoch boundaries + +**Smoothing Example**: + +``` +Epoch [100-109] contains 3MB of forced inclusion transactions + +Block at DA height 100: + - Includes 2MB of forced txs (partial) + - Remaining 1MB added to pending map with EpochEnd=109 + - ✅ Valid - within epoch boundary + +Block at DA height 105: + - Includes remaining 1MB from pending + - Pending map cleared for those txs + - ✅ Valid - within epoch boundary + +Block at DA height 110 (next epoch): + - If any txs from epoch [100-109] still pending + - ❌ MALICIOUS - epoch boundary violated + - Block rejected, sequencer flagged +``` + ### Implementation Details #### Epoch-Based Fetching diff --git a/docs/learn/sequencing/based.md b/docs/learn/sequencing/based.md index cf9c18c31..c99bf279f 100644 --- a/docs/learn/sequencing/based.md +++ b/docs/learn/sequencing/based.md @@ -30,6 +30,9 @@ Forced inclusion transactions are retrieved in epochs defined by `DAEpochForcedI - DA heights 110-119 form the next epoch - Transactions from each epoch must be included before the epoch ends +Epoch durations determine the block time in based sequencing. +Additionally, because no headers are published, the lazy mode has no effect. The block time is a function of the DA layer's block time. + ## Block Smoothing When forced inclusion transactions exceed the `MaxBytes` limit for a single block, they can be "smoothed" across multiple blocks within the same epoch.
This ensures that: @@ -59,12 +62,13 @@ Based sequencing minimizes trust assumptions: ## Comparison with Single Sequencer -| Feature | Based Sequencing | Single Sequencer | -| --------------------- | --------------------------- | --------------------------- | -| Decentralization | ✅ Fully decentralized | ❌ Single point of control | -| Censorship Resistance | ✅ Guaranteed by base layer | ⚠️ Guaranteed by base layer | -| Latency | ⚠️ Depends on DA layer | ✅ Low latency | -| Trust Assumptions | ✅ Minimal (only DA layer) | ❌ Trust the sequencer | +| Feature | Based Sequencing | Single Sequencer | +| --------------------- | ----------------------------- | ----------------------------- | +| Decentralization | ✅ Fully decentralized | ❌ Single point of control | +| Censorship Resistance | ✅ Guaranteed by base layer | ⚠️ Guaranteed by base layer | +| Latency | ⚠️ Depends on DA layer (~12s) | ✅ Low latency (configurable) | +| Block Time Control | ❌ Factor of DA block time | ✅ Configurable by sequencer | +| Trust Assumptions | ✅ Minimal (only DA layer) | ❌ Trust the sequencer | ## Further Reading From e5ba96540c2fd127535814307e47e47ad893518d Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 1 Dec 2025 18:00:10 +0100 Subject: [PATCH 07/15] implement feedback --- .../internal/da/forced_inclusion_retriever.go | 27 +++++++++---------- block/internal/syncing/syncer.go | 6 ++--- .../syncing/syncer_forced_inclusion_test.go | 6 ++--- .../adr/adr-019-forced-inclusion-mechanism.md | 2 +- types/epoch.go | 11 ++++---- types/epoch_test.go | 6 ++--- 6 files changed, 28 insertions(+), 30 deletions(-) diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go index 5f5047338..dded3004c 100644 --- a/block/internal/da/forced_inclusion_retriever.go +++ b/block/internal/da/forced_inclusion_retriever.go @@ -51,7 +51,7 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context return nil, 
ErrForceInclusionNotConfigured } - epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + epochStart, epochEnd, currentEpochNumber := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) if daHeight != epochStart { r.logger.Debug(). @@ -66,9 +66,6 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context }, nil } - // We're at epoch start - fetch transactions from DA - currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) - event := &ForcedInclusionEvent{ StartDaHeight: epochStart, Txs: [][]byte{}, @@ -81,22 +78,22 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context Uint64("epoch_num", currentEpochNumber). Msg("retrieving forced included transactions from DA") - epochStartResult := r.client.RetrieveForcedInclusion(ctx, epochStart) - if epochStartResult.Code == coreda.StatusHeightFromFuture { + epochEndResult := r.client.RetrieveForcedInclusion(ctx, epochEnd) + if epochEndResult.Code == coreda.StatusHeightFromFuture { r.logger.Debug(). - Uint64("epoch_start", epochStart). - Msg("epoch start height not yet available on DA - backoff required") - return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) + Uint64("epoch_end", epochEnd). + Msg("epoch end height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) } - epochEndResult := epochStartResult + epochStartResult := epochEndResult if epochStart != epochEnd { - epochEndResult = r.client.RetrieveForcedInclusion(ctx, epochEnd) - if epochEndResult.Code == coreda.StatusHeightFromFuture { + epochStartResult = r.client.RetrieveForcedInclusion(ctx, epochStart) + if epochStartResult.Code == coreda.StatusHeightFromFuture { r.logger.Debug(). - Uint64("epoch_end", epochEnd). 
- Msg("epoch end height not yet available on DA - backoff required") - return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) + Uint64("epoch_start", epochStart). + Msg("epoch start height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) } } diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index e2ba206a8..2c45fd163 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -705,7 +705,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. // Check if any pending forced inclusion txs from previous epochs are included var stillPending []pendingForcedInclusionTx - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { pending := value.(pendingForcedInclusionTx) if _, ok := blockTxMap[pending.TxHash]; ok { s.logger.Debug(). @@ -752,7 +752,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. } // Update pending map - clear old entries and store only remaining pending - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { s.pendingForcedInclusionTxs.Delete(key) return true }) @@ -774,7 +774,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. 
if len(forcedIncludedTxsEvent.Txs) > 0 { if newPendingCount > 0 { totalPending := 0 - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { totalPending++ return true }) diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index a72dc39f9..d07cb86b3 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -556,7 +556,7 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { // Verify that dataBin2 is now tracked as pending pendingCount := 0 - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { pendingCount++ return true }) @@ -593,7 +593,7 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { // Verify that pending queue is now empty (dataBin2 was included) pendingCount = 0 - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { pendingCount++ return true }) @@ -699,7 +699,7 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { // Verify that the forced tx is tracked as pending pendingCount := 0 - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { pendingCount++ return true }) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 1095289e0..c8b63bb20 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -322,7 +322,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error // 3. 
Check if any pending forced inclusion txs from previous epochs are included var stillPending []pendingForcedInclusionTx - s.pendingForcedInclusionTxs.Range(func(key, value interface{}) bool { + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { pending := value.(pendingForcedInclusionTx) if _, ok := blockTxMap[pending.TxHash]; ok { // Transaction was included - remove from pending diff --git a/types/epoch.go b/types/epoch.go index 75d43e804..47e869ddd 100644 --- a/types/epoch.go +++ b/types/epoch.go @@ -33,18 +33,19 @@ func CalculateEpochNumber(daHeight, daStartHeight, daEpochSize uint64) uint64 { // Returns: // - start: The first DA height in the epoch (inclusive) // - end: The last DA height in the epoch (inclusive) -func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end uint64) { +func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end, epochNum uint64) { + epochNum = CalculateEpochNumber(daHeight, daStartHeight, daEpochSize) + if daEpochSize == 0 { - return daStartHeight, daStartHeight + return daStartHeight, daStartHeight, epochNum } if daHeight < daStartHeight { - return daStartHeight, daStartHeight + daEpochSize - 1 + return daStartHeight, daStartHeight + daEpochSize - 1, epochNum } - epochNum := CalculateEpochNumber(daHeight, daStartHeight, daEpochSize) start = daStartHeight + (epochNum-1)*daEpochSize end = daStartHeight + epochNum*daEpochSize - 1 - return start, end + return start, end, epochNum } diff --git a/types/epoch_test.go b/types/epoch_test.go index 295395d7b..578712618 100644 --- a/types/epoch_test.go +++ b/types/epoch_test.go @@ -218,7 +218,7 @@ func TestCalculateEpochBoundaries(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - start, end := CalculateEpochBoundaries(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + start, end, _ := CalculateEpochBoundaries(tt.daHeight, tt.daStartHeight, tt.daEpochSize) assert.Equal(t, tt.expectedStart, start, 
"start height mismatch") assert.Equal(t, tt.expectedEnd, end, "end height mismatch") }) @@ -262,7 +262,7 @@ func TestEpochConsistency(t *testing.T) { epochNum := CalculateEpochNumber(h, tt.daStartHeight, tt.daEpochSize) assert.Equal(t, epoch, epochNum, "height %d should be in epoch %d", h, epoch) - start, end := CalculateEpochBoundaries(h, tt.daStartHeight, tt.daEpochSize) + start, end, _ := CalculateEpochBoundaries(h, tt.daStartHeight, tt.daEpochSize) assert.Equal(t, expectedStart, start, "height %d should have start %d", h, expectedStart) assert.Equal(t, expectedEnd, end, "height %d should have end %d", h, expectedEnd) } @@ -293,7 +293,7 @@ func TestEpochBoundaryTransitions(t *testing.T) { epoch := CalculateEpochNumber(tr.height, daStartHeight, daEpochSize) assert.Equal(t, tr.expectedEpoch, epoch, "height %d epoch mismatch", tr.height) - start, end := CalculateEpochBoundaries(tr.height, daStartHeight, daEpochSize) + start, end, _ := CalculateEpochBoundaries(tr.height, daStartHeight, daEpochSize) assert.Equal(t, tr.expectedStart, start, "height %d start mismatch", tr.height) assert.Equal(t, tr.expectedEnd, end, "height %d end mismatch", tr.height) } From d46db8c1e13293dcc5aabc3a7ab5ca9e999be8db Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 1 Dec 2025 18:05:42 +0100 Subject: [PATCH 08/15] feedback 2/2 --- sequencers/based/sequencer.go | 7 +------ sequencers/based/sequencer_test.go | 6 ++---- sequencers/single/queue.go | 3 +-- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 763629200..f9ccd31b0 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -75,12 +75,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get if err != nil { // Check if forced inclusion is not configured if errors.Is(err, block.ErrForceInclusionNotConfigured) { - s.logger.Error().Msg("forced inclusion not configured, returning empty batch") - 
return &coresequencer.GetNextBatchResponse{ - Batch: &coresequencer.Batch{Transactions: nil}, - Timestamp: time.Now(), - BatchData: req.LastBatchData, - }, nil + return nil, errors.New("forced inclusion not configured, returning empty batch") } else if errors.Is(err, coreda.ErrHeightFromFuture) { // If we get a height from future error, keep the current DA height and return batch // We'll retry the same height on the next call until DA produces that block diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index 5bb5acd2f..ec57bd46f 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -236,10 +236,8 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { } resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Batch) - assert.Equal(t, 0, len(resp.Batch.Transactions)) + require.Error(t, err) + require.Nil(t, resp) } func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { diff --git a/sequencers/single/queue.go b/sequencers/single/queue.go index d992535ea..a208aa109 100644 --- a/sequencers/single/queue.go +++ b/sequencers/single/queue.go @@ -85,8 +85,7 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch coresequencer.Batch) e // Prepend adds a batch to the front of the queue (before head position). // This is used to return transactions that couldn't fit in the current batch. -// The batch is NOT persisted to the DB since these are transactions that were -// already in the queue or were just processed. +// TODO(@julienrbrt): The batch is currently NOT persisted to the DB since these are transactions that were already in the queue or were just processed. -- FI txs are lost, this should be tackled. 
func (bq *BatchQueue) Prepend(ctx context.Context, batch coresequencer.Batch) error { bq.mu.Lock() defer bq.mu.Unlock() From a587a45d184141a631b47ad1c1c17201c2028c88 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 1 Dec 2025 21:01:17 +0100 Subject: [PATCH 09/15] chore: docs nits --- sequencers/single/sequencer.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index e97d7a157..e466fbcd8 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -134,8 +134,6 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB forcedEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) if err != nil { - // Continue without forced txs. Add logging for clarity. - if errors.Is(err, coreda.ErrHeightFromFuture) { c.logger.Debug(). Uint64("da_height", currentDAHeight). @@ -362,11 +360,13 @@ func (c *Sequencer) processForcedInclusionTxs(event *block.ForcedInclusionEvent, Msg("stored pending forced inclusion transactions for next epoch") } - c.logger.Info(). - Int("processed_tx_count", len(validatedTxs)). - Int("pending_tx_count", len(newPendingTxs)). - Int("current_size", currentSize). - Msg("completed processing forced inclusion transactions") + if len(validatedTxs) > 0 { + c.logger.Info(). + Int("processed_tx_count", len(validatedTxs)). + Int("pending_tx_count", len(newPendingTxs)). + Int("current_size", currentSize). 
+ Msg("completed processing forced inclusion transactions") + } return validatedTxs } From 3abacf4749655429ed36325470a9e0c4bcf1b181 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 2 Dec 2025 11:32:47 +0100 Subject: [PATCH 10/15] merge issue --- block/public.go | 13 +++++++------ sequencers/based/sequencer.go | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/block/public.go b/block/public.go index e2e857ef5..2326107be 100644 --- a/block/public.go +++ b/block/public.go @@ -42,12 +42,13 @@ func NewDAClient( logger zerolog.Logger, ) DAClient { return da.NewClient(da.Config{ - DA: daLayer, - Logger: logger, - Namespace: config.DA.GetNamespace(), - DefaultTimeout: config.DA.RequestTimeout.Duration, - DataNamespace: config.DA.GetDataNamespace(), - RetrieveBatchSize: config.DA.RetrieveBatchSize, + DA: daLayer, + Logger: logger, + Namespace: config.DA.GetNamespace(), + DefaultTimeout: config.DA.RequestTimeout.Duration, + DataNamespace: config.DA.GetDataNamespace(), + ForcedInclusionNamespace: config.DA.GetForcedInclusionNamespace(), + RetrieveBatchSize: config.DA.RetrieveBatchSize, }) } diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index f9ccd31b0..afd2f1ab2 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -75,7 +75,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get if err != nil { // Check if forced inclusion is not configured if errors.Is(err, block.ErrForceInclusionNotConfigured) { - return nil, errors.New("forced inclusion not configured, returning empty batch") + return nil, errors.New("forced inclusion not configured") } else if errors.Is(err, coreda.ErrHeightFromFuture) { // If we get a height from future error, keep the current DA height and return batch // We'll retry the same height on the next call until DA produces that block From ca1cb0019b262851a6c55764aabb509752380cd7 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 2 Dec 2025 
12:06:32 +0100 Subject: [PATCH 11/15] test: remove unnecessary tests --- sequencers/based/sequencer_test.go | 23 ------------------- sequencers/single/sequencer_test.go | 35 ----------------------------- 2 files changed, 58 deletions(-) diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index ec57bd46f..3d624dce4 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -79,29 +79,6 @@ func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ( return args.Get(0).([][]byte), args.Error(1) } -func TestNewBasedSequencer(t *testing.T) { - mockDA := new(MockDA) - gen := genesis.Genesis{ - ChainID: "test-chain", - DAStartHeight: 100, - DAEpochForcedInclusion: 10, - } - - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) - - require.NotNil(t, seq) - assert.Equal(t, uint64(100), seq.daHeight.Load()) - assert.Equal(t, 0, len(seq.txQueue)) -} - func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { mockDA := new(MockDA) gen := genesis.Genesis{ diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 90da6fb3a..7dab6b52d 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -33,41 +33,6 @@ func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Con return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) } -func TestNewSequencer(t *testing.T) { - // Create a new sequencer with mock DA client - dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) - metrics, _ := NopMetrics() - db := ds.NewMapDatastore() - ctx, cancel := context.WithTimeout(context.Background(), 
3*time.Second) - defer cancel() - logger := zerolog.Nop() - mockRetriever := new(MockForcedInclusionRetriever) - mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). - Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) - if err != nil { - t.Fatalf("Failed to create sequencer: %v", err) - } - defer func() { - err := db.Close() - if err != nil { - t.Fatalf("Failed to close sequencer: %v", err) - } - }() - - // Check if the sequencer was created with the correct values - if seq == nil { - t.Fatal("Expected sequencer to not be nil") - } - - if seq.queue == nil { - t.Fatal("Expected batch queue to not be nil") - } - if seq.da == nil { - t.Fatal("Expected DA client to not be nil") - } -} - func TestSequencer_SubmitBatchTxs(t *testing.T) { // Initialize a new sequencer metrics, _ := NopMetrics() From 32c089e2a765dfbd03c528567660c883e22da7b9 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 2 Dec 2025 16:38:07 +0100 Subject: [PATCH 12/15] add todo for follow-up --- sequencers/based/sequencer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index afd2f1ab2..5b55f9062 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -124,7 +124,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get return &coresequencer.GetNextBatchResponse{ Batch: batch, - Timestamp: time.Now(), + Timestamp: time.Time{}, // TODO(@julienrbrt): we need to use DA block timestamp for determinism BatchData: req.LastBatchData, }, nil } From eba337639d7654ce787545909d1cf491c4aa5b8f Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 3 Dec 2025 12:07:16 +0100 Subject: [PATCH 13/15] fix: fix da height bumping and improve retriever --- .../internal/da/forced_inclusion_retriever.go | 71 
+++++++++---------- sequencers/based/sequencer.go | 16 ++--- sequencers/common/size_validation.go | 2 +- sequencers/single/sequencer.go | 24 ++++--- 4 files changed, 54 insertions(+), 59 deletions(-) diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go index dded3004c..18bb18066 100644 --- a/block/internal/da/forced_inclusion_retriever.go +++ b/block/internal/da/forced_inclusion_retriever.go @@ -53,11 +53,11 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context epochStart, epochEnd, currentEpochNumber := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) - if daHeight != epochStart { + if daHeight != epochEnd { r.logger.Debug(). Uint64("da_height", daHeight). - Uint64("epoch_start", epochStart). - Msg("not at epoch start - returning empty transactions") + Uint64("epoch_end", epochEnd). + Msg("not at epoch end - returning empty transactions") return &ForcedInclusionEvent{ StartDaHeight: daHeight, @@ -68,16 +68,10 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context event := &ForcedInclusionEvent{ StartDaHeight: epochStart, + EndDaHeight: epochEnd, Txs: [][]byte{}, } - r.logger.Debug(). - Uint64("da_height", daHeight). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", epochEnd). - Uint64("epoch_num", currentEpochNumber). - Msg("retrieving forced included transactions from DA") - epochEndResult := r.client.RetrieveForcedInclusion(ctx, epochEnd) if epochEndResult.Code == coreda.StatusHeightFromFuture { r.logger.Debug(). @@ -97,44 +91,47 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context } } - lastProcessedHeight := epochStart + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("epoch_num", currentEpochNumber). 
+ Msg("retrieving forced included transactions from DA") - if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochStartResult, epochStart); err != nil { - return nil, err - } + var processErrs error + err := r.processForcedInclusionBlobs(event, epochStartResult, epochStart) + processErrs = errors.Join(processErrs, err) // Process heights between start and end (exclusive) for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { result := r.client.RetrieveForcedInclusion(ctx, epochHeight) - // If any intermediate height is from future, break early - if result.Code == coreda.StatusHeightFromFuture { - r.logger.Debug(). - Uint64("epoch_height", epochHeight). - Uint64("last_processed", lastProcessedHeight). - Msg("reached future DA height within epoch - stopping") - break - } - - if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, result, epochHeight); err != nil { - return nil, err - } + err = r.processForcedInclusionBlobs(event, result, epochHeight) + processErrs = errors.Join(processErrs, err) } // Process epoch end (only if different from start) if epochEnd != epochStart { - if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochEndResult, epochEnd); err != nil { - return nil, err - } + err = r.processForcedInclusionBlobs(event, epochEndResult, epochEnd) + processErrs = errors.Join(processErrs, err) } - event.EndDaHeight = lastProcessedHeight + // any error during process, need to retry at next call + if processErrs != nil { + r.logger.Warn(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("epoch_num", currentEpochNumber). + Err(processErrs). + Msg("Failed to retrieve DA epoch.. retrying next iteration") - r.logger.Info(). - Uint64("epoch_start", epochStart). - Uint64("epoch_end", lastProcessedHeight). - Int("tx_count", len(event.Txs)). 
- Msg("retrieved forced inclusion transactions") + return &ForcedInclusionEvent{ + StartDaHeight: daHeight, + EndDaHeight: daHeight, + Txs: [][]byte{}, + }, nil + } return event, nil } @@ -142,13 +139,11 @@ func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context // processForcedInclusionBlobs processes blobs from a single DA height for forced inclusion. func (r *ForcedInclusionRetriever) processForcedInclusionBlobs( event *ForcedInclusionEvent, - lastProcessedHeight *uint64, result coreda.ResultRetrieve, height uint64, ) error { if result.Code == coreda.StatusNotFound { r.logger.Debug().Uint64("height", height).Msg("no forced inclusion blobs at height") - *lastProcessedHeight = height return nil } @@ -163,8 +158,6 @@ func (r *ForcedInclusionRetriever) processForcedInclusionBlobs( } } - *lastProcessedHeight = height - r.logger.Debug(). Uint64("height", height). Int("blob_count", len(result.Data)). diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 5b55f9062..e209365db 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -67,7 +67,7 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.S // GetNextBatch retrieves the next batch of transactions from the DA layer // It fetches forced inclusion transactions and returns them as the next batch func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { - currentDAHeight := s.daHeight.Load() + currentDAHeight := s.GetDAHeight() s.logger.Debug().Uint64("da_height", currentDAHeight).Msg("fetching forced inclusion transactions from DA") @@ -86,13 +86,13 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") return nil, err } - } - - // Update DA height based on the retrieved event - if 
forcedTxsEvent.EndDaHeight > currentDAHeight { - s.SetDAHeight(forcedTxsEvent.EndDaHeight) - } else if forcedTxsEvent.StartDaHeight > currentDAHeight { - s.SetDAHeight(forcedTxsEvent.StartDaHeight) + } else { + // Update DA height. + // If we are in between epochs, we still need to bump the da height. + // At the end of an epoch, we need to bump to go to the next epoch. + if forcedTxsEvent.EndDaHeight >= currentDAHeight { + s.SetDAHeight(forcedTxsEvent.EndDaHeight + 1) + } } // Add forced inclusion transactions to the queue with validation diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go index 7484d3a54..ee781ce20 100644 --- a/sequencers/common/size_validation.go +++ b/sequencers/common/size_validation.go @@ -12,7 +12,7 @@ const ( // This checks against the DA layer limit, not the per-batch limit. // Returns true if the blob is within the absolute size limit, false otherwise. func ValidateBlobSize(blob []byte) bool { - return uint64(len(blob)) <= AbsoluteMaxBlobSize + return uint64(GetBlobSize(blob)) <= AbsoluteMaxBlobSize } // WouldExceedCumulativeSize checks if adding a blob would exceed the cumulative size limit for a batch. diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index e466fbcd8..b22f3b4d3 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -130,9 +130,9 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB return nil, ErrInvalidId } - currentDAHeight := c.daHeight.Load() + currentDAHeight := c.GetDAHeight() - forcedEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + forcedTxsEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) if err != nil { if errors.Is(err, coreda.ErrHeightFromFuture) { c.logger.Debug(). 
@@ -143,25 +143,27 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB } // Still create an empty forced inclusion event - forcedEvent = &block.ForcedInclusionEvent{ + forcedTxsEvent = &block.ForcedInclusionEvent{ Txs: [][]byte{}, StartDaHeight: currentDAHeight, EndDaHeight: currentDAHeight, } + } else { + // Update DA height. + // If we are in between epochs, we still need to bump the da height. + // At the end of an epoch, we need to bump to go to the next epoch. + if forcedTxsEvent.EndDaHeight >= currentDAHeight { + c.SetDAHeight(forcedTxsEvent.EndDaHeight + 1) + } } // Always try to process forced inclusion transactions (including pending from previous epochs) - forcedTxs := c.processForcedInclusionTxs(forcedEvent, req.MaxBytes) - if forcedEvent.EndDaHeight > currentDAHeight { - c.SetDAHeight(forcedEvent.EndDaHeight) - } else if forcedEvent.StartDaHeight > currentDAHeight { - c.SetDAHeight(forcedEvent.StartDaHeight) - } + forcedTxs := c.processForcedInclusionTxs(forcedTxsEvent, req.MaxBytes) c.logger.Debug(). Int("tx_count", len(forcedTxs)). - Uint64("da_height_start", forcedEvent.StartDaHeight). - Uint64("da_height_end", forcedEvent.EndDaHeight). + Uint64("da_height_start", forcedTxsEvent.StartDaHeight). + Uint64("da_height_end", forcedTxsEvent.EndDaHeight). 
Msg("retrieved forced inclusion transactions from DA") // Calculate size used by forced inclusion transactions From 7a6112e25c99fef3e35d726a1cde49adaaa2637e Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 3 Dec 2025 14:05:18 +0100 Subject: [PATCH 14/15] fix tests --- .../da/forced_inclusion_retriever_test.go | 42 +++++++++---------- .../syncing/syncer_forced_inclusion_test.go | 16 +++---- sequencers/based/sequencer_test.go | 27 +++++++----- sequencers/single/sequencer_test.go | 16 +++---- 4 files changed, 51 insertions(+), 50 deletions(-) diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go index e58612573..e51bbe22f 100644 --- a/block/internal/da/forced_inclusion_retriever_test.go +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -148,7 +148,8 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailab retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) ctx := context.Background() - _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + // Epoch boundaries: [100, 109] - retrieval happens at epoch end (109) + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 109) assert.Assert(t, err != nil) assert.ErrorContains(t, err, "not yet available") } @@ -237,7 +238,8 @@ func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t * retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) ctx := context.Background() - event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + // Epoch boundaries: [100, 102] - retrieval happens at epoch end (102) + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 102) assert.NilError(t, err) assert.Assert(t, event != nil) assert.Equal(t, event.StartDaHeight, uint64(100)) @@ -265,12 +267,11 @@ func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) tests := []struct { - name string - 
result coreda.ResultRetrieve - height uint64 - expectedTxCount int - expectedLastHeight uint64 - expectError bool + name string + result coreda.ResultRetrieve + height uint64 + expectedTxCount int + expectError bool }{ { name: "success with blobs", @@ -280,10 +281,9 @@ func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { }, Data: [][]byte{[]byte("tx1"), []byte("tx2")}, }, - height: 100, - expectedTxCount: 2, - expectedLastHeight: 100, - expectError: false, + height: 100, + expectedTxCount: 2, + expectError: false, }, { name: "not found", @@ -292,10 +292,9 @@ func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { Code: coreda.StatusNotFound, }, }, - height: 100, - expectedTxCount: 0, - expectedLastHeight: 100, - expectError: false, + height: 100, + expectedTxCount: 0, + expectError: false, }, { name: "error status", @@ -316,10 +315,9 @@ func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { }, Data: [][]byte{[]byte("tx1"), {}, []byte("tx2")}, }, - height: 100, - expectedTxCount: 2, - expectedLastHeight: 100, - expectError: false, + height: 100, + expectedTxCount: 2, + expectError: false, }, } @@ -328,16 +326,14 @@ func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { event := &ForcedInclusionEvent{ Txs: [][]byte{}, } - lastHeight := uint64(0) - err := retriever.processForcedInclusionBlobs(event, &lastHeight, tt.result, tt.height) + err := retriever.processForcedInclusionBlobs(event, tt.result, tt.height) if tt.expectError { assert.Assert(t, err != nil) } else { assert.NilError(t, err) assert.Equal(t, len(event.Txs), tt.expectedTxCount) - assert.Equal(t, lastHeight, tt.expectedLastHeight) } }) } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index d07cb86b3..741432eb2 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ 
-517,7 +517,7 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) - // Mock DA retrieval for first block at DA height 100 + // Mock DA retrieval for first block at DA height 104 (epoch end) // Epoch boundaries: [100, 104] (epoch size is 5) // The retriever will fetch all heights in the epoch: 100, 101, 102, 103, 104 @@ -548,7 +548,7 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { data1.Txs[1] = types.Tx([]byte("regular_tx_1")) currentState := s.GetLastState() - currentState.DAHeight = 100 + currentState.DAHeight = 104 // Verify - should pass since dataBin2 can be deferred within epoch err = s.verifyForcedInclusionTxs(currentState, data1) @@ -562,7 +562,7 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { }) require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") - // Mock DA for second verification at same epoch (height 100) + // Mock DA for second verification at same epoch (height 104 - epoch end) mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() @@ -691,7 +691,7 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { data1.Txs[0] = types.Tx([]byte("regular_tx_1")) currentState := s.GetLastState() - currentState.DAHeight = 100 + currentState.DAHeight = 102 // Verify - should pass, tx can be deferred within epoch err = s.verifyForcedInclusionTxs(currentState, data1) @@ -706,7 +706,7 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") // Process another block within same epoch - forced tx still not 
included - // Mock DA for second verification at same epoch (height 100) + // Mock DA for second verification at same epoch (height 102 - epoch end) mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() @@ -730,7 +730,7 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { err = s.verifyForcedInclusionTxs(currentState, data2) require.NoError(t, err) - // Mock DA retrieval for next epoch (DA height 103) + // Mock DA retrieval for next epoch (DA height 105 - epoch end) // Epoch boundaries: [103, 105] // The retriever will fetch heights 103, 104, 105 @@ -749,11 +749,11 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - // Third block is in the next epoch (past 102) without including the forced tx + // Third block is in the next epoch (at epoch end 105) without including the forced tx data3 := makeData(gen.ChainID, 3, 1) data3.Txs[0] = types.Tx([]byte("regular_tx_3")) - currentState.DAHeight = 103 // Past epoch end [100, 102] + currentState.DAHeight = 105 // At epoch end [103, 105], past previous epoch [100, 102] // Verify - should FAIL since forced tx from previous epoch was never included err = s.verifyForcedInclusionTxs(currentState, data3) diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index 3d624dce4..0f994f91f 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -151,8 +151,8 @@ func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0]) assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1]) - // DA height should be updated - assert.Equal(t, uint64(100), 
seq.GetDAHeight()) + // DA height should be updated to epochEnd + 1 + assert.Equal(t, uint64(101), seq.GetDAHeight()) mockDA.AssertExpectations(t) } @@ -225,15 +225,16 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { } mockDA := new(MockDA) - // First call returns forced txs + // First call returns forced txs at height 100 mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, Timestamp: time.Now(), }, nil).Once() mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil).Once() - // Subsequent calls should return no new forced txs - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + // Subsequent calls at height 101 and 102 (after DA height bumps) should return no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + mockDA.On("GetIDs", mock.Anything, uint64(102), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() gen := genesis.Genesis{ ChainID: "test-chain", @@ -339,8 +340,8 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin }, nil).Once() mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once() - // Second call: no new forced txs - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + // Second call: no new forced txs at height 101 (after first call bumped DA height to epochEnd + 1) + mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() gen := genesis.Genesis{ ChainID: "test-chain", @@ -406,8 +407,8 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T }, nil).Once() mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once() - // 
Second call - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + // Second call at height 101 (after first call bumped DA height to epochEnd + 1) + mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() gen := genesis.Genesis{ ChainID: "test-chain", @@ -537,8 +538,12 @@ func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { LastBatchData: nil, } - _, err := seq.GetNextBatch(context.Background(), req) - require.Error(t, err) + // With new error handling, errors during blob processing return empty batch instead of error + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions), "Should return empty batch on DA error") mockDA.AssertExpectations(t) } diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 7dab6b52d..b8aac29da 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -554,11 +554,11 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { EndDaHeight: 100, }, nil).Once() - // Second call should process pending tx - mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + // Second call should process pending tx at DA height 101 (after first call bumped it to epochEnd + 1) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)).Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, - StartDaHeight: 100, - EndDaHeight: 100, + StartDaHeight: 101, + EndDaHeight: 101, }, nil).Once() gen := genesis.Genesis{ @@ -627,11 +627,11 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) EndDaHeight: 100, }, nil).Once() - // Second call returns no new forced txs, but pending should still be processed - 
mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + // Second call returns no new forced txs at height 101 (after first call bumped DA height to epochEnd + 1), but pending should still be processed + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)).Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, - StartDaHeight: 100, - EndDaHeight: 100, + StartDaHeight: 101, + EndDaHeight: 101, }, nil).Once() gen := genesis.Genesis{ From 9b478fabd9d65f1b0c3fcf9d5d9346250bd16d36 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 3 Dec 2025 15:48:42 +0100 Subject: [PATCH 15/15] merge conflict fix --- apps/testapp/cmd/run.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index a620b50c9..b2035b561 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -141,11 +141,6 @@ func createSequencer( return basedSeq, nil } - singleMetrics, err := single.NopMetrics() - if err != nil { - return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) - } - sequencer, err := single.NewSequencer( ctx, logger, @@ -153,7 +148,6 @@ func createSequencer( da, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, - singleMetrics, nodeConfig.Node.Aggregator, 1000, fiRetriever,