diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38a540e07..0fdb4a935 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+### Added
+
+- Implement forced inclusion and based sequencing ([#2797](https://github.com/evstack/ev-node/pull/2797))
+  This change requires adding a `da_epoch_forced_inclusion` field to the `genesis.json` file.
+  To enable the feature, set the forced inclusion namespace in `evnode.yaml`.
+
 ### Changed

 - Rename `evm-single` to `evm` and `grpc-single` to `evgrpc` for clarity. [#2839](https://github.com/evstack/ev-node/pull/2839)
diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go
index 8d7926404..5ce22e8e3 100644
--- a/apps/evm/cmd/run.go
+++ b/apps/evm/cmd/run.go
@@ -12,6 +12,7 @@ import (
 	"github.com/rs/zerolog"
 	"github.com/spf13/cobra"

+	"github.com/evstack/ev-node/block"
 	"github.com/evstack/ev-node/core/da"
 	"github.com/evstack/ev-node/core/execution"
 	coresequencer "github.com/evstack/ev-node/core/sequencer"
@@ -25,6 +26,8 @@ import (
 	"github.com/evstack/ev-node/pkg/p2p"
 	"github.com/evstack/ev-node/pkg/p2p/key"
 	"github.com/evstack/ev-node/pkg/store"
+	"github.com/evstack/ev-node/sequencers/based"
+	seqcommon "github.com/evstack/ev-node/sequencers/common"
 	"github.com/evstack/ev-node/sequencers/single"
 )

@@ -55,7 +58,7 @@ var RunCmd = &cobra.Command{

 		logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces")

-		daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize)
+		daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize)
 		if err != nil {
 			return err
 		}
@@ -101,6 +104,8 @@ func init() {
 }

 // createSequencer creates a sequencer based on the configuration.
+// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA.
+// Otherwise, it creates a single (traditional) sequencer.
 func createSequencer(
 	ctx context.Context,
 	logger zerolog.Logger,
@@ -109,6 +114,25 @@ func createSequencer(
 	nodeConfig config.Config,
 	genesis genesis.Genesis,
 ) (coresequencer.Sequencer, error) {
+	daClient := block.NewDAClient(da, nodeConfig, logger)
+	fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger)
+
+	if nodeConfig.Node.BasedSequencer {
+		// Based sequencer mode - fetch transactions only from DA
+		if !nodeConfig.Node.Aggregator {
+			return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled")
+		}
+
+		basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger)
+
+		logger.Info().
+			Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()).
+			Uint64("da_epoch", genesis.DAEpochForcedInclusion).
+			Msg("based sequencer initialized")
+
+		return basedSeq, nil
+	}
+
 	singleMetrics, err := single.NopMetrics()
 	if err != nil {
 		return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err)
@@ -123,11 +147,18 @@ func createSequencer(
 		nodeConfig.Node.BlockTime.Duration,
 		singleMetrics,
 		nodeConfig.Node.Aggregator,
+		1000,
+		fiRetriever,
+		genesis,
 	)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create single sequencer: %w", err)
 	}

+	logger.Info().
+		Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()).
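+		// empty when no forced inclusion namespace is configured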
+ Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/evm/go.mod b/apps/evm/go.mod index 126891fed..3be04a51b 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -12,10 +12,10 @@ replace ( require ( github.com/celestiaorg/go-header v0.7.3 - github.com/ethereum/go-ethereum v1.16.5 + github.com/ethereum/go-ethereum v1.16.7 github.com/evstack/ev-node v1.0.0-beta.10 github.com/evstack/ev-node/core v1.0.0-beta.5 - github.com/evstack/ev-node/da v1.0.0-beta.6 + github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 github.com/ipfs/go-datastore v0.9.0 github.com/rs/zerolog v1.34.0 @@ -26,6 +26,7 @@ require ( connectrpc.com/connect v1.19.1 // indirect connectrpc.com/grpcreflect v1.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -33,7 +34,7 @@ require ( github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/consensys/gnark-crypto v0.18.1 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -44,7 +45,7 @@ require ( github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/ethereum/c-kzg-4844/v2 v2.1.3 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 173fb3ba0..e2f2939d4 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -16,6 +16,8 @@ github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= @@ -55,8 +57,8 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/gnark-crypto 
v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= -github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -95,12 +97,12 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= -github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= -github.com/ethereum/go-ethereum v1.16.5 h1:GZI995PZkzP7ySCxEFaOPzS8+bd8NldE//1qvQDQpe0= -github.com/ethereum/go-ethereum v1.16.5/go.mod h1:kId9vOtlYg3PZk9VwKbGlQmSACB5ESPTBGT+M9zjmok= +github.com/ethereum/go-ethereum v1.16.7 h1:qeM4TvbrWK0UC0tgkZ7NiRsmBGwsjqc64BHo20U59UQ= +github.com/ethereum/go-ethereum v1.16.7/go.mod h1:Fs6QebQbavneQTYcA39PEKv2+zIjX7rPUZ14DER46wk= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 h1:xo0mZz3CJtntP1RPLFDBubBKpNkqStImt9H9N0xysj8= diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 484f51d7a..4439aee2e 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -9,6 +9,7 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" @@ -22,6 +23,8 @@ import ( "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -57,7 +60,7 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, 
nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -118,6 +121,25 @@ func createSequencer( nodeConfig config.Config, genesis genesis.Genesis, ) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + singleMetrics, err := single.NopMetrics() if err != nil { return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) @@ -132,11 +154,18 @@ func createSequencer( nodeConfig.Node.BlockTime.Duration, singleMetrics, nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, ) if err != nil { return nil, fmt.Errorf("failed to create single sequencer: %w", err) } + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Msg("single sequencer initialized") + return sequencer, nil } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c72d220cd..dd3440b86 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -5,17 +5,24 @@ import ( "fmt" "path/filepath" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/da/jsonrpc" "github.com/evstack/ev-node/node" - rollcmd "github.com/evstack/ev-node/pkg/cmd" - genesispkg "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -23,16 +30,16 @@ var RunCmd = &cobra.Command{ Use: "start", Aliases: []string{"node", "run"}, Short: "Run the testapp node", - RunE: func(cmd *cobra.Command, args []string) error { - nodeConfig, err := rollcmd.ParseConfig(cmd) + RunE: func(command *cobra.Command, args []string) error { + nodeConfig, err := cmd.ParseConfig(command) if err != nil { return err } - logger := rollcmd.SetupLogger(nodeConfig.Log) + logger := cmd.SetupLogger(nodeConfig.Log) // Get KV endpoint flag - kvEndpoint, _ := cmd.Flags().GetString(flagKVEndpoint) + kvEndpoint, _ := command.Flags().GetString(flagKVEndpoint) if kvEndpoint == "" { logger.Info().Msg("KV endpoint flag not set, using default from http_server") } @@ -51,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(ctx, logger, 
nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -66,11 +73,6 @@ var RunCmd = &cobra.Command{ return err } - singleMetrics, err := single.NopMetrics() - if err != nil { - return err - } - // Start the KV executor HTTP server if kvEndpoint != "" { // Only start if endpoint is provided httpServer := kvexecutor.NewHTTPServer(executor, kvEndpoint) @@ -83,7 +85,7 @@ var RunCmd = &cobra.Command{ } genesisPath := filepath.Join(filepath.Dir(nodeConfig.ConfigPath()), "genesis.json") - genesis, err := genesispkg.LoadGenesis(genesisPath) + genesis, err := genesis.LoadGenesis(genesisPath) if err != nil { return fmt.Errorf("failed to load genesis: %w", err) } @@ -92,16 +94,8 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - sequencer, err := single.NewSequencer( - ctx, - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(ctx, logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -111,6 +105,65 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return cmd.StartNode(logger, command, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } + +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + + return sequencer, nil +} diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 0e6ed2f89..ff077783c 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -16,6 +16,7 @@ require ( github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.11.1 ) @@ -79,7 +80,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -129,7 +130,6 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index eeafba1a8..c2e0e46a7 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -229,8 +229,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/block/components.go b/block/components.go index 546cda62c..bd29f9244 100644 --- a/block/components.go +++ b/block/components.go @@ -245,6 +245,15 @@ func NewAggregatorComponents( return nil, fmt.Errorf("failed to create reaper: %w", err) } + if config.Node.BasedSequencer { // no submissions needed for bases sequencer + return &Components{ + Executor: executor, + Reaper: reaper, + Cache: cacheManager, + errorCh: errorCh, + }, nil + } + // Create DA client and submitter for aggregator nodes (with signer for submission) daClient := NewDAClient(da, config, logger) daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) diff --git a/block/components_test.go b/block/components_test.go index eadf45328..c288f5322 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -203,6 +203,9 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) { mockExec.On("InitChain", mock.Anything, mock.Anything, 
mock.Anything, mock.Anything). Return([]byte("state-root"), uint64(1024), nil).Once() + // Mock SetDAHeight to be called during initialization + mockSeq.On("SetDAHeight", uint64(0)).Return().Once() + // Mock GetNextBatch to return empty batch mockSeq.On("GetNextBatch", mock.Anything, mock.Anything). Return(&coresequencer.GetNextBatchResponse{ diff --git a/block/internal/da/client.go b/block/internal/da/client.go index 571e5f765..01c5bf981 100644 --- a/block/internal/da/client.go +++ b/block/internal/da/client.go @@ -20,29 +20,35 @@ type Client interface { Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve + RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve GetHeaderNamespace() []byte GetDataNamespace() []byte + GetForcedInclusionNamespace() []byte + HasForcedInclusionNamespace() bool GetDA() coreda.DA } // client provides a reusable wrapper around the core DA interface // with common configuration for namespace handling and timeouts. type client struct { - da coreda.DA - logger zerolog.Logger - defaultTimeout time.Duration - namespaceBz []byte - namespaceDataBz []byte + da coreda.DA + logger zerolog.Logger + defaultTimeout time.Duration + namespaceBz []byte + namespaceDataBz []byte + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool } // Config contains configuration for the DA client. type Config struct { - DA coreda.DA - Logger zerolog.Logger - DefaultTimeout time.Duration - Namespace string - DataNamespace string + DA coreda.DA + Logger zerolog.Logger + DefaultTimeout time.Duration + Namespace string + DataNamespace string + ForcedInclusionNamespace string } // NewClient creates a new DA client with pre-calculated namespace bytes. @@ -51,12 +57,20 @@ func NewClient(cfg Config) *client { cfg.DefaultTimeout = 30 * time.Second } + hasForcedInclusionNs := cfg.ForcedInclusionNamespace != "" + var namespaceForcedInclusionBz []byte + if hasForcedInclusionNs { + namespaceForcedInclusionBz = coreda.NamespaceFromString(cfg.ForcedInclusionNamespace).Bytes() + } + return &client{ - da: cfg.DA, - logger: cfg.Logger.With().Str("component", "da_client").Logger(), - defaultTimeout: cfg.DefaultTimeout, - namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), + da: cfg.DA, + logger: cfg.Logger.With().Str("component", "da_client").Logger(), + defaultTimeout: cfg.DefaultTimeout, + namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), + namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), + namespaceForcedInclusionBz: namespaceForcedInclusionBz, + hasForcedInclusionNs: hasForcedInclusionNs, } } @@ -248,6 +262,19 @@ func (c *client) RetrieveData(ctx context.Context, height uint64) coreda.ResultR return c.Retrieve(ctx, height, c.namespaceDataBz) } +// RetrieveForcedInclusion retrieves blobs from the forced inclusion namespace at the specified height. +func (c *client) RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve { + if !c.hasForcedInclusionNs { + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "forced inclusion namespace not configured", + }, + } + } + return c.Retrieve(ctx, height, c.namespaceForcedInclusionBz) +} + // GetHeaderNamespace returns the header namespace bytes. 
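 // The bytes are pre-computed once in NewClient, so per-call retrieval does
 // not re-derive the namespace on every request.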
func (c *client) GetHeaderNamespace() []byte { return c.namespaceBz @@ -258,6 +285,16 @@ func (c *client) GetDataNamespace() []byte { return c.namespaceDataBz } +// GetForcedInclusionNamespace returns the forced inclusion namespace bytes. +func (c *client) GetForcedInclusionNamespace() []byte { + return c.namespaceForcedInclusionBz +} + +// HasForcedInclusionNamespace returns whether forced inclusion namespace is configured. +func (c *client) HasForcedInclusionNamespace() bool { + return c.hasForcedInclusionNs +} + // GetDA returns the underlying DA interface for advanced usage. func (c *client) GetDA() coreda.DA { return c.da diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go index 788aab2b3..7bc7e972a 100644 --- a/block/internal/da/client_test.go +++ b/block/internal/da/client_test.go @@ -68,11 +68,12 @@ func TestNewClient(t *testing.T) { { name: "with all namespaces", cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - DefaultTimeout: 5 * time.Second, - Namespace: "test-ns", - DataNamespace: "test-data-ns", + DA: &mockDA{}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", }, }, { @@ -104,6 +105,13 @@ func TestNewClient(t *testing.T) { assert.Assert(t, len(client.namespaceBz) > 0) assert.Assert(t, len(client.namespaceDataBz) > 0) + if tt.cfg.ForcedInclusionNamespace != "" { + assert.Assert(t, client.hasForcedInclusionNs) + assert.Assert(t, len(client.namespaceForcedInclusionBz) > 0) + } else { + assert.Assert(t, !client.hasForcedInclusionNs) + } + expectedTimeout := tt.cfg.DefaultTimeout if expectedTimeout == 0 { expectedTimeout = 30 * time.Second @@ -113,12 +121,50 @@ func TestNewClient(t *testing.T) { } } +func TestClient_HasForcedInclusionNamespace(t *testing.T) { + tests := []struct { + name string + cfg Config + expected bool + }{ + { + name: "with forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }, + expected: true, + }, + { + name: "without forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + assert.Equal(t, client.HasForcedInclusionNamespace(), tt.expected) + }) + } +} + func TestClient_GetNamespaces(t *testing.T) { cfg := Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - Namespace: "test-header", - DataNamespace: "test-data", + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-header", + DataNamespace: "test-data", + ForcedInclusionNamespace: "test-fi", } client := NewClient(cfg) @@ -129,8 +175,29 @@ func TestClient_GetNamespaces(t *testing.T) { dataNs := client.GetDataNamespace() assert.Assert(t, len(dataNs) > 0) + fiNs := client.GetForcedInclusionNamespace() + assert.Assert(t, len(fiNs) > 0) + // Namespaces should be different assert.Assert(t, string(headerNs) != string(dataNs)) + assert.Assert(t, string(headerNs) != string(fiNs)) + assert.Assert(t, string(dataNs) != string(fiNs)) +} + +func TestClient_RetrieveForcedInclusion_NotConfigured(t *testing.T) { + cfg := Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + } + + client := NewClient(cfg) + ctx := context.Background() + + result := 
client.RetrieveForcedInclusion(ctx, 100) + assert.Equal(t, result.Code, coreda.StatusError) + assert.Assert(t, result.Message != "") } func TestClient_GetDA(t *testing.T) { diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go new file mode 100644 index 000000000..5f5047338 --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever.go @@ -0,0 +1,177 @@ +package da + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/types" +) + +// ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. +var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured") + +// ForcedInclusionRetriever handles retrieval of forced inclusion transactions from DA. +type ForcedInclusionRetriever struct { + client Client + genesis genesis.Genesis + logger zerolog.Logger + daEpochSize uint64 +} + +// ForcedInclusionEvent contains forced inclusion transactions retrieved from DA. +type ForcedInclusionEvent struct { + StartDaHeight uint64 + EndDaHeight uint64 + Txs [][]byte +} + +// NewForcedInclusionRetriever creates a new forced inclusion retriever. +func NewForcedInclusionRetriever( + client Client, + genesis genesis.Genesis, + logger zerolog.Logger, +) *ForcedInclusionRetriever { + return &ForcedInclusionRetriever{ + client: client, + genesis: genesis, + logger: logger.With().Str("component", "forced_inclusion_retriever").Logger(), + daEpochSize: genesis.DAEpochForcedInclusion, + } +} + +// RetrieveForcedIncludedTxs retrieves forced inclusion transactions at the given DA height. +// It respects epoch boundaries and only fetches at epoch start. +func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + if !r.client.HasForcedInclusionNamespace() { + return nil, ErrForceInclusionNotConfigured + } + + epochStart, epochEnd := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + + if daHeight != epochStart { + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Msg("not at epoch start - returning empty transactions") + + return &ForcedInclusionEvent{ + StartDaHeight: daHeight, + EndDaHeight: daHeight, + Txs: [][]byte{}, + }, nil + } + + // We're at epoch start - fetch transactions from DA + currentEpochNumber := types.CalculateEpochNumber(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + + event := &ForcedInclusionEvent{ + StartDaHeight: epochStart, + Txs: [][]byte{}, + } + + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("epoch_num", currentEpochNumber). + Msg("retrieving forced included transactions from DA") + + epochStartResult := r.client.RetrieveForcedInclusion(ctx, epochStart) + if epochStartResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Msg("epoch start height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) + } + + epochEndResult := epochStartResult + if epochStart != epochEnd { + epochEndResult = r.client.RetrieveForcedInclusion(ctx, epochEnd) + if epochEndResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_end", epochEnd). 
+ Msg("epoch end height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) + } + } + + lastProcessedHeight := epochStart + + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochStartResult, epochStart); err != nil { + return nil, err + } + + // Process heights between start and end (exclusive) + for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { + result := r.client.RetrieveForcedInclusion(ctx, epochHeight) + + // If any intermediate height is from future, break early + if result.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_height", epochHeight). + Uint64("last_processed", lastProcessedHeight). + Msg("reached future DA height within epoch - stopping") + break + } + + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, result, epochHeight); err != nil { + return nil, err + } + } + + // Process epoch end (only if different from start) + if epochEnd != epochStart { + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochEndResult, epochEnd); err != nil { + return nil, err + } + } + + event.EndDaHeight = lastProcessedHeight + + r.logger.Info(). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", lastProcessedHeight). + Int("tx_count", len(event.Txs)). + Msg("retrieved forced inclusion transactions") + + return event, nil +} + +// processForcedInclusionBlobs processes blobs from a single DA height for forced inclusion. +func (r *ForcedInclusionRetriever) processForcedInclusionBlobs( + event *ForcedInclusionEvent, + lastProcessedHeight *uint64, + result coreda.ResultRetrieve, + height uint64, +) error { + if result.Code == coreda.StatusNotFound { + r.logger.Debug().Uint64("height", height).Msg("no forced inclusion blobs at height") + *lastProcessedHeight = height + return nil + } + + if result.Code != coreda.StatusSuccess { + return fmt.Errorf("failed to retrieve forced inclusion blobs at height %d: %s", height, result.Message) + } + + // Process each blob as a transaction + for _, blob := range result.Data { + if len(blob) > 0 { + event.Txs = append(event.Txs, blob) + } + } + + *lastProcessedHeight = height + + r.logger.Debug(). + Uint64("height", height). + Int("blob_count", len(result.Data)). 
+ Msg("processed forced inclusion blobs") + + return nil +} diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go new file mode 100644 index 000000000..e58612573 --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -0,0 +1,344 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "gotest.tools/v3/assert" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" +) + +func TestNewForcedInclusionRetriever(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + assert.Assert(t, retriever != nil) + assert.Equal(t, retriever.daEpochSize, uint64(10)) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoNamespace(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + // No forced inclusion namespace + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not configured") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + // Height 105 is not an epoch start (100, 110, 120, etc. 
are epoch starts) + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 105) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(105)) + assert.Equal(t, event.EndDaHeight, uint64(105)) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t *testing.T) { + testBlobs := [][]byte{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return &coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + return testBlobs, nil + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + // Height 100 is an epoch start + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(100)) + assert.Equal(t, len(event.Txs), len(testBlobs)) + assert.DeepEqual(t, event.Txs[0], testBlobs[0]) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailable(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrHeightFromFuture + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not yet available") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrBlobNotFound + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t *testing.T) { + callCount := 0 + testBlobsByHeight := map[uint64][][]byte{ + 100: {[]byte("tx1"), []byte("tx2")}, + 101: {[]byte("tx3")}, + 102: {[]byte("tx4"), []byte("tx5"), []byte("tx6")}, + } + + mockDAInstance := &mockDA{ 
+ getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + callCount++ + blobs, exists := testBlobsByHeight[height] + if !exists { + return nil, coreda.ErrBlobNotFound + } + ids := make([]coreda.ID, len(blobs)) + for i := range blobs { + ids[i] = []byte("id") + } + return &coreda.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + // Return blobs based on current call count + switch callCount { + case 1: + return testBlobsByHeight[100], nil + case 2: + return testBlobsByHeight[101], nil + case 3: + return testBlobsByHeight[102], nil + default: + return nil, errors.New("unexpected call") + } + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 3, // Epoch: 100-102 + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(102)) + + // Should have collected all txs from all heights + expectedTxCount := len(testBlobsByHeight[100]) + len(testBlobsByHeight[101]) + len(testBlobsByHeight[102]) + assert.Equal(t, len(event.Txs), expectedTxCount) +} + +func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + + tests := []struct { + name string + result coreda.ResultRetrieve + height uint64 + expectedTxCount int + expectedLastHeight uint64 + expectError bool + }{ + { + name: "success with blobs", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "not found", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + }, + }, + height: 100, + expectedTxCount: 0, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "error status", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "test error", + }, + }, + height: 100, + expectError: true, + }, + { + name: "empty blobs are skipped", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), {}, []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &ForcedInclusionEvent{ + Txs: [][]byte{}, + } + lastHeight := uint64(0) + + err := retriever.processForcedInclusionBlobs(event, &lastHeight, tt.result, tt.height) + + if tt.expectError { + assert.Assert(t, err != nil) + } else { + assert.NilError(t, err) + assert.Equal(t, len(event.Txs), tt.expectedTxCount) + 
assert.Equal(t, lastHeight, tt.expectedLastHeight) + } + }) + } +} diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index be969b1a7..5a6b777de 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -67,6 +68,8 @@ type Executor struct { // - State transitions and validation // - P2P broadcasting of produced blocks // - DA submission of headers and data +// +// When BasedSequencer is enabled, signer can be nil as blocks are not signed. func NewExecutor( store store.Store, exec coreexecutor.Executor, @@ -82,17 +85,20 @@ func NewExecutor( options common.BlockOptions, errorCh chan<- error, ) (*Executor, error) { - if signer == nil { - return nil, errors.New("signer cannot be nil") - } + // For based sequencer, signer is optional as blocks are not signed + if !config.Node.BasedSequencer { + if signer == nil { + return nil, errors.New("signer cannot be nil") + } - addr, err := signer.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address: %w", err) - } + addr, err := signer.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address: %w", err) + } - if !bytes.Equal(addr, genesis.ProposerAddress) { - return nil, common.ErrNotProposer + if !bytes.Equal(addr, genesis.ProposerAddress) { + return nil, common.ErrNotProposer + } } return &Executor{ @@ -204,6 +210,7 @@ func (e *Executor) initializeState() error { } e.setLastState(state) + e.sequencer.SetDAHeight(state.DAHeight) // Initialize store height using batch for atomicity batch, err := e.store.NewBatch(e.ctx) @@ -379,8 +386,12 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to apply block: %w", err) } + // set the DA height in the sequencer + newState.DAHeight = e.sequencer.GetDAHeight() + // signing the header is done after applying the block // as for signing, the state of the block may be required by the signature payload provider. 
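Alongside signing, the diff threads a DA height through the sequencer: the executor seeds it from state on startup (`SetDAHeight` in `initializeState`) and copies it back into the new state after each block (`GetDAHeight`). A sketch of the coupling this implies; the `daHeightSequencer` name is hypothetical, and the real methods may simply be declared on `coresequencer.Sequencer` itself, as the mock expectations in the tests below suggest.

```go
package sketch

import coresequencer "github.com/evstack/ev-node/core/sequencer"

// daHeightSequencer captures the two methods the executor and the test mocks
// in this diff rely on, in addition to the existing sequencer behavior.
type daHeightSequencer interface {
	coresequencer.Sequencer
	// SetDAHeight seeds the sequencer with the DA height recorded in state
	// when the executor initializes.
	SetDAHeight(height uint64)
	// GetDAHeight reports how far into the DA chain the sequencer has read;
	// produceBlock writes it into newState.DAHeight.
	GetDAHeight() uint64
}
```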
+ // For based sequencer, this will return an empty signature signature, err := e.signHeader(header.Header) if err != nil { return fmt.Errorf("failed to sign header: %w", err) @@ -440,8 +451,9 @@ func (e *Executor) produceBlock() error { // retrieveBatch gets the next batch of transactions from the sequencer func (e *Executor) retrieveBatch(ctx context.Context) (*BatchData, error) { req := coresequencer.GetNextBatchRequest{ - Id: []byte(e.genesis.ChainID), - MaxBytes: common.DefaultMaxBlobSize, + Id: []byte(e.genesis.ChainID), + MaxBytes: common.DefaultMaxBlobSize, + LastBatchData: [][]byte{}, // Can be populated if needed for sequencer context } res, err := e.sequencer.GetNextBatch(ctx, req) @@ -495,16 +507,28 @@ func (e *Executor) createBlock(ctx context.Context, height uint64, batchData *Ba lastSignature = *lastSignaturePtr } - // Get signer info - pubKey, err := e.signer.GetPublic() - if err != nil { - return nil, nil, fmt.Errorf("failed to get public key: %w", err) - } + // Get signer info and validator hash + var pubKey crypto.PubKey + var validatorHash types.Hash - // Get validator hash - validatorHash, err := e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) - if err != nil { - return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + if e.signer != nil { + var err error + pubKey, err = e.signer.GetPublic() + if err != nil { + return nil, nil, fmt.Errorf("failed to get public key: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } + } else { + // For based sequencer without signer, use nil pubkey and compute validator hash + var err error + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, nil) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } } // Create header @@ -586,6 +610,11 @@ func (e *Executor) applyBlock(ctx context.Context, header types.Header, data *ty // signHeader signs the block header func (e *Executor) signHeader(header types.Header) (types.Signature, error) { + // For based sequencer, return empty signature as there is no signer + if e.signer == nil { + return types.Signature{}, nil + } + bz, err := e.options.AggregatorNodeSignatureBytesProvider(&header) if err != nil { return nil, fmt.Errorf("failed to get signature payload: %w", err) diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index b72f0a856..a11cf6a1c 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -73,6 +73,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -91,6 +92,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // Direct call to produceBlock should work (this is what lazy timer does) err = exec.produceBlock() require.NoError(t, err) @@ -113,6 +116,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) @@ -183,6 +188,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -201,6 +207,8 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 9aa79d0c4..6029186e8 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -95,6 +95,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() // initialize state (creates genesis block in store and sets state) require.NoError(t, exec.initializeState()) @@ -113,6 +114,8 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // produce one block err = exec.produceBlock() require.NoError(t, err) @@ -180,6 +183,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return([]byte("i0"), uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -196,6 +200,8 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), []byte("i0")). 
Return([]byte("i1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + require.NoError(t, exec.produceBlock()) h1, err := memStore.Height(context.Background()) require.NoError(t, err) diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 3f0e8b500..14daccddc 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -73,6 +73,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) // Set up context for first executor @@ -92,6 +93,8 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -189,6 +192,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { require.NoError(t, err) // Initialize state for second executor (should load existing state) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) // Set up context for second executor @@ -206,7 +210,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), currentState2.AppHash). Return([]byte("new_root_2"), uint64(1024), nil).Once() - // Note: mockSeq2 should NOT receive any calls because pending block should be used + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + + // Note: mockSeq2 should NOT receive GetNextBatch calls because pending block should be used err = exec2.produceBlock() require.NoError(t, err) @@ -289,6 +295,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) exec1.ctx, exec1.cancel = context.WithCancel(context.Background()) @@ -307,6 +314,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -338,6 +347,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { ) require.NoError(t, err) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) exec2.ctx, exec2.cancel = context.WithCancel(context.Background()) defer exec2.cancel() @@ -360,6 +370,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). 
Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec2.produceBlock() require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index ee69edea7..8d97da6b4 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -3,7 +3,9 @@ package syncing import ( "bytes" "context" + "crypto/sha256" "encoding/binary" + "encoding/hex" "errors" "fmt" "sync" @@ -58,6 +60,7 @@ type Syncer struct { // Handlers daRetriever DARetriever + fiRetriever *da.ForcedInclusionRetriever p2pHandler p2pHandler // Logging @@ -116,6 +119,7 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) + s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -453,6 +457,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { switch { case errors.Is(err, errInvalidBlock): // do not reschedule + case errors.Is(err, errMaliciousProposer): + s.sendCriticalError(fmt.Errorf("sequencer malicious. Restart the node with --node.aggregator --node.based_sequencer or keep the chain halted: %w", err)) case errors.Is(err, errInvalidState): s.sendCriticalError(fmt.Errorf("invalid state detected (block-height %d, state-height %d) "+ "- block references do not match local state. Manual intervention required: %w", event.Header.Height(), @@ -518,6 +524,16 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { return err } + // Verify forced inclusion transactions if configured + if err := s.verifyForcedInclusionTxs(currentState, data); err != nil { + s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") + if errors.Is(err, errMaliciousProposer) { + s.cache.RemoveHeaderDAIncluded(headerHash) + return err + } + } + + // Apply block newState, err := s.applyBlock(header.Header, data, currentState) if err != nil { return fmt.Errorf("failed to apply block: %w", err) @@ -641,6 +657,70 @@ func (s *Syncer) validateBlock(currState types.State, data *types.Data, header * return nil } +var errMaliciousProposer = errors.New("malicious proposer detected") + +// hashTx returns a hex-encoded SHA256 hash of the transaction. 
+func hashTx(tx []byte) string { + hash := sha256.Sum256(tx) + return hex.EncodeToString(hash[:]) +} + +// verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block +func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { + if s.fiRetriever == nil { + return nil + } + + // Retrieve forced inclusion transactions from DA + forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(s.ctx, currentState.DAHeight) + if err != nil { + if errors.Is(err, da.ErrForceInclusionNotConfigured) { + s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") + return nil + } + + return fmt.Errorf("failed to retrieve forced included txs from DA: %w", err) + } + + // If no forced inclusion transactions found, nothing to verify + if len(forcedIncludedTxsEvent.Txs) == 0 { + s.logger.Debug().Uint64("da_height", currentState.DAHeight).Msg("no forced inclusion transactions to verify") + return nil + } + + blockTxMap := make(map[string]struct{}) + for _, tx := range data.Txs { + blockTxMap[hashTx(tx)] = struct{}{} + } + + // Check if all forced inclusion transactions are present in the block + var missingTxs [][]byte + for _, forcedTx := range forcedIncludedTxsEvent.Txs { + if _, ok := blockTxMap[hashTx(forcedTx)]; !ok { + missingTxs = append(missingTxs, forcedTx) + } + } + + if len(missingTxs) > 0 { + s.logger.Error(). + Uint64("height", data.Height()). + Uint64("da_height", currentState.DAHeight). + Uint64("da_epoch_start", forcedIncludedTxsEvent.StartDaHeight). + Uint64("da_epoch_end", forcedIncludedTxsEvent.EndDaHeight). + Int("missing_count", len(missingTxs)). + Int("total_forced", len(forcedIncludedTxsEvent.Txs)). + Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions missing from block") + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs))) + } + + s.logger.Debug(). + Uint64("height", data.Height()). + Int("forced_txs", len(forcedIncludedTxsEvent.Txs)). 
+ Msg("all forced inclusion transactions verified in block") + + return nil +} + // sendCriticalError sends a critical error to the error channel without blocking func (s *Syncer) sendCriticalError(err error) { if s.errorCh != nil { diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go new file mode 100644 index 000000000..1948109d9 --- /dev/null +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -0,0 +1,428 @@ +package syncing + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/store" + testmocks "github.com/evstack/ev-node/test/mocks" + "github.com/evstack/ev-node/types" +) + +func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that includes the forced transaction blob + data := makeData(gen.ChainID, 1, 1) + data.Txs[0] = types.Tx(dataBin) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since all forced txs are included + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that does NOT include the forced transaction blob + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] = types.Tx([]byte("regular_tx_1")) + data.Txs[1] = types.Tx([]byte("regular_tx_2")) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since forced tx blob is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return two forced inclusion transaction blobs + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create two forced inclusion transaction blobs in DA + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2}, nil).Once() + + // Create block data that includes only one of the forced transaction blobs + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] = types.Tx(dataBin1) + data.Txs[1] = types.Tx([]byte("regular_tx")) + // dataBin2 is missing + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since dataBin2 is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return no forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since no forced txs to verify + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + // Leave ForcedInclusionNamespace empty + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+		Return([]byte("app0"), uint64(1024), nil).Once()
+
+	mockDA := testmocks.NewMockDA(t)
+
+	daClient := da.NewClient(da.Config{
+		DA:            mockDA,
+		Logger:        zerolog.Nop(),
+		Namespace:     cfg.DA.Namespace,
+		DataNamespace: cfg.DA.DataNamespace,
+		// No ForcedInclusionNamespace - not configured
+	})
+	daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop())
+	fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop())
+
+	s := NewSyncer(
+		st,
+		mockExec,
+		daClient,
+		cm,
+		common.NopMetrics(),
+		cfg,
+		gen,
+		common.NewMockBroadcaster[*types.SignedHeader](t),
+		common.NewMockBroadcaster[*types.Data](t),
+		zerolog.Nop(),
+		common.DefaultBlockOptions(),
+		make(chan error, 1),
+	)
+	s.daRetriever = daRetriever
+	s.fiRetriever = fiRetriever
+
+	require.NoError(t, s.initializeState())
+	s.ctx = context.Background()
+
+	// Create block data
+	data := makeData(gen.ChainID, 1, 2)
+
+	currentState := s.GetLastState()
+	currentState.DAHeight = 0
+
+	// Verify - should pass since namespace not configured
+	err = s.verifyForcedInclusionTxs(currentState, data)
+	require.NoError(t, err)
+}

diff --git a/block/public.go b/block/public.go
index f084f2757..4f1a7417e 100644
--- a/block/public.go
+++ b/block/public.go
@@ -1,12 +1,14 @@
 package block

 import (
+	"context"
 	"time"

 	"github.com/evstack/ev-node/block/internal/common"
 	"github.com/evstack/ev-node/block/internal/da"
 	coreda "github.com/evstack/ev-node/core/da"
 	"github.com/evstack/ev-node/pkg/config"
+	"github.com/evstack/ev-node/pkg/genesis"
 	"github.com/rs/zerolog"
 )

@@ -41,10 +43,32 @@ func NewDAClient(
 	logger zerolog.Logger,
 ) DAClient {
 	return da.NewClient(da.Config{
-		DA:             daLayer,
-		Logger:         logger,
-		DefaultTimeout: 10 * time.Second,
-		Namespace:      config.DA.GetNamespace(),
-		DataNamespace:  config.DA.GetDataNamespace(),
+		DA:                       daLayer,
+		Logger:                   logger,
+		DefaultTimeout:           10 * time.Second,
+		Namespace:                config.DA.GetNamespace(),
+		DataNamespace:            config.DA.GetDataNamespace(),
+		ForcedInclusionNamespace: config.DA.GetForcedInclusionNamespace(),
 	})
 }
+
+// ErrForceInclusionNotConfigured is returned when force inclusion is not configured.
+// It is exported because sequencers need to check for this error.
+var ErrForceInclusionNotConfigured = da.ErrForceInclusionNotConfigured
+
+// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA
+type ForcedInclusionEvent = da.ForcedInclusionEvent
+
+// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA
+type ForcedInclusionRetriever interface {
+	RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error)
+}
+
+// NewForcedInclusionRetriever creates a new forced inclusion retriever
+func NewForcedInclusionRetriever(
+	client DAClient,
+	genesis genesis.Genesis,
+	logger zerolog.Logger,
+) ForcedInclusionRetriever {
+	return da.NewForcedInclusionRetriever(client, genesis, logger)
+}

diff --git a/core/execution/execution.go b/core/execution/execution.go
index 896e2d65a..5085ebe57 100644
--- a/core/execution/execution.go
+++ b/core/execution/execution.go
@@ -52,6 +52,7 @@ type Executor interface {
 	// Requirements:
 	// - Must validate state transition against previous state root
 	// - Must handle empty transaction list
+	// - Must gracefully handle gibberish (malformed) transactions
 	// - Must maintain deterministic execution
 	// - Must respect context cancellation/timeout
 	// - The rest of the rules are defined by the specific execution layer

diff --git a/core/sequencer/dummy.go b/core/sequencer/dummy.go
index 5f44dae2a..ef614173a 100644
--- a/core/sequencer/dummy.go
+++ b/core/sequencer/dummy.go
@@ -64,3 +64,13 @@ func (s *DummySequencer) VerifyBatch(ctx context.Context, req VerifyBatchRequest
 		Status: true,
 	}, nil
 }
+
+// SetDAHeight sets the current DA height for the sequencer
+func (s *DummySequencer) SetDAHeight(height uint64) {
+	// No-op for dummy sequencer
+}
+
+// GetDAHeight returns the current DA height for the sequencer
+func (s *DummySequencer) GetDAHeight() uint64 {
+	return 0
+}

diff --git a/core/sequencer/sequencing.go b/core/sequencer/sequencing.go
index 211681589..e97ef93dd 100644
--- a/core/sequencer/sequencing.go
+++ b/core/sequencer/sequencing.go
@@ -7,15 +7,15 @@ import (
 	"time"
 )

-// Sequencer is a generic interface for a sequencer
+// Sequencer defines the minimal sequencing interface used by the block executor.
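+// Implementations include the single sequencer, the based sequencer, and the no-op DummySequencer.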
 type Sequencer interface {
-	// SubmitBatchTxs submits a batch of transactions to the sequencer
+	// SubmitBatchTxs submits a batch of transactions from the executor to the sequencer
 	// Id is the unique identifier for the target chain
 	// Batch is the batch of transactions to submit
 	// returns an error if any from the sequencer
 	SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error)

-	// GetNextBatch returns the next batch of transactions from sequencer to
+	// GetNextBatch returns the next batch of transactions from the sequencer and from DA
 	// Id is the unique identifier for the target chain
 	// LastBatchHash is the cryptographic hash of the last batch received by the
 	// MaxBytes is the maximum number of bytes to return in the batch
@@ -27,6 +27,13 @@ type Sequencer interface {
 	// BatchHash is the cryptographic hash of the batch to verify
 	// returns a boolean indicating if the batch is valid and an error if any from the sequencer
 	VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error)
+
+	// SetDAHeight sets the current DA height for the sequencer
+	// This allows the sequencer to track DA height for forced inclusion retrieval
+	SetDAHeight(height uint64)
+
+	// GetDAHeight returns the current DA height for the sequencer
+	GetDAHeight() uint64
 }

 // Batch is a collection of transactions
diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md
index 378dd9b17..019e3269c 100644
--- a/docs/adr/adr-019-forced-inclusion-mechanism.md
+++ b/docs/adr/adr-019-forced-inclusion-mechanism.md
@@ -4,435 +4,697 @@

 - 2025-03-24: Initial draft
 - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order.
+- 2025-11-10: Updated to reflect the actual implementation

 ## Context

-Evolve currently supports a single sequencer implementation as described in ADR-013. While this approach provides a simple and efficient solution, it introduces a single point of failure that can impact the liveness of the network. If the sequencer goes down or becomes unresponsive, the chain cannot progress.
+In a single-sequencer rollup architecture, users depend entirely on the sequencer to include their transactions in blocks. This creates several problems:

-To address this limitation and improve the liveness properties of applications built with Evolve, we propose implementing a forced inclusion mechanism. This mechanism will allow transactions to be included directly from the Data Availability (DA) layer when the sequencer is unresponsive, creating an "unstoppable" property for Evolve-based chains.
+1. **Censorship Risk**: A malicious or coerced sequencer can selectively exclude transactions
+2. **Liveness Failure**: If the sequencer goes offline, no new transactions can be processed
+3. **Centralization**: Users must trust a single entity to behave honestly
+4. **No Recourse**: Users have no alternative path to submit transactions if the sequencer refuses them

-This enhancement aligns with the requirements defined in the [L2 Beat framework](https://forum.l2beat.com/t/the-stages-framework/291#p-516-stage-1-requirements-3) for Stage 1 L2s, advancing Evolve's capabilities as a robust sequencer library.
+While long-term solutions like decentralized sequencer networks exist, they introduce significant complexity. We need a simpler mechanism that provides censorship resistance and liveness guarantees while maintaining the performance benefits of a single sequencer.
## Alternative Approaches ### Decentralized Sequencer -A fully decentralized sequencer could solve the liveness issue by distributing sequencing responsibilities across multiple nodes. However, this approach introduces significant complexity in terms of consensus, leader election, and coordination between nodes. It would require substantial development effort and resources, making it less suitable as an immediate solution. +A fully decentralized sequencer network would eliminate single points of failure but requires: + +- Complex consensus mechanisms +- Increased latency due to coordination +- More infrastructure and operational complexity ### Automatic Sequencer Failover -Another approach would be to implement an automatic failover mechanism where backup sequencers take over when the primary sequencer fails. While simpler than a fully decentralized solution, this approach still requires managing multiple sequencers and introduces complexity in coordination and state transfer between them. +Implementing automatic failover to backup sequencers when the primary goes down requires: -## Decision +- Complex monitoring and health checks +- Coordination between sequencers to prevent forks +- Does not solve censorship issues with a malicious sequencer -We will implement a forced inclusion mechanism for the Evolve single sequencer architecture that uses a time-based inclusion delay approach. This approach will: +## Decision -1. Track when transactions are first seen in terms of DA block time -2. Require a minimum number of DA blocks to pass before including a direct transaction -3. Let full nodes enforce inclusion within a fixed period of time window +We implement a **forced inclusion mechanism** that allows users to submit transactions directly to the Data Availability (DA) layer. This approach provides: -The mechanism will be designed to maintain backward compatibility with existing Evolve deployments while providing enhanced liveness guarantees. +1. **Censorship Resistance**: Users can always bypass the sequencer by posting to DA +2. **Verifiable Inclusion**: Full nodes verify that sequencers include all forced transactions +3. **Based Rollup Option**: A based sequencer mode for fully DA-driven transaction ordering +4. 
**Simplicity**: No complex timing mechanisms or fallback modes ### High-Level Architecture -The following diagram illustrates the high-level architecture of the forced inclusion mechanism: - -```mermaid -flowchart TB - subgraph DAL["Data Availability Layer"] - end - - subgraph SEQ["Single Sequencer"] - subgraph NO["Normal Operation"] - direction TB - process["Process user txs"] - create["Create batches"] - include["Include direct txs from DA"] - checkDelay["Check MinDADelay"] - end - end - - subgraph FN["Full Nodes"] - subgraph NormalOp["Normal Operation"] - follow["Follow sequencer produced blocks"] - validate["Validate time windows"] - validateDelay["Validate MinDADelay"] - end - - subgraph FallbackMode["Fallback Mode"] - detect["Detect sequencer down"] - scan["Scan DA for direct txs"] - createBlocks["Create deterministic blocks from direct txs"] - end - end - - SEQ -->|"Publish Batches"| DAL - DAL -->|"Direct Txs"| SEQ - DAL -->|"Direct Txs"| FN - SEQ -->|"Blocks"| FN - NormalOp <--> FallbackMode +``` +┌─────────────────────────────────────────────────────────────────┐ +│ User Actions │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ Normal Path: Forced Inclusion Path: │ +│ Submit tx to Sequencer ────► Submit tx directly to DA │ +│ (Fast) (Censorship-resistant) │ +│ │ +└──────────┬────────────────────────────────────┬─────────────────┘ + │ │ + ▼ ▼ + ┌─────────────┐ ┌──────────────────┐ + │ Sequencer │ │ DA Layer │ + │ (Mempool) │ │ (Forced Inc. NS) │ + └──────┬──────┘ └─────────┬────────┘ + │ │ + │ 1. Fetch forced inc. txs │ + │◄────────────────────────────────────┘ + │ + │ 2. Prepend forced txs to batch + │ + ▼ + ┌─────────────┐ + │ Block │ + │ Production │ + └──────┬──────┘ + │ + │ 3. Submit block to DA + │ + ▼ + ┌─────────────┐ + │ DA Layer │ + └──────┬──────┘ + │ + │ 4. Full nodes retrieve block + │ + ▼ + ┌─────────────────────┐ + │ Full Nodes │ + │ (Verification) │ + │ │ + │ 5. Verify forced │ + │ inc. txs are │ + │ included │ + └─────────────────────┘ ``` +### Key Components + +1. **Forced Inclusion Namespace**: A dedicated DA namespace where users can post transactions +2. **DA Retriever**: Fetches forced inclusion transactions from DA using epoch-based scanning +3. **Single Sequencer**: Enhanced to include forced transactions from DA in every batch +4. **Based Sequencer**: Alternative sequencer that ONLY retrieves transactions from DA +5. **Verification**: Full nodes validate that blocks include all forced transactions + ## Detailed Design ### User Requirements -- Developers need a mechanism to ensure their chains can progress even when the single sequencer is unavailable -- The system should maintain a deterministic and consistent state regardless of sequencer availability -- The transition between sequencer-led and forced inclusion modes should be seamless -- Transactions must be included within a fixed time window from when they are first seen -- Direct transactions must wait for a minimum number of DA blocks before inclusion +Users can submit transactions in two ways: -### Systems Affected +1. **Normal Path**: Submit to sequencer's mempool/RPC (fast, low cost) +2. **Forced Inclusion Path**: Submit directly to DA forced inclusion namespace (censorship-resistant) -The implementation of the forced inclusion mechanism will affect several components of the Evolve framework: +No additional requirements or monitoring needed from users. -1. 
**Single Sequencer**: Must be modified to track and include direct transactions from the DA layer within the time window and after minimum DA block delay -2. **Full Node**: Must be updated to recognize and validate blocks with forced inclusions -3. **Block Processing Logic**: Must implement the modified fork choice rule -4. **DA Client**: Must be enhanced to scan for direct transactions -5. **Transaction Validation**: Must validate both sequencer-batched and direct transactions +### Systems Affected + +1. **DA Layer**: New namespace for forced inclusion transactions +2. **Sequencer (Single)**: Fetches and includes forced transactions +3. **Sequencer (Based)**: New sequencer type that only uses DA transactions +4. **DA Retriever**: New component for fetching forced transactions +5. **Syncer**: Verifies forced transaction inclusion in blocks +6. **Configuration**: New fields for forced inclusion settings ### Data Structures -#### Direct Transaction Tracking +#### Forced Inclusion Event ```go -type ForcedInclusionConfig struct { - MaxInclusionDelay uint64 // Max inclusion time in DA block time units - MinDADelay uint64 // Minimum number of DA blocks before including a direct tx +type ForcedIncludedEvent struct { + Txs [][]byte // Forced inclusion transactions + StartDaHeight uint64 // Start of DA height range + EndDaHeight uint64 // End of DA height range } +``` + +#### DA Retriever Interface -type DirectTransaction struct { - TxHash common.Hash - FirstSeenAt uint64 // DA block time when the tx was seen - Included bool // Whether it has been included in a block - IncludedAt uint64 // Height at which it was included +```go +type DARetriever interface { + // Retrieve forced inclusion transactions from DA at specified height + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) } +``` -type DirectTxTracker struct { - txs map[common.Hash]DirectTransaction // Map of direct transactions - mu sync.RWMutex // Mutex for thread-safe access - latestSeenTime uint64 // Latest DA block time scanned - latestDAHeight uint64 // Latest DA block height +### APIs and Interfaces + +#### DA Retriever + +The DA Retriever component handles fetching forced inclusion transactions: + +```go +type daRetriever struct { + da coreda.DA + cache cache.CacheManager + genesis genesis.Genesis + logger zerolog.Logger + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool + daEpochSize uint64 } + +// RetrieveForcedIncludedTxsFromDA fetches forced inclusion transactions +// Only fetches at epoch boundaries to prevent redundant DA queries +func (r *daRetriever) RetrieveForcedIncludedTxsFromDA( + ctx context.Context, + daHeight uint64, +) (*ForcedIncludedEvent, error) ``` -#### Sequencer Status Tracking +#### Single Sequencer Extension + +The single sequencer is enhanced to fetch and include forced transactions: ```go -type SequencerStatus struct { - IsActive bool // Whether the sequencer is considered active - LastActiveTime uint64 // Last DA block time where sequencer posted a batch - InactiveTime uint64 // Time since last sequencer activity +type Sequencer struct { + // ... existing fields ... + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx + queue *BatchQueue +} + +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 +} + +func (s *Sequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + // 1. 
Fetch forced inclusion transactions from DA + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight.Load()) + + // 2. Process forced txs with size validation and pending queue + forcedTxs := s.processForcedInclusionTxs(forcedEvent, req.MaxBytes) + + // 3. Get batch from mempool queue + batch, err := s.queue.Next(ctx) + + // 4. Prepend forced txs and trim batch to fit MaxBytes + if len(forcedTxs) > 0 { + forcedTxsSize := calculateSize(forcedTxs) + remainingBytes := req.MaxBytes - forcedTxsSize + + // Trim batch transactions to fit + trimmedBatchTxs := trimToSize(batch.Transactions, remainingBytes) + + // Return excluded txs to front of queue + if len(trimmedBatchTxs) < len(batch.Transactions) { + excludedBatch := batch.Transactions[len(trimmedBatchTxs):] + s.queue.Prepend(ctx, Batch{Transactions: excludedBatch}) + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) + } + + return &GetNextBatchResponse{Batch: batch} +} + +// processForcedInclusionTxs validates and queues forced txs +func (s *Sequencer) processForcedInclusionTxs(event *ForcedInclusionEvent, maxBytes uint64) [][]byte { + var validatedTxs [][]byte + var newPendingTxs []pendingForcedInclusionTx + currentSize := 0 + + // Process pending txs from previous epochs first + for _, pendingTx := range s.pendingForcedInclusionTxs { + if !ValidateBlobSize(pendingTx.Data) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(pendingTx.Data), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += len(pendingTx.Data) + } + + // Process new txs from this epoch + for _, tx := range event.Txs { + if !ValidateBlobSize(tx) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(tx), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + validatedTxs = append(validatedTxs, tx) + currentSize += len(tx) + } + + s.pendingForcedInclusionTxs = newPendingTxs + return validatedTxs } ``` -### APIs and Interfaces +#### Based Sequencer -#### Enhanced DA Client Interface +A new sequencer implementation that ONLY retrieves transactions from DA: ```go -type DAClient interface { - // Existing methods - // ... 
+type BasedSequencer struct { + fiRetriever ForcedInclusionRetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + mu sync.RWMutex + daHeight uint64 + txQueue [][]byte // Buffer for transactions exceeding batch size +} + +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + + + // Always fetch forced inclusion transactions from DA + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight) + if err != nil && !errors.Is(err, ErrHeightFromFuture) { + return nil, err + } - // New method for forced inclusion - GetDirectTransactions(ctx context.Context, fromTime, toTime uint64) ([][]byte, error) - // Note: SubmitDirectTransaction is removed as it's not a responsibility of the node + // Validate and add transactions to queue + for _, tx := range forcedEvent.Txs { + if ValidateBlobSize(tx) { + s.txQueue = append(s.txQueue, tx) + } + } + + // Create batch from queue respecting MaxBytes + batch := s.createBatchFromQueue(req.MaxBytes) + + return &GetNextBatchResponse{Batch: batch} +} + +// SubmitBatchTxs is a no-op for based sequencer +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) { + // Based sequencer ignores submitted transactions + return &SubmitBatchTxsResponse{}, nil } ``` -#### Sequencer Interface Extensions +#### Syncer Verification + +Full nodes verify forced inclusion in the sync process: ```go -// New methods added to the Sequencer interface -func (s *Sequencer) ScanDALayerForDirectTxs(ctx context.Context) error -func (s *Sequencer) IncludeDirectTransactions(ctx context.Context, batch *Batch) error +func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error { + // 1. Retrieve forced inclusion transactions from DA + forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) + if err != nil { + return err + } + + // 2. Build map of transactions in block + blockTxMap := make(map[string]struct{}) + for _, tx := range data.Txs { + blockTxMap[string(tx)] = struct{}{} + } + + // 3. 
Verify all forced transactions are included + for _, forcedTx := range forcedEvent.Txs { + if _, ok := blockTxMap[string(forcedTx)]; !ok { + return errMaliciousProposer + } + } + + return nil +} ``` -#### Full Node Interface Extensions +### Implementation Details + +#### Epoch-Based Fetching + +To avoid excessive DA queries, the DA Retriever uses epoch-based fetching: + +- **Epoch Size**: Configurable number of DA blocks (e.g., 10) +- **Epoch Boundaries**: Deterministically calculated based on `DAStartHeight` +- **Fetch Timing**: Only fetch at epoch start to prevent duplicate fetches ```go -// New methods added to the Node interface -func (n *Node) CheckSequencerStatus(ctx context.Context) (bool, error) -func (n *Node) ProcessDirectTransactions(ctx context.Context) error -func (n *Node) ValidateBlockTimeWindow(ctx context.Context, block *types.Block) error +// Calculate epoch boundaries +func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { + epochNum := r.calculateEpochNumber(daHeight) + start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize + end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 + return start, end +} + +// Only fetch at epoch start +if daHeight != epochStart { + return &ForcedIncludedEvent{Txs: [][]byte{}} +} + +// Fetch all heights in epoch range +for height := epochStart; height <= epochEnd; height++ { + // Fetch forced inclusion blobs from this DA height +} ``` -### Implementation Changes - -#### Single Sequencer Node Changes - -1. **DA Layer Scanner**: - - Implement a periodic scanner that queries the DA layer for direct transactions - - Track all direct transactions in the DirectTxTracker data structure - - Update the latest seen DA block time and height after each scan - -2. **Transaction Inclusion Logic**: - - Modify the batch creation process to include direct transactions from the DA layer - - Ensure all direct transactions are included within the MaxInclusionDelay time window - - Check that transactions have waited for MinDADelay DA blocks - - Track transaction inclusion times and enforce both delay constraints - -3. **Validation Rules**: - - Implement time window validation to ensure transactions are included within MaxInclusionDelay - - Implement DA block delay validation to ensure transactions wait for MinDADelay blocks - - Track both time-based and DA block-based delays for each transaction - -4. **Recovery Mechanism**: - - Add logic to detect when the sequencer comes back online after downtime - - Implement state synchronization to catch up with any forced inclusions that occurred during downtime - - Resume normal operation by building on top of the canonical chain tip - -#### Sequencer Operation Flow - -The following diagram illustrates the operation flow for the sequencer with forced inclusion: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Sequencer Operation Flow │ -└─────────────────┬───────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Process User Transactions │ │ 2. 
Periodic DA Layer Scanning │ -│ │ │ │ -│ - Accept transactions from users│ │ - Query DA layer for direct txs │ -│ - Validate and queue txs │ │ - Update DirectTxTracker │ -│ - Process queue based on policy │ │ - Track latest seen DA block time │ -└─────────────────┬───────────────┘ └────────────────────┬───────────────────┘ - │ │ - ▼ ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 3. Batch Creation │ │ 4. Direct Transaction Inclusion │ -│ │ │ │ -│ - Create batch of txs │◄─────┤ - Include unprocessed direct txs │ -│ - Apply ordering policy │ │ - Prioritize by first seen │ -│ - Calculate batch metadata │ │ - Mark included txs as processed │ -└─────────────────┬───────────────┘ └────────────────────────────────────────┘ - │ - ▼ -┌──────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 5. Time Window Validation │ │ 6. Block Production │ -│ │ │ │ -│ - Check transaction timestamps │ │ - Create block with batch │ -│ - Ensure within MaxInclusionDelay│─────►│ - Sign and publish block │ -│ - Track inclusion times │ │ │ -└──────────────────────────────────┘ └─────────────────┬──────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ 7. DA Batch Submission │ - │ │ - │ - Submit batch to DA layer │ - │ - Track submission status │ - │ - Handle retry on failure │ - └────────────────────────────────────────┘ +#### Height From Future Handling + +When DA height is not yet available: + +```go +if errors.Is(err, coreda.ErrHeightFromFuture) { + // Keep current DA height, return empty batch + // Retry same height on next call + return &ForcedIncludedEvent{Txs: [][]byte{}}, nil +} ``` -#### Full Node Operation Flow - -The following diagram illustrates the operation flow for full nodes with forced inclusion support: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Full Node Operation Flow │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Normal Operation Mode │ │ 2. Sequencer Status Monitoring │ -│ │ │ │ -│ - Receive blocks from sequencer │ │ - Monitor sequencer activity on DA │ -│ - Validate time windows │◄───►│ - Track time since last sequencer batch│ -│ - Apply state transitions │ │ - Check against downtime threshold │ -└─────────────────────────────────┘ └───────────────────┬────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ Is Sequencer Down? │ - │ (Based on configurable threshold) │ - └───────────┬───────────────┬────────────┘ - │ │ - │ Yes │ No - ▼ │ - ┌────────────────────────┐ │ - │ 3. Enter Fallback Mode │ │ - │ │ │ - │ - Switch to direct tx │ │ - │ processing │ │ - │ - Notify subsystems │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 4. DA Layer Scanning │ │ - │ │ │ - │ - Scan DA for direct │ │ - │ transactions │ │ - │ - Track latest seen │ │ - │ DA block time │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 5. Deterministic Block │ │ - │ Creation │ │ - │ │ │ - │ - Create blocks with │ │ - │ direct txs only │ │ - │ - Apply deterministic │ │ - │ ordering rules │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ 6. 
Block Processing and State Update                                                │
-│                                                                                 │
-│ - Execute transactions                                                          │
-│ - Update state                                                                  │
-│ - Persist blocks and state                                                      │
-└─────────────────────────────────────────────────────────────────────────────────┘
+#### Size Validation and Max Bytes Handling
+
+Both sequencers enforce strict size limits to prevent DoS and ensure batches never exceed the DA layer's limits:
+
+```go
+// Size validation utilities
+const AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB DA layer limit
+
+// ValidateBlobSize checks against absolute DA layer limit
+func ValidateBlobSize(blob []byte) bool {
+	return uint64(len(blob)) <= AbsoluteMaxBlobSize
+}
+
+// WouldExceedCumulativeSize checks against per-batch limit
+func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool {
+	return uint64(currentSize)+uint64(blobSize) > maxBytes
+}
 ```
-
-### Fallback Mode Transition
-
-The following diagram illustrates the transition between normal operation and fallback mode:
-
-```mermaid
-sequenceDiagram
-    participant DA as Data Availability Layer
-    participant S as Sequencer
-    participant R as Chain
-
-    Note over S,R: Normal Operation
-    DA->>S: DA Block N
-    S->>R: Sequencer Block N
-    DA->>S: DA Block N+1
-    S->>R: Sequencer Block N+1
-    DA->>S: DA Block N+2
-    S->>R: Sequencer Block N+2
-
-    Note over S,R: Sequencer Down
-    DA->>R: DA Block N+3 (Direct Txs)
-    Note over R: Fallback Mode Start
-    R->>R: Create Block from Direct Txs
-    DA->>R: DA Block N+4 (Direct Txs)
-    R->>R: Create Block from Direct Txs
-    DA->>R: DA Block N+5 (Direct Txs)
-    R->>R: Create Block from Direct Txs
-
-    Note over S,R: Sequencer Back Online
-    DA->>S: DA Block N+6
-    S->>R: Sequencer Block N+6
-    DA->>S: DA Block N+7
-    S->>R: Sequencer Block N+7
-
-    Note over R: Timeline shows:
-    Note over R: 1. Normal sequencer operation
-    Note over R: 2. Sequencer downtime & fallback
-    Note over R: 3. Sequencer recovery
-```
+
+**Key Behaviors**:
+
+- **Absolute validation**: Blobs exceeding 1.5MB are permanently rejected
+- **Batch size limits**: `req.MaxBytes` is NEVER exceeded in any batch
+- **Transaction preservation**:
+  - Single sequencer: Trimmed batch txs returned to queue via `Prepend()`
+  - Based sequencer: Excess txs remain in `txQueue` for next batch
+  - Forced txs that don't fit go to `pendingForcedInclusionTxs` (single) or stay in `txQueue` (based)
+
+#### Transaction Queue Management
+
+The based sequencer uses a simplified queue to handle transactions:
+
+```go
+func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch {
+	var batch [][]byte
+	var totalBytes uint64
+
+	for i, tx := range s.txQueue {
+		txSize := uint64(len(tx))
+		// Always respect maxBytes, even for first transaction
+		if totalBytes+txSize > maxBytes {
+			// Would exceed max bytes, keep remaining in queue
+			s.txQueue = s.txQueue[i:]
+			break
+		}
+
+		batch = append(batch, tx)
+		totalBytes += txSize
+
+		// Clear queue if we processed everything
+		if i == len(s.txQueue)-1 {
+			s.txQueue = s.txQueue[:0]
+		}
+	}
+
+	return &Batch{Transactions: batch}
+}
+```
+
+**Note**: The based sequencer is simpler than the single sequencer; it doesn't need a separate pending queue because `txQueue` naturally handles all transaction buffering.
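+
+To make the size-cap behavior concrete, here is a minimal, self-contained sketch of the greedy trimming both sequencers apply. `takeBatch` is illustrative only, not the production API:
+
+```go
+package main
+
+import "fmt"
+
+// takeBatch pops transactions until adding the next one would exceed
+// maxBytes; the remainder stays queued for the next call.
+func takeBatch(queue [][]byte, maxBytes uint64) (batch, rest [][]byte) {
+	var total uint64
+	for i, tx := range queue {
+		if total+uint64(len(tx)) > maxBytes {
+			return batch, queue[i:]
+		}
+		batch = append(batch, tx)
+		total += uint64(len(tx))
+	}
+	return batch, nil
+}
+
+func main() {
+	queue := [][]byte{make([]byte, 400), make([]byte, 500), make([]byte, 300)}
+	batch, rest := takeBatch(queue, 1000)
+	fmt.Printf("batched=%d queued=%d\n", len(batch), len(rest)) // batched=2 queued=1
+}
+```
+
+With a 1000-byte cap, the 400- and 500-byte transactions form the batch while the 300-byte transaction waits for the next call, so nothing is dropped.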
-The forced inclusion mechanism will be configurable with the following parameters: +### Configuration ```go -type ForcedInclusionConfig struct { - Enabled bool // Whether forced inclusion is enabled - MaxInclusionDelay uint64 // Maximum time window for transaction inclusion - SequencerDownTime uint64 // Time after which the sequencer is considered down - MinDADelay uint64 // Minimum number of DA blocks before including a direct tx +type Genesis struct { + ChainID string + StartTime time.Time + InitialHeight uint64 + ProposerAddress []byte + DAStartHeight uint64 + // Number of DA blocks to scan per forced inclusion fetch + // Higher values reduce DA queries but increase latency + // Lower values increase DA queries but improve responsiveness + DAEpochForcedInclusion uint64 +} + +type DAConfig struct { + // ... existing fields ... + + // Namespace for forced inclusion transactions + ForcedInclusionNamespace string +} + +type NodeConfig struct { + // ... existing fields ... + + // Run node with based sequencer (requires aggregator mode) + BasedSequencer bool } ``` +### Configuration Examples + +#### Traditional Sequencer with Forced Inclusion + +```yaml +# genesis.json +{ + "chain_id": "my-rollup", + "forced_inclusion_da_epoch": 10 # Scan 10 DA blocks at a time +} + +# config.toml +[da] +forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564" + +[node] +aggregator = true +based_sequencer = false # Use traditional sequencer +``` + +#### Based Sequencer (DA-Only) + +```yaml +# genesis.json +{ + "chain_id": "my-rollup", + "forced_inclusion_da_epoch": 5 # Scan 5 DA blocks at a time +} + +# config.toml +[da] +forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564" + +[node] +aggregator = true +based_sequencer = true # Use based sequencer +``` + +### Sequencer Operation Flows + +#### Single Sequencer Flow + +``` +1. Timer triggers GetNextBatch +2. Fetch forced inclusion txs from DA (via DA Retriever) + - Only at epoch boundaries + - Scan epoch range for forced transactions +3. Get batch from mempool queue +4. Prepend forced txs to batch +5. Return batch for block production +``` + +#### Based Sequencer Flow + +``` +1. Timer triggers GetNextBatch +2. Check transaction queue for buffered txs +3. If queue empty or epoch boundary: + - Fetch forced inclusion txs from DA + - Add to queue +4. Create batch from queue (respecting MaxBytes) +5. Return batch for block production +``` + +### Full Node Verification Flow + +``` +1. Receive block from DA or P2P +2. Before applying block: + a. Fetch forced inclusion txs from DA at block's DA height + b. Build map of transactions in block + c. Verify all forced txs are in block + d. If missing: reject block, flag malicious proposer +3. Apply block if verification passes +``` + ### Efficiency Considerations -- DA layer scanning is integrated into the core block processing pipeline for continuous monitoring -- Direct transactions are indexed by hash for quick lookups -- The sequencer status is tracked by DA block time rather than block heights -- Time-based tracking simplifies the implementation and reduces overhead -- DA block height tracking adds minimal overhead to existing block processing +1. **Epoch-Based Fetching**: Reduces DA queries by batching multiple DA heights +2. **Deterministic Epochs**: All nodes calculate same epoch boundaries +3. **Fetch at Epoch Start**: Prevents duplicate fetches as DA height progresses +4. 
**Transaction Queue**: Buffers excess transactions across multiple blocks +5. **Conditional Fetching**: Only when forced inclusion namespace is configured +6. **Size Pre-validation**: Invalid blobs rejected early, before batch construction +7. **Efficient Queue Operations**: + - Single sequencer: `Prepend()` reuses space before head position + - Based sequencer: Simple slice operations for queue management + +**DA Query Frequency**: + +Every `DAEpochForcedInclusion` DA blocks ### Security Considerations -- The mechanism ensures that only valid direct transactions can be included in the chain -- Time window validation prevents delayed inclusion of transactions -- The configurable time threshold prevents premature switching to fallback mode due to temporary sequencer issues -- All transactions, whether sequencer-batched or direct, undergo the same validation rules -- MinDADelay provides protection against DA layer censorship by requiring multiple block proposers to collude -- Block-based delay prevents single block proposer censorship by ensuring transactions must be visible across multiple DA layer blocks -- The delay mechanism is inspired by the "Based Sequencing with Soft Confirmations" design from [Sovereign SDK #408](https://github.com/Sovereign-Labs/sovereign-sdk/issues/408), which uses deferred execution to prevent DA layer block proposers from censoring transactions +1. **Malicious Proposer Detection**: Full nodes reject blocks missing forced transactions +2. **No Timing Attacks**: Epoch boundaries are deterministic, no time-based logic +3. **Blob Size Limits**: Two-tier size validation prevents DoS + - Absolute limit (1.5MB): Blobs exceeding this are permanently rejected + - Batch limit (`MaxBytes`): Ensures no batch exceeds DA submission limits +4. **Graceful Degradation**: Continues operation if forced inclusion not configured +5. **Height Validation**: Handles "height from future" errors without state corruption +6. **Transaction Preservation**: No valid transactions are lost due to size constraints +7. **Strict MaxBytes Enforcement**: Batches NEVER exceed `req.MaxBytes`, preventing DA layer rejections -### Privacy Considerations +**Attack Vectors**: -- Direct transactions posted to the DA layer are publicly visible, just like sequencer-batched transactions -- No additional privacy concerns are introduced beyond the existing model +- **Censorship**: Mitigated by forced inclusion verification +- **DA Spam**: Limited by DA layer's native spam protection and two-tier blob size limits +- **Block Withholding**: Full nodes can fetch and verify from DA independently +- **Oversized Batches**: Prevented by strict size validation at multiple levels ### Testing Strategy -1. **Unit Tests**: - - Test individual components of the forced inclusion mechanism - - Verify time window validation logic - - Test the DA scanner functionality - - Test transaction inclusion timing constraints - - Test MinDADelay validation - -2. **Integration Tests**: - - Test the interaction between the sequencer and the DA layer - - Verify correct inclusion of direct transactions within time windows - - Test DA block delay validation - - Verify both time and block delay constraints - -3. **End-to-End Tests**: - - Simulate sequencer downtime and verify chain progression - - Test the transition between normal and fallback modes - - Verify the sequencer's recovery process after downtime - - Test transaction inclusion with various delay configurations - -4. 
**Performance Testing**: - - Measure the overhead introduced by the DA scanner - - Benchmark the system's performance in fallback mode - - Evaluate the impact of time-based tracking - - Measure the performance impact of DA block delay validation +#### Unit Tests + +1. **DA Retriever**: + - Epoch boundary calculations + - Height from future handling + - Blob size validation + - Empty epoch handling + +2. **Size Validation**: + - Individual blob size validation (absolute limit) + - Cumulative size checking (batch limit) + - Edge cases (empty blobs, exact limits, exceeding limits) + +3. **Single Sequencer**: + - Forced transaction prepending with size constraints + - Batch trimming when forced + batch exceeds MaxBytes + - Trimmed transactions returned to queue via Prepend + - Pending forced inclusion queue management + - DA height tracking + - Error handling + +4. **BatchQueue**: + - Prepend operation (empty queue, with items, after consuming) + - Multiple prepends (LIFO ordering) + - Space reuse before head position + +5. **Based Sequencer**: + - Queue management with size validation + - Batch size limits strictly enforced + - Transaction buffering across batches + - DA-only operation + - Always checking for new forced txs + +6. **Syncer Verification**: + - All forced txs included (pass) + - Missing forced txs (fail) + - No forced txs (pass) + +#### Integration Tests + +1. **Single Sequencer Integration**: + - Submit to mempool and forced inclusion + - Verify both included in block + - Forced txs appear first + +2. **Based Sequencer Integration**: + - Submit only to DA forced inclusion + - Verify block production + - Mempool submissions ignored + +3. **Verification Flow**: + - Full node rejects block missing forced tx + - Full node accepts block with all forced txs + +#### End-to-End Tests + +1. **User Flow**: + - User submits tx to forced inclusion namespace + - Sequencer includes tx in next epoch + - Full nodes verify inclusion + +2. **Based Rollup**: + - Start network with based sequencer + - Submit transactions to DA + - Verify block production and finalization + +3. **Censorship Resistance**: + - Sequencer ignores specific transaction + - User submits to forced inclusion + - Transaction included in next epoch + - Attempting to exclude causes block rejection ### Breaking Changes -This enhancement introduces no breaking changes to the existing API or data structures. It extends the current functionality by implementing time-based transaction tracking and inclusion rules, along with DA block-based delay validation, without modifying the core interfaces that developers interact with. +1. **Sequencer Initialization**: Requires `DARetriever` and `Genesis` parameters +2. **Configuration**: New fields in `DAConfig` and `NodeConfig` +3. 
**Syncer**: New verification step in block processing + +**Migration Path**: + +- Forced inclusion is optional (enabled when namespace configured) +- Existing deployments work without configuration changes +- Can enable incrementally per network ## Status -Proposed +Accepted and Implemented ## Consequences ### Positive -- Improves the liveness guarantees of Evolve-based chains -- Provides a path for Evolve to meet Stage 1 L2 requirements per the L2 Beat framework -- Creates an "unstoppable" property for applications, enhancing their reliability -- Maintains a deterministic chain state regardless of sequencer availability -- More predictable deadlines in DA time -- Easier to reason about for users and developers -- Prevents DA layer censorship by requiring multiple block proposers to collude +1. **Censorship Resistance**: Users have guaranteed path to include transactions +2. **Verifiable**: Full nodes enforce forced inclusion, detecting malicious sequencers +3. **Simple Design**: No complex timing mechanisms or fallback modes +4. **Based Rollup Option**: Fully DA-driven transaction ordering available (simplified implementation) +5. **Optional**: Forced inclusion can be disabled for permissioned deployments +6. **Efficient**: Epoch-based fetching minimizes DA queries +7. **Flexible**: Configurable epoch size allows tuning latency vs efficiency +8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections +9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost +10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures ### Negative -- Adds complexity to the block processing and validation logic -- Introduces overhead from scanning the DA layer for direct transactions -- Could potentially slow block production during fallback mode -- May need careful tuning of time window parameters -- Could be affected by variations in block production rate -- Additional complexity from tracking DA block heights for delay validation +1. **Increased Latency**: Forced transactions subject to epoch boundaries +2. **DA Dependency**: Requires DA layer to support multiple namespaces +3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion +4. **Additional Complexity**: New component (DA Retriever) and verification logic +5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) ### Neutral -- Requires application developers to consider both sequencer-batched and direct transaction flows -- Introduces configuration options that developers need to understand and set appropriately -- Changes the mental model of how the chain progresses, from purely sequencer-driven to a hybrid approach -- Users will need to use external tools or services to submit direct transactions to the DA layer during sequencer downtime +1. **Two Sequencer Types**: Choice between single (hybrid) and based (DA-only) +2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path +3. **Monitoring**: Operators should monitor forced inclusion namespace usage +4. **Documentation**: Users need guidance on when to use forced inclusion +5. 
## References diff --git a/go.mod b/go.mod index e1495d2ec..81a458d74 100644 --- a/go.mod +++ b/go.mod @@ -84,7 +84,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -163,3 +163,5 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) + +replace github.com/evstack/ev-node/core => ./core diff --git a/go.sum b/go.sum index 120f8e51d..91a7b5f72 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.5 h1:lgxE8XiF3U9pcFgh7xuKMgsOGvLBGRyd9kc9MR4WL0o= -github.com/evstack/ev-node/core v1.0.0-beta.5/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -229,8 +227,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index fe42707f8..ed4584e48 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -27,8 +27,6 @@ import ( "github.com/evstack/ev-node/pkg/signer/file" ) -const DefaultMaxBlobSize = 2 * 1024 * 1024 // 2MB - // ParseConfig is a helper that loads the node configuration and validates it.
func ParseConfig(cmd *cobra.Command) (rollconf.Config, error) { nodeConfig, err := rollconf.Load(cmd) @@ -94,7 +92,7 @@ func StartNode( // create a new remote signer var signer signer.Signer - if nodeConfig.Signer.SignerType == "file" && nodeConfig.Node.Aggregator { + if nodeConfig.Signer.SignerType == "file" && (nodeConfig.Node.Aggregator && !nodeConfig.Node.BasedSequencer) { // Get passphrase file path passphraseFile, err := cmd.Flags().GetString(rollconf.FlagSignerPassphraseFile) if err != nil { diff --git a/pkg/config/config.go b/pkg/config/config.go index d6b1f1553..48e2ba11e 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -33,6 +33,8 @@ const ( // FlagAggregator is a flag for running node in aggregator mode FlagAggregator = FlagPrefixEvnode + "node.aggregator" + // FlagBasedSequencer is a flag for enabling based sequencer mode (requires aggregator mode) + FlagBasedSequencer = FlagPrefixEvnode + "node.based_sequencer" // FlagLight is a flag for running the node in light mode FlagLight = FlagPrefixEvnode + "node.light" // FlagBlockTime is a flag for specifying the block time @@ -62,6 +64,8 @@ const ( FlagDANamespace = FlagPrefixEvnode + "da.namespace" // FlagDADataNamespace is a flag for specifying the DA data namespace ID FlagDADataNamespace = FlagPrefixEvnode + "da.data_namespace" + // FlagDAForcedInclusionNamespace is a flag for specifying the DA forced inclusion namespace ID + FlagDAForcedInclusionNamespace = FlagPrefixEvnode + "da.forced_inclusion_namespace" // FlagDASubmitOptions is a flag for data availability submit options FlagDASubmitOptions = FlagPrefixEvnode + "da.submit_options" // FlagDASigningAddresses is a flag for specifying multiple DA signing addresses @@ -153,15 +157,16 @@ type Config struct { // DAConfig contains all Data Availability configuration parameters type DAConfig struct { - Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` - AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` - SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` - SigningAddresses []string `mapstructure:"signing_addresses" yaml:"signing_addresses" comment:"List of addresses to use for DA submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches. Useful for high-throughput chains."` - Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` - DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` - BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. 
Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` - MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` - MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` + Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` + AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` + SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` + SigningAddresses []string `mapstructure:"signing_addresses" yaml:"signing_addresses" comment:"List of addresses to use for DA submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches. Useful for high-throughput chains."` + Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` + DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` + ForcedInclusionNamespace string `mapstructure:"forced_inclusion_namespace" yaml:"forced_inclusion_namespace" comment:"Namespace ID for forced inclusion transactions on the DA layer."` + BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` + MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` + MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` } // GetNamespace returns the namespace for header submissions. 
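A short sketch of how the new `ForcedInclusionNamespace` field behaves (all identifiers below appear in this change; note that `Validate` currently rejects a non-empty value with "forced inclusion is not yet live", as shown in the next hunks):

package main

import (
	"fmt"

	"github.com/evstack/ev-node/pkg/config"
)

func main() {
	cfg := config.DefaultConfig()

	// Forced inclusion is opt-in: an empty namespace means the feature is off.
	fmt.Println("enabled:", cfg.DA.GetForcedInclusionNamespace() != "") // false by default

	// Setting a namespace marks the feature as configured.
	cfg.DA.ForcedInclusionNamespace = "test-fi-ns"
	fmt.Println("enabled:", cfg.DA.GetForcedInclusionNamespace() != "") // true
}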
@@ -178,11 +183,17 @@ func (d *DAConfig) GetDataNamespace() string { return d.GetNamespace() } +// GetForcedInclusionNamespace returns the namespace for forced inclusion transactions +func (d *DAConfig) GetForcedInclusionNamespace() string { + return d.ForcedInclusionNamespace +} + // NodeConfig contains all Rollkit specific configuration parameters type NodeConfig struct { // Node mode configuration - Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` - Light bool `yaml:"light" comment:"Run node in light mode"` + Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` + BasedSequencer bool `yaml:"based_sequencer" comment:"Run node with based sequencer (fetches transactions only from DA forced inclusion namespace). Requires aggregator mode to be enabled."` + Light bool `yaml:"light" comment:"Run node in light mode"` // Block management configuration BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Block time (duration). Examples: \"500ms\", \"1s\", \"5s\", \"1m\", \"2m30s\", \"10m\"."` @@ -234,6 +245,11 @@ func (c *Config) Validate() error { return fmt.Errorf("could not create directory %q: %w", fullDir, err) } + // Validate based sequencer requires aggregator mode + if c.Node.BasedSequencer && !c.Node.Aggregator { + return fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + // Validate namespaces if err := validateNamespace(c.DA.GetNamespace()); err != nil { return fmt.Errorf("could not validate namespace (%s): %w", c.DA.GetNamespace(), err) @@ -245,6 +261,14 @@ func (c *Config) Validate() error { } } + if len(c.DA.GetForcedInclusionNamespace()) > 0 { + // if err := validateNamespace(c.DA.GetForcedInclusionNamespace()); err != nil { + // return fmt.Errorf("could not validate forced inclusion namespace (%s): %w", c.DA.GetForcedInclusionNamespace(), err) + // } + return fmt.Errorf("forced inclusion is not yet live") + + } + // Validate lazy mode configuration if c.Node.LazyMode && c.Node.LazyBlockInterval.Duration <= c.Node.BlockTime.Duration { return fmt.Errorf("LazyBlockInterval (%v) must be greater than BlockTime (%v) in lazy mode", @@ -301,8 +325,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Bool(FlagClearCache, def.ClearCache, "clear the cache") // Node configuration flags - cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node in aggregator mode") - cmd.Flags().Bool(FlagLight, def.Node.Light, "run light client") + cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node as an aggregator") + cmd.Flags().Bool(FlagBasedSequencer, def.Node.BasedSequencer, "run node with based sequencer (requires aggregator mode)") + cmd.Flags().Bool(FlagLight, def.Node.Light, "run node in light mode") cmd.Flags().Duration(FlagBlockTime, def.Node.BlockTime.Duration, "block time (for aggregator mode)") cmd.Flags().Bool(FlagLazyAggregator, def.Node.LazyMode, "produce blocks only when transactions are available or after lazy block time") cmd.Flags().Uint64(FlagMaxPendingHeadersAndData, def.Node.MaxPendingHeadersAndData, "maximum headers or data pending DA confirmation before pausing block production (0 for no limit)") @@ -316,6 +341,7 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Duration(FlagDABlockTime, def.DA.BlockTime.Duration, "DA chain block time (for syncing)") cmd.Flags().String(FlagDANamespace, def.DA.Namespace, "DA namespace for header (or blob) submissions") cmd.Flags().String(FlagDADataNamespace, def.DA.DataNamespace, "DA namespace for data submissions") + 
cmd.Flags().String(FlagDAForcedInclusionNamespace, def.DA.ForcedInclusionNamespace, "DA namespace for forced inclusion transactions") cmd.Flags().String(FlagDASubmitOptions, def.DA.SubmitOptions, "DA submit options") cmd.Flags().StringSlice(FlagDASigningAddresses, def.DA.SigningAddresses, "Comma-separated list of addresses for DA submissions (used in round-robin)") cmd.Flags().Uint64(FlagDAMempoolTTL, def.DA.MempoolTTL, "number of DA blocks until transaction is dropped from the mempool") @@ -343,6 +369,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().String(FlagSignerType, def.Signer.SignerType, "type of signer to use (file, grpc)") cmd.Flags().String(FlagSignerPath, def.Signer.SignerPath, "path to the signer file or address") cmd.Flags().String(FlagSignerPassphraseFile, "", "path to file containing the signer passphrase (required for file signer and if aggregator is enabled)") + + // flag constraints + cmd.MarkFlagsMutuallyExclusive(FlagLight, FlagAggregator) } // Load loads the node configuration in the following order of precedence: diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 7834e42aa..d58c3348b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -50,9 +50,11 @@ func TestAddFlags(t *testing.T) { // Test specific flags assertFlagValue(t, flags, FlagDBPath, DefaultConfig().DBPath) + assertFlagValue(t, flags, FlagClearCache, DefaultConfig().ClearCache) // Node flags assertFlagValue(t, flags, FlagAggregator, DefaultConfig().Node.Aggregator) + assertFlagValue(t, flags, FlagBasedSequencer, DefaultConfig().Node.BasedSequencer) assertFlagValue(t, flags, FlagLight, DefaultConfig().Node.Light) assertFlagValue(t, flags, FlagBlockTime, DefaultConfig().Node.BlockTime.Duration) assertFlagValue(t, flags, FlagLazyAggregator, DefaultConfig().Node.LazyMode) @@ -66,6 +68,8 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagDAAuthToken, DefaultConfig().DA.AuthToken) assertFlagValue(t, flags, FlagDABlockTime, DefaultConfig().DA.BlockTime.Duration) assertFlagValue(t, flags, FlagDANamespace, DefaultConfig().DA.Namespace) + assertFlagValue(t, flags, FlagDADataNamespace, DefaultConfig().DA.DataNamespace) + assertFlagValue(t, flags, FlagDAForcedInclusionNamespace, DefaultConfig().DA.ForcedInclusionNamespace) assertFlagValue(t, flags, FlagDASubmitOptions, DefaultConfig().DA.SubmitOptions) assertFlagValue(t, flags, FlagDASigningAddresses, DefaultConfig().DA.SigningAddresses) assertFlagValue(t, flags, FlagDAMempoolTTL, DefaultConfig().DA.MempoolTTL) @@ -89,6 +93,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, persistentFlags, FlagLogLevel, DefaultConfig().Log.Level) assertFlagValue(t, persistentFlags, FlagLogFormat, "text") assertFlagValue(t, persistentFlags, FlagLogTrace, false) + assertFlagValue(t, persistentFlags, FlagRootDir, DefaultRootDirWithName("test")) // Signer flags assertFlagValue(t, flags, FlagSignerPassphraseFile, "") @@ -97,9 +102,10 @@ func TestAddFlags(t *testing.T) { // RPC flags assertFlagValue(t, flags, FlagRPCAddress, DefaultConfig().RPC.Address) + assertFlagValue(t, flags, FlagRPCEnableDAVisualization, DefaultConfig().RPC.EnableDAVisualization) // Count the number of flags we're explicitly checking - expectedFlagCount := 37 // Update this number if you add more flag checks above + expectedFlagCount := 43 // Update this number if you add more flag checks above // Get the actual number of flags (both regular and persistent) actualFlagCount := 0 @@ -368,3 +374,57 @@ func assertFlagValue(t *testing.T, flags 
*pflag.FlagSet, name string, expectedVa } } } + +func TestBasedSequencerValidation(t *testing.T) { + tests := []struct { + name string + aggregator bool + basedSeq bool + expectError bool + errorMsg string + }{ + { + name: "based sequencer without aggregator should fail", + aggregator: false, + basedSeq: true, + expectError: true, + errorMsg: "based sequencer mode requires aggregator mode to be enabled", + }, + { + name: "based sequencer with aggregator should pass", + aggregator: true, + basedSeq: true, + expectError: false, + }, + { + name: "aggregator without based sequencer should pass", + aggregator: true, + basedSeq: false, + expectError: false, + }, + { + name: "neither aggregator nor based sequencer should pass", + aggregator: false, + basedSeq: false, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := DefaultConfig() + cfg.RootDir = t.TempDir() + cfg.Node.Aggregator = tt.aggregator + cfg.Node.BasedSequencer = tt.basedSeq + + err := cfg.Validate() + + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 6a6f813a3..d31d2acb8 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -70,11 +70,12 @@ func DefaultConfig() Config { ReadinessMaxBlocksBehind: calculateReadinessMaxBlocksBehind(defaultBlockTime.Duration, defaultReadinessWindowSeconds), }, DA: DAConfig{ - Address: "http://localhost:7980", - BlockTime: DurationWrapper{6 * time.Second}, - MaxSubmitAttempts: 30, - Namespace: randString(10), - DataNamespace: "", + Address: "http://localhost:7980", + BlockTime: DurationWrapper{6 * time.Second}, + MaxSubmitAttempts: 30, + Namespace: randString(10), + DataNamespace: "", + ForcedInclusionNamespace: "", }, Instrumentation: DefaultInstrumentationConfig(), Log: LogConfig{ diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 1fae14643..65cbed173 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -11,11 +11,12 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - DAStartHeight uint64 `json:"da_start_height"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + DAStartHeight uint64 `json:"da_start_height"` + DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` } // NewGenesis creates a new Genesis instance. 
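How `DAStartHeight` and `DAEpochForcedInclusion` partition DA heights into epochs is easiest to see with a small sketch. The arithmetic below is one plausible reading; `epochStart` and `epochEnd` are hypothetical helpers for illustration, since the real boundary logic lives in the block package's forced inclusion retriever, which is not shown in this diff:

package main

import "fmt"

// epochStart and epochEnd are hypothetical helpers illustrating how an epoch
// of DAEpochForcedInclusion DA blocks could be anchored at DAStartHeight.
func epochStart(daHeight, daStartHeight, epochSize uint64) uint64 {
	if daHeight < daStartHeight {
		return daStartHeight
	}
	return daStartHeight + (daHeight-daStartHeight)/epochSize*epochSize
}

func epochEnd(daHeight, daStartHeight, epochSize uint64) uint64 {
	return epochStart(daHeight, daStartHeight, epochSize) + epochSize - 1
}

func main() {
	const start, epoch = 100, 50 // DAStartHeight, DAEpochForcedInclusion (default 50)
	fmt.Println(epochStart(130, start, epoch), epochEnd(130, start, epoch)) // 100 149
	fmt.Println(epochStart(150, start, epoch), epochEnd(150, start, epoch)) // 150 199
}

This also motivates the `Validate` rule below: an epoch size of zero would make the division degenerate, so `da_epoch_forced_inclusion` must be at least 1.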
@@ -26,11 +27,12 @@ func NewGenesis( proposerAddress []byte, ) Genesis { genesis := Genesis{ - ChainID: chainID, - StartTime: startTime, - InitialHeight: initialHeight, - ProposerAddress: proposerAddress, - DAStartHeight: 0, + ChainID: chainID, + StartTime: startTime, + InitialHeight: initialHeight, + ProposerAddress: proposerAddress, + DAStartHeight: 0, + DAEpochForcedInclusion: 50, // Default epoch size } return genesis @@ -54,5 +56,9 @@ func (g Genesis) Validate() error { return fmt.Errorf("proposer_address cannot be nil") } + if g.DAEpochForcedInclusion < 1 { + return fmt.Errorf("da_epoch_forced_inclusion must be at least 1, got %d", g.DAEpochForcedInclusion) + } + return nil } diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index a5c1d280d..da3cc14b1 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -72,50 +72,55 @@ func TestGenesis_Validate(t *testing.T) { { name: "valid genesis - chain ID can contain any character", genesis: Genesis{ - ChainID: "test@chain#123!", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test@chain#123!", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid - empty chain_id", genesis: Genesis{ - ChainID: "", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 0, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 0, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero time DA start height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: time.Time{}, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: time.Time{}, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - nil proposer address", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: nil, + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: nil, + DAEpochForcedInclusion: 1, }, wantErr: true, }, diff --git a/pkg/genesis/io_test.go b/pkg/genesis/io_test.go index fb6f22307..7c8b882a6 100644 --- a/pkg/genesis/io_test.go +++ b/pkg/genesis/io_test.go @@ -30,40 +30,44 @@ func TestLoadAndSaveGenesis(t *testing.T) { { name: "valid genesis", genesis: Genesis{ - ChainID: "test-chain-1", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-1", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "valid genesis - minimal", genesis: Genesis{ - ChainID: "test-chain-2", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-2", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid genesis - empty chain ID", genesis: Genesis{ - ChainID: "", - InitialHeight: 1, - StartTime: validTime, - 
ProposerAddress: []byte("proposer-address"), + ChainID: "", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid genesis - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - InitialHeight: 0, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 0, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, @@ -177,10 +181,11 @@ func TestSaveGenesis_InvalidPath(t *testing.T) { } genesis := Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: time.Now().UTC(), - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().UTC(), + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, } err := genesis.Save(tc.path) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go new file mode 100644 index 000000000..763629200 --- /dev/null +++ b/sequencers/based/sequencer.go @@ -0,0 +1,185 @@ +package based + +import ( + "context" + "errors" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + seqcommon "github.com/evstack/ev-node/sequencers/common" +) + +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) +} + +var _ coresequencer.Sequencer = (*BasedSequencer)(nil) + +// BasedSequencer is a sequencer that only retrieves transactions from the DA layer +// via the forced inclusion mechanism. It does not accept transactions from the reaper. 
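+// Transactions submitted via SubmitBatchTxs are acknowledged but dropped.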
+type BasedSequencer struct { + fiRetriever ForcedInclusionRetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + + daHeight atomic.Uint64 + txQueue [][]byte +} + +// NewBasedSequencer creates a new based sequencer instance +func NewBasedSequencer( + fiRetriever ForcedInclusionRetriever, + da coreda.DA, + config config.Config, + genesis genesis.Genesis, + logger zerolog.Logger, +) *BasedSequencer { + bs := &BasedSequencer{ + fiRetriever: fiRetriever, + da: da, + config: config, + genesis: genesis, + logger: logger.With().Str("component", "based_sequencer").Logger(), + txQueue: make([][]byte, 0), + } + bs.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor + + return bs +} + +// SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA +// This satisfies the Sequencer interface but transactions submitted here are ignored +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.SubmitBatchTxsRequest) (*coresequencer.SubmitBatchTxsResponse, error) { + s.logger.Debug().Msg("based sequencer ignores submitted transactions - only DA transactions are processed") + return &coresequencer.SubmitBatchTxsResponse{}, nil +} + +// GetNextBatch retrieves the next batch of transactions from the DA layer +// It fetches forced inclusion transactions and returns them as the next batch +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { + currentDAHeight := s.daHeight.Load() + + s.logger.Debug().Uint64("da_height", currentDAHeight).Msg("fetching forced inclusion transactions from DA") + + forcedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + if err != nil { + // Check if forced inclusion is not configured + if errors.Is(err, block.ErrForceInclusionNotConfigured) { + s.logger.Error().Msg("forced inclusion not configured, returning empty batch") + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: nil}, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } else if errors.Is(err, coreda.ErrHeightFromFuture) { + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block + s.logger.Debug(). + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + } else { + s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + return nil, err + } + } + + // Update DA height based on the retrieved event + if forcedTxsEvent.EndDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.EndDaHeight) + } else if forcedTxsEvent.StartDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.StartDaHeight) + } + + // Add forced inclusion transactions to the queue with validation + validTxs := 0 + skippedTxs := 0 + for _, tx := range forcedTxsEvent.Txs { + // Validate blob size against absolute maximum + if !seqcommon.ValidateBlobSize(tx) { + s.logger.Warn(). + Uint64("da_height", forcedTxsEvent.StartDaHeight). + Int("blob_size", len(tx)). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + skippedTxs++ + continue + } + s.txQueue = append(s.txQueue, tx) + validTxs++ + } + + s.logger.Info(). + Int("valid_tx_count", validTxs). + Int("skipped_tx_count", skippedTxs). + Int("queue_size", len(s.txQueue)). 
+ Uint64("da_height_start", forcedTxsEvent.StartDaHeight). + Uint64("da_height_end", forcedTxsEvent.EndDaHeight). + Msg("processed forced inclusion transactions from DA") + + batch := s.createBatchFromQueue(req.MaxBytes) + + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil +} + +// createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch { + if len(s.txQueue) == 0 { + return &coresequencer.Batch{Transactions: nil} + } + + var batch [][]byte + var totalBytes uint64 + + for i, tx := range s.txQueue { + txSize := uint64(len(tx)) + // Always respect maxBytes, even for the first transaction + if totalBytes+txSize > maxBytes { + // Would exceed max bytes, stop here + s.txQueue = s.txQueue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // If this is the last transaction, clear the queue + if i == len(s.txQueue)-1 { + s.txQueue = s.txQueue[:0] + } + } + + return &coresequencer.Batch{Transactions: batch} +} + +// VerifyBatch verifies a batch of transactions +// For a based sequencer, we always return true as all transactions come from DA +func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBatchRequest) (*coresequencer.VerifyBatchResponse, error) { + return &coresequencer.VerifyBatchResponse{ + Status: true, + }, nil +} + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (c *BasedSequencer) SetDAHeight(height uint64) { + c.daHeight.Store(height) + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (c *BasedSequencer) GetDAHeight() uint64 { + return c.daHeight.Load() +} diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go new file mode 100644 index 000000000..5bb5acd2f --- /dev/null +++ b/sequencers/based/sequencer_test.go @@ -0,0 +1,569 @@ +package based + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" +) + +// MockDA is a mock implementation of DA for testing +type MockDA struct { + mock.Mock +} + +func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace, options) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + args := m.Called(ctx, height, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*coreda.GetIDsResult), args.Error(1) +} + +func (m *MockDA) Get(ctx 
context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]coreda.Proof), args.Error(1) +} + +func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + args := m.Called(ctx, ids, proofs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]bool), args.Error(1) +} + +func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func TestNewBasedSequencer(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + require.NotNil(t, seq) + assert.Equal(t, uint64(100), seq.daHeight.Load()) + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Submit should succeed but be ignored + req := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + } + + resp, err := seq.SubmitBatchTxs(context.Background(), req) + + require.NoError(t, err) + require.NotNil(t, resp) + // Transactions should not be added to queue for based sequencer + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { + testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} + + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil) + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) 
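+ // Both mocked blobs fall within the first epoch, so GetNextBatch should
+ // return them in DA order.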
+ + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1]) + + // DA height should be updated + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + // Create config without forced inclusion namespace + cfgNoFI := config.DefaultConfig() + cfgNoFI.DA.ForcedInclusionNamespace = "" + daClient := block.NewDAClient(mockDA, cfgNoFI, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfgNoFI, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) +} + +func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { + testBlobs := [][]byte{ + make([]byte, 50), // 50 bytes + make([]byte, 60), // 60 bytes + make([]byte, 100), // 100 bytes + } + + mockDA := new(MockDA) + // First call returns forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil).Once() + + // Subsequent calls should return no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, 
zerolog.Nop()) + + // First call with max 100 bytes - should get only the first tx (50 bytes), since 50 + 60 = 110 would exceed the limit + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + // Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100) + assert.Equal(t, 1, len(resp.Batch.Transactions)) + assert.Equal(t, 2, len(seq.txQueue)) // 2 remaining in queue + + // Second call should get next tx from queue + resp2, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions)) + assert.Equal(t, 1, len(seq.txQueue)) // 1 remaining in queue + + // Third call with larger maxBytes to get the 100-byte tx + req3 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + resp3, err := seq.GetNextBatch(context.Background(), req3) + require.NoError(t, err) + require.NotNil(t, resp3) + require.NotNil(t, resp3.Batch) + assert.Equal(t, 1, len(resp3.Batch.Transactions)) + assert.Equal(t, 0, len(seq.txQueue)) // Queue should be empty + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, mock.Anything, mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Pre-populate the queue + seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) + + // Queue should be empty now + assert.Equal(t, 0, len(seq.txQueue)) +} + +func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + mockDA := new(MockDA) + + // First call: return a forced tx that will be added to queue + forcedTx := make([]byte, 150) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once() + + // Second call: no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) +
fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 100 + // Forced tx (150 bytes) is added to queue, but batch will be empty since it exceeds maxBytes + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes") + + // Verify forced tx is in queue + assert.Equal(t, 1, len(seq.txQueue), "Forced tx should be in queue") + + // Second call with larger maxBytes = 200 + // Should process tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include tx from queue") + assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T) { + mockDA := new(MockDA) + + // Return forced txs where combined they exceed maxBytes + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once() + + // Second call + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 120 + // Should get only first forced tx (100 bytes), second stays in queue + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 1, len(resp1.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp1.Batch.Transactions[0])) + + // Verify second tx is still in queue + assert.Equal(t, 1, len(seq.txQueue), "Second tx should be in queue") + + // Second call - should get the second tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include second tx from queue") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty 
+ assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_VerifyBatch(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.VerifyBatchRequest{ + Id: []byte("test-chain"), + BatchData: [][]byte{[]byte("tx1")}, + } + + resp, err := seq.VerifyBatch(context.Background(), req) + require.NoError(t, err) + assert.True(t, resp.Status) +} + +func TestBasedSequencer_SetDAHeight(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + seq.SetDAHeight(200) + assert.Equal(t, uint64(200), seq.GetDAHeight()) +} + +func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, errors.New("DA connection error")) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + _, err := seq.GetNextBatch(context.Background(), req) + require.Error(t, err) + + mockDA.AssertExpectations(t) +} diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go new file mode 100644 index 000000000..7484d3a54 --- /dev/null +++ b/sequencers/common/size_validation.go @@ -0,0 +1,27 @@ +package common + +// TODO(@julienrbrt): technically we may need to check for block gas as well + +const ( + // AbsoluteMaxBlobSize is the absolute maximum size for a single blob (DA layer limit). + // Blobs exceeding this size are invalid and should be rejected permanently. + AbsoluteMaxBlobSize = 2 * 1024 * 1024 // 2MB +) + +// ValidateBlobSize checks if a single blob exceeds the absolute maximum allowed size. +// This checks against the DA layer limit, not the per-batch limit. +// Returns true if the blob is within the absolute size limit, false otherwise. +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks if adding a blob would exceed the cumulative size limit for a batch. +// Returns true if adding the blob would exceed the limit, false otherwise. 
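+// For example, WouldExceedCumulativeSize(90, 20, 100) returns true because 90+20 > 100.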
+func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} + +// GetBlobSize returns the size of a blob in bytes. +func GetBlobSize(blob []byte) int { + return len(blob) +} diff --git a/sequencers/common/size_validation_test.go b/sequencers/common/size_validation_test.go new file mode 100644 index 000000000..103c66d8b --- /dev/null +++ b/sequencers/common/size_validation_test.go @@ -0,0 +1,141 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want bool + }{ + { + name: "empty blob", + blobSize: 0, + want: true, + }, + { + name: "small blob", + blobSize: 100, + want: true, + }, + { + name: "exactly at limit", + blobSize: int(AbsoluteMaxBlobSize), + want: true, + }, + { + name: "one byte over limit", + blobSize: int(AbsoluteMaxBlobSize) + 1, + want: false, + }, + { + name: "far exceeds limit", + blobSize: int(AbsoluteMaxBlobSize) * 2, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := ValidateBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestWouldExceedCumulativeSize(t *testing.T) { + tests := []struct { + name string + currentSize int + blobSize int + maxBytes uint64 + want bool + }{ + { + name: "empty batch, small blob", + currentSize: 0, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would fit exactly", + currentSize: 50, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would exceed by one byte", + currentSize: 50, + blobSize: 51, + maxBytes: 100, + want: true, + }, + { + name: "far exceeds", + currentSize: 80, + blobSize: 100, + maxBytes: 100, + want: true, + }, + { + name: "zero max bytes", + currentSize: 0, + blobSize: 1, + maxBytes: 0, + want: true, + }, + { + name: "current already at limit", + currentSize: 100, + blobSize: 1, + maxBytes: 100, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := WouldExceedCumulativeSize(tt.currentSize, tt.blobSize, tt.maxBytes) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want int + }{ + { + name: "empty blob", + blobSize: 0, + want: 0, + }, + { + name: "small blob", + blobSize: 42, + want: 42, + }, + { + name: "large blob", + blobSize: 1024 * 1024, + want: 1024 * 1024, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := GetBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/sequencers/single/queue.go b/sequencers/single/queue.go index dd69c26a2..d992535ea 100644 --- a/sequencers/single/queue.go +++ b/sequencers/single/queue.go @@ -83,6 +83,26 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch coresequencer.Batch) e return nil } +// Prepend adds a batch to the front of the queue (before head position). +// This is used to return transactions that couldn't fit in the current batch. +// The batch is NOT persisted to the DB since these are transactions that were +// already in the queue or were just processed. 
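+// Batches prepended repeatedly are consumed in LIFO order relative to one another.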
+// Prepend inserts a batch at the front of the in-memory queue so that it is
+// returned by the next call to Next.
+func (bq *BatchQueue) Prepend(ctx context.Context, batch coresequencer.Batch) error {
+    bq.mu.Lock()
+    defer bq.mu.Unlock()
+
+    // If we have room before head, use it
+    if bq.head > 0 {
+        bq.head--
+        bq.queue[bq.head] = batch
+    } else {
+        // Need to expand the queue at the front
+        bq.queue = append([]coresequencer.Batch{batch}, bq.queue...)
+    }
+
+    return nil
+}
+
 // Next extracts a batch of transactions from the queue and marks it as processed in the WAL
 func (bq *BatchQueue) Next(ctx context.Context) (*coresequencer.Batch, error) {
     bq.mu.Lock()
diff --git a/sequencers/single/queue_test.go b/sequencers/single/queue_test.go
index 0ede59a90..b7665ee67 100644
--- a/sequencers/single/queue_test.go
+++ b/sequencers/single/queue_test.go
@@ -12,6 +12,7 @@ import (
     ds "github.com/ipfs/go-datastore"
     "github.com/ipfs/go-datastore/query"
     dssync "github.com/ipfs/go-datastore/sync"
+    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     "google.golang.org/protobuf/proto"

@@ -567,3 +568,156 @@ func TestBatchQueue_QueueLimit_Concurrency(t *testing.T) {
     t.Logf("Successfully added %d batches, rejected %d due to queue being full",
         addedCount, errorCount)
 }
+
+func TestBatchQueue_Prepend(t *testing.T) {
+    ctx := context.Background()
+    db := ds.NewMapDatastore()
+
+    t.Run("prepend to empty queue", func(t *testing.T) {
+        queue := NewBatchQueue(db, "test-prepend-empty", 0)
+        err := queue.Load(ctx)
+        require.NoError(t, err)
+
+        batch := coresequencer.Batch{
+            Transactions: [][]byte{[]byte("tx1"), []byte("tx2")},
+        }
+
+        err = queue.Prepend(ctx, batch)
+        require.NoError(t, err)
+
+        assert.Equal(t, 1, queue.Size())
+
+        // Next should return the prepended batch
+        nextBatch, err := queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, 2, len(nextBatch.Transactions))
+        assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0])
+    })
+
+    t.Run("prepend to queue with items", func(t *testing.T) {
+        queue := NewBatchQueue(db, "test-prepend-with-items", 0)
+        err := queue.Load(ctx)
+        require.NoError(t, err)
+
+        // Add some batches first
+        batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}}
+        batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}}
+        err = queue.AddBatch(ctx, batch1)
+        require.NoError(t, err)
+        err = queue.AddBatch(ctx, batch2)
+        require.NoError(t, err)
+
+        assert.Equal(t, 2, queue.Size())
+
+        // Prepend a batch
+        prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}}
+        err = queue.Prepend(ctx, prependedBatch)
+        require.NoError(t, err)
+
+        assert.Equal(t, 3, queue.Size())
+
+        // Next should return the prepended batch first
+        nextBatch, err := queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, 1, len(nextBatch.Transactions))
+        assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0])
+
+        // Then the original batches
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0])
+
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0])
+    })
+
+    t.Run("prepend after consuming some items", func(t *testing.T) {
+        queue := NewBatchQueue(db, "test-prepend-after-consume", 0)
+        err := queue.Load(ctx)
+        require.NoError(t, err)
+
+        // Add batches
+        batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}}
+        batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}}
+        batch3 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx3")}}
+        err = queue.AddBatch(ctx, batch1)
+        require.NoError(t, err)
+        err = queue.AddBatch(ctx, batch2)
+        require.NoError(t, err)
+        err = queue.AddBatch(ctx, batch3)
+        require.NoError(t, err)
+
+        assert.Equal(t, 3, queue.Size())
+
+        // Consume first batch
+        nextBatch, err := queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0])
+        assert.Equal(t, 2, queue.Size())
+
+        // Prepend - should reuse the head position
+        prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}}
+        err = queue.Prepend(ctx, prependedBatch)
+        require.NoError(t, err)
+
+        assert.Equal(t, 3, queue.Size())
+
+        // Should get prepended, then tx2, then tx3
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0])
+
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0])
+
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("tx3"), nextBatch.Transactions[0])
+
+        assert.Equal(t, 0, queue.Size())
+    })
+
+    t.Run("multiple prepends", func(t *testing.T) {
+        queue := NewBatchQueue(db, "test-multiple-prepends", 0)
+        err := queue.Load(ctx)
+        require.NoError(t, err)
+
+        // Add a batch
+        batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}}
+        err = queue.AddBatch(ctx, batch1)
+        require.NoError(t, err)
+
+        // Prepend multiple batches
+        prepend1 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend1")}}
+        prepend2 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend2")}}
+        prepend3 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend3")}}
+
+        err = queue.Prepend(ctx, prepend1)
+        require.NoError(t, err)
+        err = queue.Prepend(ctx, prepend2)
+        require.NoError(t, err)
+        err = queue.Prepend(ctx, prepend3)
+        require.NoError(t, err)
+
+        assert.Equal(t, 4, queue.Size())
+
+        // Should get in reverse order of prepending (LIFO for prepended items)
+        nextBatch, err := queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("prepend3"), nextBatch.Transactions[0])
+
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("prepend2"), nextBatch.Transactions[0])
+
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("prepend1"), nextBatch.Transactions[0])
+
+        nextBatch, err = queue.Next(ctx)
+        require.NoError(t, err)
+        assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0])
+    })
+}
diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go
index dbc5bc567..e97d7a157 100644
--- a/sequencers/single/sequencer.go
+++ b/sequencers/single/sequencer.go
@@ -5,21 +5,36 @@ import (
     "context"
     "errors"
     "fmt"
+    "sync/atomic"
     "time"

     ds "github.com/ipfs/go-datastore"
     "github.com/rs/zerolog"

+    "github.com/evstack/ev-node/block"
     coreda "github.com/evstack/ev-node/core/da"
     coresequencer "github.com/evstack/ev-node/core/sequencer"
+    "github.com/evstack/ev-node/pkg/genesis"
+    seqcommon "github.com/evstack/ev-node/sequencers/common"
 )

-// ErrInvalidId is returned when the chain id is invalid
 var (
+    // ErrInvalidId is returned when the chain id is invalid
     ErrInvalidId = errors.New("invalid chain id")
 )

-var _ coresequencer.Sequencer = &Sequencer{}
+// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA
+type ForcedInclusionRetriever interface {
+    RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error)
+}
+
+// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch
+type pendingForcedInclusionTx struct {
+    Data           []byte
+    OriginalHeight uint64
+}
+
+var _ coresequencer.Sequencer = (*Sequencer)(nil)

 // Sequencer implements core sequencing interface
 type Sequencer struct {
@@ -35,6 +50,12 @@ type Sequencer struct {
     queue *BatchQueue // single queue for immediate availability

     metrics *Metrics
+
+    // Forced inclusion support
+    fiRetriever               ForcedInclusionRetriever
+    genesis                   genesis.Genesis
+    daHeight                  atomic.Uint64
+    pendingForcedInclusionTxs []pendingForcedInclusionTx
 }

 // NewSequencer creates a new Single Sequencer
@@ -47,31 +68,23 @@ func NewSequencer(
     batchTime time.Duration,
     metrics *Metrics,
     proposer bool,
-) (*Sequencer, error) {
-    return NewSequencerWithQueueSize(ctx, logger, db, da, id, batchTime, metrics, proposer, 1000)
-}
-
-// NewSequencerWithQueueSize creates a new Single Sequencer with configurable queue size
-func NewSequencerWithQueueSize(
-    ctx context.Context,
-    logger zerolog.Logger,
-    db ds.Batching,
-    da coreda.DA,
-    id []byte,
-    batchTime time.Duration,
-    metrics *Metrics,
-    proposer bool,
     maxQueueSize int,
+    fiRetriever ForcedInclusionRetriever,
+    gen genesis.Genesis,
 ) (*Sequencer, error) {
     s := &Sequencer{
-        logger:    logger,
-        da:        da,
-        batchTime: batchTime,
-        Id:        id,
-        queue:     NewBatchQueue(db, "batches", maxQueueSize),
-        metrics:   metrics,
-        proposer:  proposer,
+        logger:                    logger,
+        da:                        da,
+        batchTime:                 batchTime,
+        Id:                        id,
+        queue:                     NewBatchQueue(db, "batches", maxQueueSize),
+        metrics:                   metrics,
+        proposer:                  proposer,
+        fiRetriever:               fiRetriever,
+        genesis:                   gen,
+        pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0),
     }
+    s.SetDAHeight(gen.DAStartHeight) // will be overridden by the executor

     loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
     defer cancel()
@@ -117,14 +130,97 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB
         return nil, ErrInvalidId
     }

+    currentDAHeight := c.daHeight.Load()
+
+    forcedEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight)
+    if err != nil {
+        // Continue without forced txs, logging the reason for visibility.
+
+        if errors.Is(err, coreda.ErrHeightFromFuture) {
+            c.logger.Debug().
+                Uint64("da_height", currentDAHeight).
+                Msg("DA height from future, waiting for DA to produce block")
+        } else if !errors.Is(err, block.ErrForceInclusionNotConfigured) {
+            c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions")
+        }
+
+        // Still create an empty forced inclusion event
+        forcedEvent = &block.ForcedInclusionEvent{
+            Txs:           [][]byte{},
+            StartDaHeight: currentDAHeight,
+            EndDaHeight:   currentDAHeight,
+        }
+    }
+
+    // Always try to process forced inclusion transactions (including pending from previous epochs)
+    forcedTxs := c.processForcedInclusionTxs(forcedEvent, req.MaxBytes)
+    if forcedEvent.EndDaHeight > currentDAHeight {
+        c.SetDAHeight(forcedEvent.EndDaHeight)
+    } else if forcedEvent.StartDaHeight > currentDAHeight {
+        c.SetDAHeight(forcedEvent.StartDaHeight)
+    }
+
+    c.logger.Debug().
+        Int("tx_count", len(forcedTxs)).
+        Uint64("da_height_start", forcedEvent.StartDaHeight).
+        Uint64("da_height_end", forcedEvent.EndDaHeight).
+ Msg("retrieved forced inclusion transactions from DA") + + // Calculate size used by forced inclusion transactions + forcedTxsSize := 0 + for _, tx := range forcedTxs { + forcedTxsSize += len(tx) + } + batch, err := c.queue.Next(ctx) if err != nil { return nil, err } + // Prepend forced inclusion transactions to the batch + // and ensure total size doesn't exceed maxBytes + if len(forcedTxs) > 0 { + // Trim batch transactions to fit within maxBytes + remainingBytes := int(req.MaxBytes) - forcedTxsSize + trimmedBatchTxs := make([][]byte, 0, len(batch.Transactions)) + currentBatchSize := 0 + + for i, tx := range batch.Transactions { + txSize := len(tx) + if currentBatchSize+txSize > remainingBytes { + // Would exceed limit, return remaining txs to the front of the queue + excludedBatch := coresequencer.Batch{Transactions: batch.Transactions[i:]} + if err := c.queue.Prepend(ctx, excludedBatch); err != nil { + c.logger.Error().Err(err). + Int("excluded_count", len(batch.Transactions)-i). + Msg("failed to prepend excluded transactions back to queue") + } else { + c.logger.Debug(). + Int("excluded_count", len(batch.Transactions)-i). + Msg("returned excluded batch transactions to front of queue") + } + break + } + trimmedBatchTxs = append(trimmedBatchTxs, tx) + currentBatchSize += txSize + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) + + c.logger.Debug(). + Int("forced_tx_count", len(forcedTxs)). + Int("forced_txs_size", forcedTxsSize). + Int("batch_tx_count", len(trimmedBatchTxs)). + Int("batch_size", currentBatchSize). + Int("total_tx_count", len(batch.Transactions)). + Int("total_size", forcedTxsSize+currentBatchSize). + Msg("combined forced inclusion and batch transactions") + } + return &coresequencer.GetNextBatchResponse{ Batch: batch, Timestamp: time.Now(), + BatchData: req.LastBatchData, }, nil } @@ -171,3 +267,106 @@ func (c *Sequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBat func (c *Sequencer) isValid(Id []byte) bool { return bytes.Equal(c.Id, Id) } + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (c *Sequencer) SetDAHeight(height uint64) { + c.daHeight.Store(height) + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (c *Sequencer) GetDAHeight() uint64 { + return c.daHeight.Load() +} + +// processForcedInclusionTxs processes forced inclusion transactions with size validation and pending queue management +func (c *Sequencer) processForcedInclusionTxs(event *block.ForcedInclusionEvent, maxBytes uint64) [][]byte { + currentSize := 0 + var newPendingTxs []pendingForcedInclusionTx + var validatedTxs [][]byte + + // First, process any pending transactions from previous epochs + for _, pendingTx := range c.pendingForcedInclusionTxs { + txSize := seqcommon.GetBlobSize(pendingTx.Data) + + if !seqcommon.ValidateBlobSize(pendingTx.Data) { + c.logger.Warn(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Msg("pending forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). 
+ Msg("pending blob would exceed max size for this epoch - deferring again") + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += txSize + + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed pending forced inclusion transaction") + } + + // Now process new transactions from this epoch + for _, tx := range event.Txs { + txSize := seqcommon.GetBlobSize(tx) + + if !seqcommon.ValidateBlobSize(tx) { + c.logger.Warn(). + Uint64("da_height", event.StartDaHeight). + Int("blob_size", txSize). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("da_height", event.StartDaHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). + Msg("blob would exceed max size for this epoch - deferring to pending queue") + + // Store for next call + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + + validatedTxs = append(validatedTxs, tx) + currentSize += txSize + + c.logger.Debug(). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed forced inclusion transaction") + } + + // Update pending queue + c.pendingForcedInclusionTxs = newPendingTxs + if len(newPendingTxs) > 0 { + c.logger.Info(). + Int("new_pending_count", len(newPendingTxs)). + Msg("stored pending forced inclusion transactions for next epoch") + } + + c.logger.Info(). + Int("processed_tx_count", len(validatedTxs)). + Int("pending_tx_count", len(newPendingTxs)). + Int("current_size", currentSize). + Msg("completed processing forced inclusion transactions") + + return validatedTxs +} diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 5362b4904..90da6fb3a 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,11 +13,26 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" damocks "github.com/evstack/ev-node/test/mocks" ) +// MockForcedInclusionRetriever is a mock implementation of DARetriever for testing +type MockForcedInclusionRetriever struct { + mock.Mock +} + +func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) { + args := m.Called(ctx, daHeight) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) +} + func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) @@ -26,7 +41,10 @@ func TestNewSequencer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
+    seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{})
     if err != nil {
         t.Fatalf("Failed to create sequencer: %v", err)
     }
@@ -59,7 +77,10 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) {
     defer cancel()
     Id := []byte("test1")
     logger := zerolog.Nop()
-    seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false)
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
+    seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{})
     if err != nil {
         t.Fatalf("Failed to create sequencer: %v", err)
     }
@@ -112,7 +133,10 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) {
     defer cancel()
     Id := []byte("test1")
     logger := zerolog.Nop()
-    seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false)
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
+    seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{})
     require.NoError(t, err, "Failed to create sequencer")
     defer func() {
         err := db.Close()
@@ -152,10 +176,14 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) {
     db := ds.NewMapDatastore()
     logger := zerolog.Nop()
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
     seq := &Sequencer{
-        logger: logger,
-        queue:  NewBatchQueue(db, "batches", 0), // 0 = unlimited for test
-        Id:     []byte("test"),
+        logger:      logger,
+        queue:       NewBatchQueue(db, "batches", 0), // 0 = unlimited for test
+        Id:          []byte("test"),
+        fiRetriever: mockRetriever,
     }
     defer func() {
         err := db.Close()
@@ -188,10 +216,14 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) {
     db := ds.NewMapDatastore()
     logger := zerolog.Nop()
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
     seq := &Sequencer{
-        logger: logger,
-        queue:  NewBatchQueue(db, "batches", 0), // 0 = unlimited for test
-        Id:     []byte("test"),
+        logger:      logger,
+        queue:       NewBatchQueue(db, "batches", 0), // 0 = unlimited for test
+        Id:          []byte("test"),
+        fiRetriever: mockRetriever,
     }
     defer func() {
         err := db.Close()
@@ -247,13 +279,17 @@ func TestSequencer_VerifyBatch(t *testing.T) {
     t.Run("Proposer Mode", func(t *testing.T) {
         mockDA := damocks.NewMockDA(t)
         logger := zerolog.Nop()
+        mockRetriever := new(MockForcedInclusionRetriever)
+        mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+            Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
         seq := &Sequencer{
-            logger:   logger,
-            Id:       Id,
-            proposer: true,
-            da:       mockDA,
-            queue:    NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test
+            logger:      logger,
+            Id:          Id,
+            proposer:    true,
+            da:          mockDA,
+            queue:       NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test
+            fiRetriever: mockRetriever,
         }

         res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: seq.Id, BatchData: batchData})
@@ -269,12 +305,16 @@
     t.Run("Valid Proofs", func(t *testing.T) {
         mockDA := damocks.NewMockDA(t)
         logger := zerolog.Nop()
+        mockRetriever := new(MockForcedInclusionRetriever)
+        mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+            Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
         seq := &Sequencer{
-            logger:   logger,
-            Id:       Id,
-            proposer: false,
-            da:       mockDA,
-            queue:    NewBatchQueue(db, "valid_proofs_queue", 0),
+            logger:      logger,
+            Id:          Id,
+            proposer:    false,
+            da:          mockDA,
+            queue:       NewBatchQueue(db, "valid_proofs_queue", 0),
+            fiRetriever: mockRetriever,
         }

         mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once()
@@ -290,12 +330,16 @@
     t.Run("Invalid Proof", func(t *testing.T) {
         mockDA := damocks.NewMockDA(t)
         logger := zerolog.Nop()
+        mockRetriever := new(MockForcedInclusionRetriever)
+        mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+            Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
         seq := &Sequencer{
-            logger:   logger,
-            Id:       Id,
-            proposer: false,
-            da:       mockDA,
-            queue:    NewBatchQueue(db, "invalid_proof_queue", 0),
+            logger:      logger,
+            Id:          Id,
+            proposer:    false,
+            da:          mockDA,
+            queue:       NewBatchQueue(db, "invalid_proof_queue", 0),
+            fiRetriever: mockRetriever,
         }

         mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once()
@@ -311,12 +355,16 @@
     t.Run("GetProofs Error", func(t *testing.T) {
         mockDA := damocks.NewMockDA(t)
         logger := zerolog.Nop()
+        mockRetriever := new(MockForcedInclusionRetriever)
+        mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+            Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
         seq := &Sequencer{
-            logger:   logger,
-            Id:       Id,
-            proposer: false,
-            da:       mockDA,
-            queue:    NewBatchQueue(db, "getproofs_err_queue", 0),
+            logger:      logger,
+            Id:          Id,
+            proposer:    false,
+            da:          mockDA,
+            queue:       NewBatchQueue(db, "getproofs_err_queue", 0),
+            fiRetriever: mockRetriever,
         }

         expectedErr := errors.New("get proofs failed")
@@ -333,12 +381,16 @@
     t.Run("Validate Error", func(t *testing.T) {
         mockDA := damocks.NewMockDA(t)
         logger := zerolog.Nop()
+        mockRetriever := new(MockForcedInclusionRetriever)
+        mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+            Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
         seq := &Sequencer{
-            logger:   logger,
-            Id:       Id,
-            proposer: false,
-            da:       mockDA,
-            queue:    NewBatchQueue(db, "validate_err_queue", 0),
+            logger:      logger,
+            Id:          Id,
+            proposer:    false,
+            da:          mockDA,
+            queue:       NewBatchQueue(db, "validate_err_queue", 0),
+            fiRetriever: mockRetriever,
         }

         expectedErr := errors.New("validate failed")
@@ -355,13 +407,17 @@
     t.Run("Invalid ID", func(t *testing.T) {
         mockDA := damocks.NewMockDA(t)
         logger := zerolog.Nop()
+        mockRetriever := new(MockForcedInclusionRetriever)
+        mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+            Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
         seq := &Sequencer{
-            logger:   logger,
-            Id:       Id,
-            proposer: false,
-            da:       mockDA,
-            queue:    NewBatchQueue(db, "invalid_queue", 0),
+            logger:      logger,
+            Id:          Id,
+            proposer:    false,
+            da:          mockDA,
+            queue:       NewBatchQueue(db, "invalid_queue", 0),
+            fiRetriever: mockRetriever,
         }

         invalidId := []byte("invalid")
@@ -385,7 +441,10 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
     defer cancel()
     logger := zerolog.Nop()
-    seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false)
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
+    seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{})
     if err != nil {
         t.Fatalf("Failed to create sequencer: %v", err)
     }
@@ -431,6 +490,254 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) {
     mockDA.AssertExpectations(t)
 }

+func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) {
+    ctx := context.Background()
+    logger := zerolog.New(zerolog.NewConsoleWriter())
+
+    // Create in-memory datastore
+    db := ds.NewMapDatastore()
+
+    // Create a mock forced inclusion retriever with two txs of 50 and 60 bytes
+    mockFI := &MockForcedInclusionRetriever{}
+    forcedTx1 := make([]byte, 50)
+    forcedTx2 := make([]byte, 60)
+    mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{
+        Txs:           [][]byte{forcedTx1, forcedTx2}, // Total 110 bytes
+        StartDaHeight: 100,
+        EndDaHeight:   100,
+    }, nil)
+
+    gen := genesis.Genesis{
+        ChainID:       "test-chain",
+        DAStartHeight: 100,
+    }
+
+    seq, err := NewSequencer(
+        ctx,
+        logger,
+        db,
+        nil,
+        []byte("test-chain"),
+        1*time.Second,
+        nil,
+        true,
+        100,
+        mockFI,
+        gen,
+    )
+    require.NoError(t, err)
+
+    // Submit batch txs that are 40 bytes each
+    batchTx1 := make([]byte, 40)
+    batchTx2 := make([]byte, 40)
+    batchTx3 := make([]byte, 40)
+
+    submitReq := coresequencer.SubmitBatchTxsRequest{
+        Id: []byte("test-chain"),
+        Batch: &coresequencer.Batch{
+            Transactions: [][]byte{batchTx1, batchTx2, batchTx3}, // Total 120 bytes
+        },
+    }
+
+    _, err = seq.SubmitBatchTxs(ctx, submitReq)
+    require.NoError(t, err)
+
+    // Request batch with maxBytes = 150
+    // Forced inclusion: 110 bytes (50 + 60)
+    // Batch txs: 120 bytes (40 + 40 + 40)
+    // Combined would be 230 bytes, exceeds 150
+    // Should return forced txs + only 1 batch tx (110 + 40 = 150)
+    getReq := coresequencer.GetNextBatchRequest{
+        Id:            []byte("test-chain"),
+        MaxBytes:      150,
+        LastBatchData: nil,
+    }
+
+    resp, err := seq.GetNextBatch(ctx, getReq)
+    require.NoError(t, err)
+    require.NotNil(t, resp.Batch)
+
+    // Should have forced txs (2) + partial batch txs
+    // Total size should not exceed 150 bytes
+    totalSize := 0
+    for _, tx := range resp.Batch.Transactions {
+        totalSize += len(tx)
+    }
+    assert.LessOrEqual(t, totalSize, 150, "Total batch size should not exceed maxBytes")
+
+    // First 2 txs should be forced inclusion txs
+    assert.GreaterOrEqual(t, len(resp.Batch.Transactions), 2, "Should have at least forced inclusion txs")
+    assert.Equal(t, forcedTx1, resp.Batch.Transactions[0])
+    assert.Equal(t, forcedTx2, resp.Batch.Transactions[1])
+
+    mockFI.AssertExpectations(t)
+}
+
+func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) {
+    ctx := context.Background()
+    logger := zerolog.New(zerolog.NewConsoleWriter())
+
+    db := ds.NewMapDatastore()
+
+    // Create forced inclusion txs where combined they exceed maxBytes
+    mockFI := &MockForcedInclusionRetriever{}
+    forcedTx1 := make([]byte, 100)
+    forcedTx2 := make([]byte, 80) // This would be deferred
+    mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{
+        Txs:           [][]byte{forcedTx1, forcedTx2},
+        StartDaHeight: 100,
+        EndDaHeight:   100,
+    }, nil).Once()
+
+    // Second call should process pending tx
+    mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{
+        Txs:           [][]byte{},
+        StartDaHeight: 100,
+        EndDaHeight:   100,
+    }, nil).Once()
+
+    gen := genesis.Genesis{
+        ChainID:       "test-chain",
+        DAStartHeight: 100,
+    }
+
+    seq, err := NewSequencer(
+        ctx,
+        logger,
+        db,
+        nil,
+        []byte("test-chain"),
+        1*time.Second,
+        nil,
+        true,
+        100,
+        mockFI,
+        gen,
+    )
+    require.NoError(t, err)
+
+    // Request batch with maxBytes = 120
+    getReq := coresequencer.GetNextBatchRequest{
+        Id:            []byte("test-chain"),
+        MaxBytes:      120,
+        LastBatchData: nil,
+    }
+
+    // First call - should get only first forced tx (100 bytes)
+    resp, err := seq.GetNextBatch(ctx, getReq)
+    require.NoError(t, err)
+    require.NotNil(t, resp.Batch)
+    assert.Equal(t, 1, len(resp.Batch.Transactions), "Should only include first forced tx")
+    assert.Equal(t, 100, len(resp.Batch.Transactions[0]))
+
+    // Verify pending tx is stored
+    assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Second tx should be pending")
+
+    // Second call - should get the pending forced tx
+    resp2, err := seq.GetNextBatch(ctx, getReq)
+    require.NoError(t, err)
+    require.NotNil(t, resp2.Batch)
+    assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx")
+    assert.Equal(t, 80, len(resp2.Batch.Transactions[0]))
+
+    // Pending queue should now be empty
+    assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty")
+
+    mockFI.AssertExpectations(t)
+}
+
+func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) {
+    ctx := context.Background()
+    logger := zerolog.New(zerolog.NewConsoleWriter())
+
+    db := ds.NewMapDatastore()
+
+    mockFI := &MockForcedInclusionRetriever{}
+
+    // First call returns a large forced tx that gets deferred
+    largeForcedTx := make([]byte, 150)
+    mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{
+        Txs:           [][]byte{largeForcedTx},
+        StartDaHeight: 100,
+        EndDaHeight:   100,
+    }, nil).Once()
+
+    // Second call returns no new forced txs, but pending should still be processed
+    mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{
+        Txs:           [][]byte{},
+        StartDaHeight: 100,
+        EndDaHeight:   100,
+    }, nil).Once()
+
+    gen := genesis.Genesis{
+        ChainID:       "test-chain",
+        DAStartHeight: 100,
+    }
+
+    seq, err := NewSequencer(
+        ctx,
+        logger,
+        db,
+        nil,
+        []byte("test-chain"),
+        1*time.Second,
+        nil,
+        true,
+        100,
+        mockFI,
+        gen,
+    )
+    require.NoError(t, err)
+
+    // Submit a batch tx
+    batchTx := make([]byte, 50)
+    submitReq := coresequencer.SubmitBatchTxsRequest{
+        Id: []byte("test-chain"),
+        Batch: &coresequencer.Batch{
+            Transactions: [][]byte{batchTx},
+        },
+    }
+    _, err = seq.SubmitBatchTxs(ctx, submitReq)
+    require.NoError(t, err)
+
+    // First call with maxBytes = 100
+    // Large forced tx (150 bytes) won't fit, gets deferred
+    // Batch tx (50 bytes) should be returned
+    getReq := coresequencer.GetNextBatchRequest{
+        Id:            []byte("test-chain"),
+        MaxBytes:      100,
+        LastBatchData: nil,
+    }
+
+    resp, err := seq.GetNextBatch(ctx, getReq)
+    require.NoError(t, err)
+    require.NotNil(t, resp.Batch)
+    assert.Equal(t, 1, len(resp.Batch.Transactions), "Should have batch tx only")
+    assert.Equal(t, 50, len(resp.Batch.Transactions[0]))
+
+    // Verify pending forced tx is stored
+    assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Large forced tx should be pending")
+
+    // Second call with larger maxBytes = 200
+    // Should process pending forced tx first
+    getReq2 := coresequencer.GetNextBatchRequest{
+        Id:            []byte("test-chain"),
+        MaxBytes:      200,
+        LastBatchData: nil,
+    }
+
+    resp2, err := seq.GetNextBatch(ctx, getReq2)
+    require.NoError(t, err)
+    require.NotNil(t, resp2.Batch)
+    assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx")
+    assert.Equal(t, 150, len(resp2.Batch.Transactions[0]))
+
+    // Pending queue should now be empty
+    assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty")
+
+    mockFI.AssertExpectations(t)
+}
+
 // TestSequencer_RecordMetrics tests the RecordMetrics method to ensure it properly updates metrics.
 func TestSequencer_RecordMetrics(t *testing.T) {
     t.Run("With Metrics", func(t *testing.T) {
@@ -523,16 +830,20 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) {
     defer db.Close()

     mockDA := &damocks.MockDA{}
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()

     // Create a sequencer with a small queue limit for testing
     logger := zerolog.Nop()
     seq := &Sequencer{
-        logger:    logger,
-        da:        mockDA,
-        batchTime: time.Second,
-        Id:        []byte("test"),
-        queue:     NewBatchQueue(db, "test_queue", 2), // Very small limit for testing
-        proposer:  true,
+        logger:      logger,
+        da:          mockDA,
+        batchTime:   time.Second,
+        Id:          []byte("test"),
+        queue:       NewBatchQueue(db, "test_queue", 2), // Very small limit for testing
+        proposer:    true,
+        fiRetriever: mockRetriever,
     }

     ctx := context.Background()
@@ -641,7 +952,10 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) {
     // Create sequencer with small queue size to trigger throttling quickly
     queueSize := 3 // Small for testing
     logger := zerolog.Nop()
-    seq, err := NewSequencerWithQueueSize(
+    mockRetriever := new(MockForcedInclusionRetriever)
+    mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+        Return(nil, block.ErrForceInclusionNotConfigured).Maybe()
+    seq, err := NewSequencer(
     context.Background(),
     logger,
     db,
@@ -651,6 +965,8 @@
     nil,  // metrics
     true, // proposer
     queueSize,
+    mockRetriever,     // fiRetriever
+    genesis.Genesis{}, // genesis
 )
 require.NoError(t, err)
diff --git a/test/mocks/sequencer.go b/test/mocks/sequencer.go
index c3894f846..e1ef0afb4 100644
--- a/test/mocks/sequencer.go
+++ b/test/mocks/sequencer.go
@@ -38,6 +38,50 @@ func (_m *MockSequencer) EXPECT() *MockSequencer_Expecter {
     return &MockSequencer_Expecter{mock: &_m.Mock}
 }

+// GetDAHeight provides a mock function for the type MockSequencer
+func (_mock *MockSequencer) GetDAHeight() uint64 {
+    ret := _mock.Called()
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetDAHeight")
+    }
+
+    var r0 uint64
+    if returnFunc, ok := ret.Get(0).(func() uint64); ok {
+        r0 = returnFunc()
+    } else {
+        r0 = ret.Get(0).(uint64)
+    }
+    return r0
+}
+
+// MockSequencer_GetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDAHeight'
+type MockSequencer_GetDAHeight_Call struct {
+    *mock.Call
+}
+
+// GetDAHeight is a helper method to define mock.On call
+func (_e *MockSequencer_Expecter) GetDAHeight() *MockSequencer_GetDAHeight_Call {
+    return &MockSequencer_GetDAHeight_Call{Call: _e.mock.On("GetDAHeight")}
+}
+
+func (_c *MockSequencer_GetDAHeight_Call) Run(run func()) *MockSequencer_GetDAHeight_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run()
+    })
+    return _c
+}
+
+func (_c *MockSequencer_GetDAHeight_Call) Return(v uint64) *MockSequencer_GetDAHeight_Call {
+    _c.Call.Return(v)
+    return _c
+}
+
+func (_c *MockSequencer_GetDAHeight_Call) RunAndReturn(run func() uint64) *MockSequencer_GetDAHeight_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
 // GetNextBatch provides a mock function for the type MockSequencer
 func (_mock *MockSequencer) GetNextBatch(ctx context.Context, req sequencer.GetNextBatchRequest) (*sequencer.GetNextBatchResponse, error) {
     ret := _mock.Called(ctx, req)
@@ -106,6 +150,46 @@ func (_c *MockSequencer_GetNextBatch_Call) RunAndReturn(run func(ctx context.Con
     return _c
 }

+// SetDAHeight provides a mock function for the type MockSequencer
+func (_mock *MockSequencer) SetDAHeight(height uint64) {
+    _mock.Called(height)
+    return
+}
+
+// MockSequencer_SetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDAHeight'
+type MockSequencer_SetDAHeight_Call struct {
+    *mock.Call
+}
+
+// SetDAHeight is a helper method to define mock.On call
+// - height uint64
+func (_e *MockSequencer_Expecter) SetDAHeight(height interface{}) *MockSequencer_SetDAHeight_Call {
+    return &MockSequencer_SetDAHeight_Call{Call: _e.mock.On("SetDAHeight", height)}
+}
+
+func (_c *MockSequencer_SetDAHeight_Call) Run(run func(height uint64)) *MockSequencer_SetDAHeight_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        var arg0 uint64
+        if args[0] != nil {
+            arg0 = args[0].(uint64)
+        }
+        run(
+            arg0,
+        )
+    })
+    return _c
+}
+
+func (_c *MockSequencer_SetDAHeight_Call) Return() *MockSequencer_SetDAHeight_Call {
+    _c.Call.Return()
+    return _c
+}
+
+func (_c *MockSequencer_SetDAHeight_Call) RunAndReturn(run func(height uint64)) *MockSequencer_SetDAHeight_Call {
+    _c.Run(run)
+    return _c
+}
+
 // SubmitBatchTxs provides a mock function for the type MockSequencer
 func (_mock *MockSequencer) SubmitBatchTxs(ctx context.Context, req sequencer.SubmitBatchTxsRequest) (*sequencer.SubmitBatchTxsResponse, error) {
     ret := _mock.Called(ctx, req)
diff --git a/types/CLAUDE.md b/types/CLAUDE.md
index 9cd5496e5..aafdd289a 100644
--- a/types/CLAUDE.md
+++ b/types/CLAUDE.md
@@ -77,17 +77,16 @@ The types package defines the core data structures and types used throughout ev-
 - Signature verification
 - Identity validation

-### DA Integration (`da.go`, `da_test.go`)
+### DA Integration

-- **Purpose**: Data Availability layer helpers
-- **Key Functions**:
-  - `SubmitWithHelpers`: DA submission with error handling
+- **Purpose**: Data Availability layer helpers, now moved to the `block/internal/da` package
+- **See**: `block/internal/da/client.go` for DA submission and retrieval logic
 - **Key Features**:
-  - Error mapping to status codes
+  - Error mapping to status codes (in DA Client)
   - Namespace support
   - Gas price configuration
   - Submission options handling
-- **Status Codes**:
+- **Status Codes** (defined in `core/da`):
   - `StatusContextCanceled`: Submission canceled
   - `StatusNotIncludedInBlock`: Transaction timeout
   - `StatusAlreadyInMempool`: Duplicate transaction
diff --git a/types/epoch.go b/types/epoch.go
new file mode 100644
index 000000000..75d43e804
--- /dev/null
+++ b/types/epoch.go
@@ -0,0 +1,50 @@
+package types
+
+// CalculateEpochNumber returns the deterministic epoch number for a given DA height.
+// Epoch 1 starts at daStartHeight.
+//
+// Parameters:
+// - daHeight: The DA height to calculate the epoch for
+// - daStartHeight: The genesis DA start height
+// - daEpochSize: The number of DA blocks per epoch (0 means all blocks in epoch 1)
+//
+// Returns:
+// - Epoch number (0 if before daStartHeight, 1+ otherwise)
+func CalculateEpochNumber(daHeight, daStartHeight, daEpochSize uint64) uint64 {
+    if daHeight < daStartHeight {
+        return 0
+    }
+
+    if daEpochSize == 0 {
+        return 1
+    }
+
+    return ((daHeight - daStartHeight) / daEpochSize) + 1
+}
+
+// CalculateEpochBoundaries returns the start and end DA heights for the epoch
+// containing the given DA height. The boundaries are inclusive.
+//
+// Parameters:
+// - daHeight: The DA height to calculate boundaries for
+// - daStartHeight: The genesis DA start height
+// - daEpochSize: The number of DA blocks per epoch (0 means single epoch)
+//
+// Returns:
+// - start: The first DA height in the epoch (inclusive)
+// - end: The last DA height in the epoch (inclusive)
+func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end uint64) {
+    if daEpochSize == 0 {
+        return daStartHeight, daStartHeight
+    }
+
+    if daHeight < daStartHeight {
+        return daStartHeight, daStartHeight + daEpochSize - 1
+    }
+
+    epochNum := CalculateEpochNumber(daHeight, daStartHeight, daEpochSize)
+    start = daStartHeight + (epochNum-1)*daEpochSize
+    end = daStartHeight + epochNum*daEpochSize - 1
+
+    return start, end
+}
diff --git a/types/epoch_test.go b/types/epoch_test.go
new file mode 100644
index 000000000..295395d7b
--- /dev/null
+++ b/types/epoch_test.go
@@ -0,0 +1,300 @@
+package types
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestCalculateEpochNumber(t *testing.T) {
+    tests := []struct {
+        name          string
+        daStartHeight uint64
+        daEpochSize   uint64
+        daHeight      uint64
+        expectedEpoch uint64
+    }{
+        {
+            name:          "first epoch - start height",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      100,
+            expectedEpoch: 1,
+        },
+        {
+            name:          "first epoch - middle",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      105,
+            expectedEpoch: 1,
+        },
+        {
+            name:          "first epoch - last height",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      109,
+            expectedEpoch: 1,
+        },
+        {
+            name:          "second epoch - start",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      110,
+            expectedEpoch: 2,
+        },
+        {
+            name:          "second epoch - middle",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      115,
+            expectedEpoch: 2,
+        },
+        {
+            name:          "tenth epoch",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      195,
+            expectedEpoch: 10,
+        },
+        {
+            name:          "before start height",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      50,
+            expectedEpoch: 0,
+        },
+        {
+            name:          "zero epoch size",
+            daStartHeight: 100,
+            daEpochSize:   0,
+            daHeight:      200,
+            expectedEpoch: 1,
+        },
+        {
+            name:          "large epoch size",
+            daStartHeight: 1000,
+            daEpochSize:   1000,
+            daHeight:      2500,
+            expectedEpoch: 2,
+        },
+        {
+            name:          "start height zero",
+            daStartHeight: 0,
+            daEpochSize:   5,
+            daHeight:      10,
+            expectedEpoch: 3,
+        },
+        {
+            name:          "epoch size one",
+            daStartHeight: 100,
+            daEpochSize:   1,
+            daHeight:      105,
+            expectedEpoch: 6,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            epoch := CalculateEpochNumber(tt.daHeight, tt.daStartHeight, tt.daEpochSize)
+            assert.Equal(t, tt.expectedEpoch, epoch)
+        })
+    }
+}
+
+func TestCalculateEpochBoundaries(t *testing.T) {
+    tests := []struct {
+        name          string
+        daStartHeight uint64
+        daEpochSize   uint64
+        daHeight      uint64
+        expectedStart uint64
+        expectedEnd   uint64
+    }{
+        {
+            name:          "first epoch",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      105,
+            expectedStart: 100,
+            expectedEnd:   109,
+        },
+        {
+            name:          "second epoch",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      110,
+            expectedStart: 110,
+            expectedEnd:   119,
+        },
+        {
+            name:          "third epoch - last height",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      129,
+            expectedStart: 120,
+            expectedEnd:   129,
+        },
+        {
+            name:          "before start height returns first epoch",
+            daStartHeight: 100,
+            daEpochSize:   10,
+            daHeight:      50,
+            expectedStart: 100,
+            expectedEnd:   109,
+        },
+        {
+            name:          "before start height with zero epoch size",
+            daStartHeight: 2,
+            daEpochSize:   0,
+            daHeight:      1,
+            expectedStart: 2,
+            expectedEnd:   2,
+        },
+        {
+            name:          "zero epoch size",
+            daStartHeight: 100,
+            daEpochSize:   0,
+            daHeight:      200,
+            expectedStart: 100,
+            expectedEnd:   100,
+        },
+        {
+            name:          "large epoch",
+            daStartHeight: 1000,
+            daEpochSize:   1000,
+            daHeight:      1500,
+            expectedStart: 1000,
+            expectedEnd:   1999,
+        },
+        {
+            name:          "epoch boundary exact start",
+            daStartHeight: 100,
+            daEpochSize:   50,
+            daHeight:      100,
+            expectedStart: 100,
+            expectedEnd:   149,
+        },
+        {
+            name:          "epoch boundary exact end of first epoch",
+            daStartHeight: 100,
+            daEpochSize:   50,
+            daHeight:      149,
+            expectedStart: 100,
+            expectedEnd:   149,
+        },
+        {
+            name:          "epoch boundary exact start of second epoch",
+            daStartHeight: 100,
+            daEpochSize:   50,
+            daHeight:      150,
+            expectedStart: 150,
+            expectedEnd:   199,
+        },
+        {
+            name:          "start height zero",
+            daStartHeight: 0,
+            daEpochSize:   5,
+            daHeight:      10,
+            expectedStart: 10,
+            expectedEnd:   14,
+        },
+        {
+            name:          "epoch size one",
+            daStartHeight: 100,
+            daEpochSize:   1,
+            daHeight:      105,
+            expectedStart: 105,
+            expectedEnd:   105,
+        },
+        {
+            name:          "very large numbers",
+            daStartHeight: 1000000,
+            daEpochSize:   100000,
+            daHeight:      5500000,
+            expectedStart: 5500000,
+            expectedEnd:   5599999,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            start, end := CalculateEpochBoundaries(tt.daHeight, tt.daStartHeight, tt.daEpochSize)
+            assert.Equal(t, tt.expectedStart, start, "start height mismatch")
+            assert.Equal(t, tt.expectedEnd, end, "end height mismatch")
+        })
+    }
+}
+
+func TestEpochConsistency(t *testing.T) {
+    tests := []struct {
+        name          string
+        daStartHeight uint64
+        daEpochSize   uint64
+    }{
+        {
+            name:          "standard epoch",
+            daStartHeight: 100,
+            daEpochSize:   10,
+        },
+        {
+            name:          "large epoch",
+            daStartHeight: 1000,
+            daEpochSize:   1000,
+        },
+        {
+            name:          "small epoch",
+            daStartHeight: 0,
+            daEpochSize:   1,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            // Test that all heights in an epoch return the same epoch number
+            // and boundaries
+            for epoch := uint64(1); epoch <= 10; epoch++ {
+                // Calculate expected boundaries for this epoch
+                expectedStart := tt.daStartHeight + (epoch-1)*tt.daEpochSize
+                expectedEnd := tt.daStartHeight + epoch*tt.daEpochSize - 1
+
+                // Test every height in the epoch
+                for h := expectedStart; h <= expectedEnd; h++ {
+                    epochNum := CalculateEpochNumber(h, tt.daStartHeight, tt.daEpochSize)
+                    assert.Equal(t, epoch, epochNum, "height %d should be in epoch %d", h, epoch)
+
+                    start, end := CalculateEpochBoundaries(h, tt.daStartHeight, tt.daEpochSize)
+                    assert.Equal(t, expectedStart, start, "height %d should have start %d", h, expectedStart)
+                    assert.Equal(t, expectedEnd, end, "height %d should have end %d", h, expectedEnd)
+                }
+            }
+        })
+    }
+}
+
+func TestEpochBoundaryTransitions(t *testing.T) {
+    daStartHeight := uint64(100)
+    daEpochSize := uint64(10)
+
+    // Test that epoch boundaries are correctly calculated at transitions
+    transitions := []struct {
+        height        uint64
+        expectedEpoch uint64
+        expectedStart uint64
+        expectedEnd   uint64
+    }{
+        {100, 1, 100, 109}, // First height of epoch 1
+        {109, 1, 100, 109}, // Last height of epoch 1
+        {110, 2, 110, 119}, // First height of epoch 2
+        {119, 2, 110, 119}, // Last height of epoch 2
+        {120, 3, 120, 129}, // First height of epoch 3
+    }
+
+    for _, tr := range transitions {
+        epoch := CalculateEpochNumber(tr.height, daStartHeight, daEpochSize)
+        assert.Equal(t, tr.expectedEpoch, epoch, "height %d epoch mismatch", tr.height)
+
+        start, end := CalculateEpochBoundaries(tr.height, daStartHeight, daEpochSize)
+        assert.Equal(t, tr.expectedStart, start, "height %d start mismatch", tr.height)
+        assert.Equal(t, tr.expectedEnd, end, "height %d end mismatch", tr.height)
+    }
+}
diff --git a/types/state.go b/types/state.go
index a439f6c34..4b87dc6b5 100644
--- a/types/state.go
+++ b/types/state.go
@@ -30,7 +30,8 @@ type State struct {
     // LastHeaderHash is the hash of the header of the last block
     LastHeaderHash Hash

-    // DAHeight identifies DA block containing the latest applied Evolve block.
+    // DAHeight identifies the DA block containing the latest applied Evolve block for a syncing node.
+    // For an aggregator, it corresponds to the last DA block height fetched for forced inclusion transactions.
     DAHeight uint64

     // the latest AppHash we've received from calling abci.Commit()
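For reference, a minimal standalone sketch of how the epoch helpers added in types/epoch.go compose; the start height and epoch size below are arbitrary example values chosen to mirror the test fixtures above, not part of the patch:

package main

import (
    "fmt"

    "github.com/evstack/ev-node/types"
)

func main() {
    // Example values: genesis DA start height 100, 10 DA blocks per epoch.
    const daStartHeight, daEpochSize = uint64(100), uint64(10)

    // DA height 115 falls in epoch ((115-100)/10)+1 = 2.
    epoch := types.CalculateEpochNumber(115, daStartHeight, daEpochSize)

    // Epoch 2 spans DA heights 110..119; boundaries are inclusive.
    start, end := types.CalculateEpochBoundaries(115, daStartHeight, daEpochSize)

    fmt.Println(epoch, start, end) // 2 110 119
}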