From 71fe78f27ffa1bae1152ef9b5192573c8e9962ce Mon Sep 17 00:00:00 2001 From: Rampey Date: Mon, 27 May 2024 15:32:08 -0500 Subject: [PATCH] refactor: switch to custom DA client interface (#11) * refactor: switch to custom DA client interface - .go -> client.go for consistency * chore: add docker-compose and smol tweaks * refactor: remove 'da' suffix and rename to `client.go` * forge install: blobstream-contracts v4.1.0 * refactor: DABuilder -> ClientBuilder --- .gitmodules | 14 +- availda/availda.go => avail/client.go | 63 +++----- .../bindings/AvailBridge/AvailBridge.go | 0 .../bindings/AvailBridge/AvailBridge.json | 0 .../bindings/VectorVerifier/VectorVerifier.go | 0 .../contracts/.github/workflows/test.yml | 0 .../verify/contracts/.gitignore | 0 {availda => avail}/verify/contracts/README.md | 0 .../verify/contracts/foundry.toml | 0 .../verify/contracts/script/Deploy.s.sol | 0 .../verify/contracts/src/VectorVerifier.sol | 0 .../contracts/src/interfaces/IAvailBridge.sol | 0 .../contracts/src/interfaces/IVectorX.sol | 0 {availda => avail}/verify/verifier.go | 19 ++- availda/verify/contracts/lib/forge-std | 1 - celestiada/celestia.go => celestia/client.go | 74 ++++----- .../BlobstreamVerifier/BlobstreamVerifier.go | 0 .../bindings/BlobstreamX/BlobstreamX.go | 0 {celestiada => celestia}/verify/commitment.go | 2 +- .../contracts/.github/workflows/test.yml | 0 .../verify/contracts/.gitignore | 0 .../verify/contracts/README.md | 0 .../verify/contracts/foundry.toml | 0 .../verify/contracts/lib/blobstream-contracts | 0 .../contracts/src/BlobstreamVerifier.sol | 0 {celestiada => celestia}/verify/convert.go | 2 +- {celestiada => celestia}/verify/proof.go | 2 +- {celestiada => celestia}/verify/verifier.go | 12 +- celestiada/verify/contracts/lib/forge-std | 1 - cmd/blob-server/main.go | 56 +++---- da/interface.go | 44 +++++ daash.go | 69 ++++---- docker-compose.yml | 32 ++++ eigenda/eigenda.go => eigen/client.go | 35 ++-- {eigenda => eigen}/disperser.pb.go | 2 +- {eigenda => eigen}/disperser_grpc.pb.go | 2 +- go.mod | 1 - go.sum | 2 - mock/client.go | 153 ++++++++++++++++++ 39 files changed, 392 insertions(+), 194 deletions(-) rename availda/availda.go => avail/client.go (85%) rename {availda => avail}/verify/bindings/AvailBridge/AvailBridge.go (100%) rename {availda => avail}/verify/bindings/AvailBridge/AvailBridge.json (100%) rename {availda => avail}/verify/bindings/VectorVerifier/VectorVerifier.go (100%) rename {availda => avail}/verify/contracts/.github/workflows/test.yml (100%) rename {availda => avail}/verify/contracts/.gitignore (100%) rename {availda => avail}/verify/contracts/README.md (100%) rename {availda => avail}/verify/contracts/foundry.toml (100%) rename {availda => avail}/verify/contracts/script/Deploy.s.sol (100%) rename {availda => avail}/verify/contracts/src/VectorVerifier.sol (100%) rename {availda => avail}/verify/contracts/src/interfaces/IAvailBridge.sol (100%) rename {availda => avail}/verify/contracts/src/interfaces/IVectorX.sol (100%) rename {availda => avail}/verify/verifier.go (87%) delete mode 160000 availda/verify/contracts/lib/forge-std rename celestiada/celestia.go => celestia/client.go (68%) rename {celestiada => celestia}/verify/bindings/BlobstreamVerifier/BlobstreamVerifier.go (100%) rename {celestiada => celestia}/verify/bindings/BlobstreamX/BlobstreamX.go (100%) rename {celestiada => celestia}/verify/commitment.go (97%) rename {celestiada => celestia}/verify/contracts/.github/workflows/test.yml (100%) rename {celestiada => celestia}/verify/contracts/.gitignore 
(100%) rename {celestiada => celestia}/verify/contracts/README.md (100%) rename {celestiada => celestia}/verify/contracts/foundry.toml (100%) rename {celestiada => celestia}/verify/contracts/lib/blobstream-contracts (100%) rename {celestiada => celestia}/verify/contracts/src/BlobstreamVerifier.sol (100%) rename {celestiada => celestia}/verify/convert.go (97%) rename {celestiada => celestia}/verify/proof.go (96%) rename {celestiada => celestia}/verify/verifier.go (88%) delete mode 160000 celestiada/verify/contracts/lib/forge-std create mode 100644 da/interface.go create mode 100644 docker-compose.yml rename eigenda/eigenda.go => eigen/client.go (86%) rename {eigenda => eigen}/disperser.pb.go (99%) rename {eigenda => eigen}/disperser_grpc.pb.go (99%) create mode 100644 mock/client.go diff --git a/.gitmodules b/.gitmodules index 97011dd..f10ca30 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,9 @@ -[submodule "celestiada/verify/contracts/lib/forge-std"] - path = celestiada/verify/contracts/lib/forge-std +[submodule "celestia/verify/contracts/lib/forge-std"] + path = celestia/verify/contracts/lib/forge-std url = https://github.com/foundry-rs/forge-std -[submodule "celestiada/verify/contracts/lib/blobstream-contracts"] - path = celestiada/verify/contracts/lib/blobstream-contracts - url = https://github.com/celestiaorg/blobstream-contracts -[submodule "availda/verify/contracts/lib/forge-std"] - path = availda/verify/contracts/lib/forge-std +[submodule "avail/verify/contracts/lib/forge-std"] + path = avail/verify/contracts/lib/forge-std url = https://github.com/foundry-rs/forge-std +[submodule "celestia/verify/contracts/lib/blobstream-contracts"] + path = celestia/verify/contracts/lib/blobstream-contracts + url = https://github.com/celestiaorg/blobstream-contracts diff --git a/availda/availda.go b/avail/client.go similarity index 85% rename from availda/availda.go rename to avail/client.go index 3850b70..772c599 100644 --- a/availda/availda.go +++ b/avail/client.go @@ -1,4 +1,4 @@ -package availda +package avail import ( "context" @@ -13,12 +13,10 @@ import ( "log" - "encoding/binary" - gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" "github.com/centrifuge/go-substrate-rpc-client/v4/signature" "github.com/centrifuge/go-substrate-rpc-client/v4/types" - "github.com/rollkit/go-da" + "github.com/stackrlabs/go-daash/da" "go.uber.org/zap" "golang.org/x/crypto/sha3" ) @@ -45,7 +43,7 @@ type DataProof struct { } `json:"roots"` } -type DAClient struct { +type Client struct { Config Config API *gsrpc.SubstrateAPI Meta *types.Metadata @@ -58,8 +56,8 @@ type DAClient struct { } // Returns a newly initalised Avail DA client -func New(configPath string) (*DAClient, error) { - a := DAClient{} +func NewClient(configPath string) (*Client, error) { + a := Client{} err := a.Config.GetConfig(configPath) if err != nil { return nil, fmt.Errorf("cannot get config", err) @@ -112,14 +110,14 @@ func New(configPath string) (*DAClient, error) { } // MaxBlobSize returns the max blob size -func (c *DAClient) MaxBlobSize(ctx context.Context) (uint64, error) { +func (c *Client) MaxBlobSize(ctx context.Context) (uint64, error) { var maxBlobSize uint64 = 64 * 64 * 500 return maxBlobSize, nil } // Submit a list of blobs to Avail DA // Currently, we submit to a trusted RPC Avail node. In the future, we will submit via an Avail light client. 
-func (a *DAClient) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float64) ([]da.ID, []da.Proof, error) { +func (a *Client) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float64) ([]da.ID, []da.Proof, error) { // TODO: Add support for multiple blobs daBlob := daBlobs[0] log.Println("data", zap.Any("data", daBlob)) @@ -243,7 +241,7 @@ out: } dataProof := dataProofResp.Result.DataProof // NOTE: Substrate's BlockNumber type is an alias for u32 type, which is uint32 - blobID := MakeID(uint32(block.Block.Header.Number), extIndex) + blobID := ID{Height: uint64(block.Block.Header.Number), ExtIndex: uint32(extIndex)} blobIDs := make([]da.ID, 1) blobIDs[0] = blobID @@ -257,7 +255,7 @@ out: } // Get returns Blob for each given ID, or an error. -func (a *DAClient) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { +func (a *Client) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { // TODO: We are dealing with single blobs for now. We will need to handle multiple blobs in the future. ext, err := a.GetExtrinsic(ids[0]) if err != nil { @@ -269,19 +267,19 @@ func (a *DAClient) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { } // GetIDs returns IDs of all Blobs located in DA at given height. -func (a *DAClient) GetIDs(ctx context.Context, height uint64) ([]da.ID, error) { +func (a *Client) GetIDs(ctx context.Context, height uint64) ([]da.ID, error) { // TODO: Need to implement this return nil, nil } // Commit creates a Commitment for each given Blob. -func (a *DAClient) Commit(ctx context.Context, daBlobs []da.Blob) ([]da.Commitment, error) { +func (a *Client) Commit(ctx context.Context, daBlobs []da.Blob) ([]da.Commitment, error) { // TODO: Need to implement this return nil, nil } // GetProofs returns the proofs for the given IDs -func (a *DAClient) GetProof(ctx context.Context, blockHeight uint32, extIdx int) (DataProofRPCResponse, error) { +func (a *Client) GetProof(ctx context.Context, blockHeight uint32, extIdx int) (DataProofRPCResponse, error) { var dataProofResp DataProofRPCResponse blockHash, err := a.API.RPC.Chain.GetBlockHash(uint64(blockHeight)) if err != nil { @@ -311,7 +309,7 @@ func (a *DAClient) GetProof(ctx context.Context, blockHeight uint32, extIdx int) } // Validate validates Commitments against the corresponding Proofs. This should be possible without retrieving the Blobs. 
-func (c *DAClient) Validate(ctx context.Context, ids []da.ID, daProofs []da.Proof) ([]bool, error) { +func (c *Client) Validate(ctx context.Context, ids []da.ID, daProofs []da.Proof) ([]bool, error) { // TODO: Need to implement this return nil, nil } @@ -327,7 +325,7 @@ func (b BatchDAData) IsEmpty() bool { return reflect.DeepEqual(b, BatchDAData{}) } -func (a *DAClient) GetAccountNextIndex() (types.UCompact, error) { +func (a *Client) GetAccountNextIndex() (types.UCompact, error) { // TODO: Add context to the request resp, err := http.Post(a.Config.HttpApiURL, "application/json", strings.NewReader(fmt.Sprintf("{\"id\":1,\"jsonrpc\":\"2.0\",\"method\":\"system_accountNextIndex\",\"params\":[\"%v\"]}", a.KeyringPair.Address))) //nolint: noctx if err != nil { @@ -348,25 +346,9 @@ func (a *DAClient) GetAccountNextIndex() (types.UCompact, error) { return types.NewUCompactFromUInt(uint64(accountNextIndex.Result)), nil } -// makeID creates a unique ID to reference a blob on Avail -func MakeID(blockHeight uint32, extIndex int) da.ID { - // Serialise height and leaf index to binary - heightLen := 4 - heightBytes := make([]byte, heightLen) - binary.LittleEndian.PutUint32(heightBytes, blockHeight) - extIndexBytes := make([]byte, 4) - binary.LittleEndian.PutUint32(extIndexBytes, uint32(extIndex)) - return da.ID(append(heightBytes, extIndexBytes...)) -} - -// SplitID returns the block height and leaf index from a unique ID -func SplitID(id da.ID) (uint32, uint32) { - heightLen := 4 - heightBytes := id[:heightLen] - extIdxBytes := id[heightLen:] - blockHeight := binary.LittleEndian.Uint32(heightBytes) - extIdx := binary.LittleEndian.Uint32(extIdxBytes) - return blockHeight, extIdx +type ID struct { + Height uint64 `json:"blockHeight"` + ExtIndex uint32 `json:"extIdx"` } type Config struct { @@ -400,9 +382,12 @@ func (c *Config) GetConfig(configFileName string) error { return nil } -func (a *DAClient) GetExtrinsic(id da.ID) (types.Extrinsic, error) { - blockHeight, extIdx := SplitID(id) - blockHash, err := a.API.RPC.Chain.GetBlockHash(uint64(blockHeight)) +func (a *Client) GetExtrinsic(id da.ID) (types.Extrinsic, error) { + availID, ok := id.(ID) + if !ok { + return types.Extrinsic{}, fmt.Errorf("invalid ID") + } + blockHash, err := a.API.RPC.Chain.GetBlockHash(uint64(availID.Height)) if err != nil { log.Fatalf("cannot get block hash:%w", err) } @@ -410,5 +395,5 @@ func (a *DAClient) GetExtrinsic(id da.ID) (types.Extrinsic, error) { if err != nil { log.Fatalf("cannot get block:%w", err) } - return block.Block.Extrinsics[extIdx], nil + return block.Block.Extrinsics[availID.ExtIndex], nil } diff --git a/availda/verify/bindings/AvailBridge/AvailBridge.go b/avail/verify/bindings/AvailBridge/AvailBridge.go similarity index 100% rename from availda/verify/bindings/AvailBridge/AvailBridge.go rename to avail/verify/bindings/AvailBridge/AvailBridge.go diff --git a/availda/verify/bindings/AvailBridge/AvailBridge.json b/avail/verify/bindings/AvailBridge/AvailBridge.json similarity index 100% rename from availda/verify/bindings/AvailBridge/AvailBridge.json rename to avail/verify/bindings/AvailBridge/AvailBridge.json diff --git a/availda/verify/bindings/VectorVerifier/VectorVerifier.go b/avail/verify/bindings/VectorVerifier/VectorVerifier.go similarity index 100% rename from availda/verify/bindings/VectorVerifier/VectorVerifier.go rename to avail/verify/bindings/VectorVerifier/VectorVerifier.go diff --git a/availda/verify/contracts/.github/workflows/test.yml b/avail/verify/contracts/.github/workflows/test.yml 
similarity index 100% rename from availda/verify/contracts/.github/workflows/test.yml rename to avail/verify/contracts/.github/workflows/test.yml diff --git a/availda/verify/contracts/.gitignore b/avail/verify/contracts/.gitignore similarity index 100% rename from availda/verify/contracts/.gitignore rename to avail/verify/contracts/.gitignore diff --git a/availda/verify/contracts/README.md b/avail/verify/contracts/README.md similarity index 100% rename from availda/verify/contracts/README.md rename to avail/verify/contracts/README.md diff --git a/availda/verify/contracts/foundry.toml b/avail/verify/contracts/foundry.toml similarity index 100% rename from availda/verify/contracts/foundry.toml rename to avail/verify/contracts/foundry.toml diff --git a/availda/verify/contracts/script/Deploy.s.sol b/avail/verify/contracts/script/Deploy.s.sol similarity index 100% rename from availda/verify/contracts/script/Deploy.s.sol rename to avail/verify/contracts/script/Deploy.s.sol diff --git a/availda/verify/contracts/src/VectorVerifier.sol b/avail/verify/contracts/src/VectorVerifier.sol similarity index 100% rename from availda/verify/contracts/src/VectorVerifier.sol rename to avail/verify/contracts/src/VectorVerifier.sol diff --git a/availda/verify/contracts/src/interfaces/IAvailBridge.sol b/avail/verify/contracts/src/interfaces/IAvailBridge.sol similarity index 100% rename from availda/verify/contracts/src/interfaces/IAvailBridge.sol rename to avail/verify/contracts/src/interfaces/IAvailBridge.sol diff --git a/availda/verify/contracts/src/interfaces/IVectorX.sol b/avail/verify/contracts/src/interfaces/IVectorX.sol similarity index 100% rename from availda/verify/contracts/src/interfaces/IVectorX.sol rename to avail/verify/contracts/src/interfaces/IVectorX.sol diff --git a/availda/verify/verifier.go b/avail/verify/verifier.go similarity index 87% rename from availda/verify/verifier.go rename to avail/verify/verifier.go index 2c3e95c..cea33e8 100644 --- a/availda/verify/verifier.go +++ b/avail/verify/verifier.go @@ -10,9 +10,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" - "github.com/rollkit/go-da" - "github.com/stackrlabs/go-daash/availda" - "github.com/stackrlabs/go-daash/availda/verify/bindings/vectorverifier" + "github.com/stackrlabs/go-daash/avail" + "github.com/stackrlabs/go-daash/avail/verify/bindings/vectorverifier" + "github.com/stackrlabs/go-daash/da" ) const ( @@ -21,7 +21,7 @@ const ( // Verfifier is used to verify availability of Avail blobs on EVM chains type Verifier struct { - daClient *availda.DAClient + daClient *avail.Client ethClient *ethclient.Client vectorXContract common.Address bridgeContract common.Address @@ -43,7 +43,7 @@ type SuccinctAPIResponse struct { } `json:"data"` } -func NewVerifier(client *availda.DAClient, ethEndpoint string, bridgeContract string, verifierContract string, vectorXContract string, availNetwork string) (*Verifier, error) { +func NewVerifier(client *avail.Client, ethEndpoint string, bridgeContract string, verifierContract string, vectorXContract string, availNetwork string) (*Verifier, error) { ethClient, err := ethclient.Dial(ethEndpoint) if err != nil { return nil, fmt.Errorf("failed to create eth client: %w", err) @@ -79,19 +79,18 @@ func (d *Verifier) IsDataAvailable(blockHeight uint64, extIndex uint64) (bool, e } // IsDataIncluded verifies that the blob data corresponding to the given block height and external index is available on DA -func (d *Verifier) IsDataIncluded(blockHeight uint64, 
extIndex uint64) (bool, error) { - id := availda.MakeID(uint32(blockHeight), int(extIndex)) +func (d *Verifier) IsDataIncluded(id avail.ID) (bool, error) { blobs, err := d.daClient.Get(context.Background(), []da.ID{id}) if err != nil { return false, fmt.Errorf("failed to get blob data: %w", err) } fmt.Println("size of blob data:", len(blobs[0])) - dataProof, err := d.daClient.GetProof(context.Background(), uint32(blockHeight), int(extIndex)) + dataProof, err := d.daClient.GetProof(context.Background(), uint32(id.Height), int(id.ExtIndex)) if err != nil { return false, fmt.Errorf("failed to get data proof: %w", err) } - proof, err := d.GetAggregatedProof(dataProof, blockHeight) + proof, err := d.GetAggregatedProof(dataProof, id.Height) if err != nil { return false, fmt.Errorf("failed to get aggregated proof: %w", err) } @@ -106,7 +105,7 @@ func (d *Verifier) IsDataIncluded(blockHeight uint64, extIndex uint64) (bool, er return success, nil } -func (d *Verifier) GetAggregatedProof(dataProof availda.DataProofRPCResponse, blockHeight uint64) (*vectorverifier.IAvailBridgeMerkleProofInput, error) { +func (d *Verifier) GetAggregatedProof(dataProof avail.DataProofRPCResponse, blockHeight uint64) (*vectorverifier.IAvailBridgeMerkleProofInput, error) { chainID, err := d.ethClient.ChainID(context.Background()) if err != nil { return nil, fmt.Errorf("cannot get chain id:%w", err) diff --git a/availda/verify/contracts/lib/forge-std b/availda/verify/contracts/lib/forge-std deleted file mode 160000 index 978ac6f..0000000 --- a/availda/verify/contracts/lib/forge-std +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 978ac6fadb62f5f0b723c996f64be52eddba6801 diff --git a/celestiada/celestia.go b/celestia/client.go similarity index 68% rename from celestiada/celestia.go rename to celestia/client.go index b014628..c80f235 100644 --- a/celestiada/celestia.go +++ b/celestia/client.go @@ -1,9 +1,9 @@ -package celestiada +package celestia import ( "context" - "encoding/binary" "encoding/hex" + "errors" "fmt" "log" "math" @@ -18,11 +18,11 @@ import ( sdktypes "github.com/cosmos/cosmos-sdk/types" auth "github.com/cosmos/cosmos-sdk/x/auth/types" - "github.com/rollkit/go-da" + "github.com/stackrlabs/go-daash/da" ) -// CelestiaDA implements the celestia backend for the DA interface -type DAClient struct { +// Client to interact with Celestia DA +type Client struct { client *rpc.Client Namespace share.Namespace gasPrice float64 @@ -30,7 +30,7 @@ type DAClient struct { } // Returns an intialised Celestia DA client -func New(ctx context.Context, lightClientRPCUrl string, authToken string, hexNamespace string, gasPrice float64) (*DAClient, error) { +func NewClient(ctx context.Context, lightClientRPCUrl string, authToken string, hexNamespace string, gasPrice float64) (*Client, error) { nsBytes := make([]byte, 10) _, err := hex.Decode(nsBytes, []byte(hexNamespace)) if err != nil { @@ -46,7 +46,7 @@ func New(ctx context.Context, lightClientRPCUrl string, authToken string, hexNam fmt.Printf("failed to create rpc client: %v", err) return nil, err } - return &DAClient{ + return &Client{ client: client, Namespace: namespace, gasPrice: gasPrice, @@ -55,17 +55,20 @@ func New(ctx context.Context, lightClientRPCUrl string, authToken string, hexNam } // MaxBlobSize returns the max blob size -func (c *DAClient) MaxBlobSize(ctx context.Context) (uint64, error) { +func (c *Client) MaxBlobSize(ctx context.Context) (uint64, error) { // TODO: pass-through query to node, app return appconsts.DefaultMaxBytes, nil } // Get returns Blob for each 
given ID, or an error. -func (c *DAClient) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { +func (c *Client) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { var blobs []da.Blob for _, id := range ids { - height, _, commitment := SplitID(id) - blob, err := c.client.Blob.Get(ctx, height, c.Namespace, commitment) + id, ok := id.(ID) + if !ok { + return nil, errors.New("invalid ID") + } + blob, err := c.client.Blob.Get(ctx, id.Height, c.Namespace, id.Commitment) if err != nil { return nil, err } @@ -75,7 +78,7 @@ func (c *DAClient) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { } // GetIDs returns IDs of all Blobs located in DA at given height. -func (c *DAClient) GetIDs(ctx context.Context, height uint64) ([]da.ID, error) { +func (c *Client) GetIDs(ctx context.Context, height uint64) ([]da.ID, error) { var ids []da.ID blobs, err := c.client.Blob.GetAll(ctx, height, []share.Namespace{c.Namespace}) if err != nil { @@ -85,19 +88,20 @@ func (c *DAClient) GetIDs(ctx context.Context, height uint64) ([]da.ID, error) { return nil, err } for _, b := range blobs { - ids = append(ids, makeID(height, b.Commitment, make([]byte, 32))) + // TODO: get txHash + ids = append(ids, ID{Height: height, Commitment: b.Commitment, TxHash: make([]byte, 32)}) } return ids, nil } // Commit creates a Commitment for each given Blob. -func (c *DAClient) Commit(ctx context.Context, daBlobs []da.Blob) ([]da.Commitment, error) { +func (c *Client) Commit(ctx context.Context, daBlobs []da.Blob) ([]da.Commitment, error) { _, commitments, err := c.blobsAndCommitments(daBlobs) return commitments, err } // Submit submits the Blobs to Data Availability layer. -func (c *DAClient) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float64) ([]da.ID, []da.Proof, error) { +func (c *Client) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float64) ([]da.ID, []da.Proof, error) { blobs, commitments, err := c.blobsAndCommitments(daBlobs) if err != nil { return nil, nil, err @@ -128,7 +132,7 @@ func (c *DAClient) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float if err != nil { return nil, nil, err } - ids[i] = makeID(uint64(txResp.Height), commitment, txHashBytes) + ids[i] = ID{Height: uint64(txResp.Height), Commitment: commitment, TxHash: txHashBytes} proof, err := c.client.Blob.GetProof(ctx, uint64(txResp.Height), c.Namespace, commitment) if err != nil { return nil, nil, err @@ -143,7 +147,7 @@ func (c *DAClient) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float } // blobsAndCommitments converts []da.Blob to []*blob.Blob and generates corresponding []da.Commitment -func (c *DAClient) blobsAndCommitments(daBlobs []da.Blob) ([]*blob.Blob, []da.Commitment, error) { +func (c *Client) blobsAndCommitments(daBlobs []da.Blob) ([]*blob.Blob, []da.Commitment, error) { var blobs []*blob.Blob var commitments []da.Commitment for _, daBlob := range daBlobs { @@ -163,7 +167,7 @@ func (c *DAClient) blobsAndCommitments(daBlobs []da.Blob) ([]*blob.Blob, []da.Co } // Validate validates Commitments against the corresponding Proofs. This should be possible without retrieving the Blobs. 
-func (c *DAClient) Validate(ctx context.Context, ids []da.ID, daProofs []da.Proof) ([]bool, error) { +func (c *Client) Validate(ctx context.Context, ids []da.ID, daProofs []da.Proof) ([]bool, error) { var included []bool var proofs []*blob.Proof for _, daProof := range daProofs { @@ -175,37 +179,21 @@ func (c *DAClient) Validate(ctx context.Context, ids []da.ID, daProofs []da.Proo proofs = append(proofs, proof) } for i, id := range ids { - height, _, commitment := SplitID(id) + id, ok := id.(ID) + if !ok { + return nil, errors.New("invalid ID") + } // TODO(tzdybal): for some reason, if proof doesn't match commitment, API returns (false, "blob: invalid proof") // but analysis of the code in celestia-node implies this should never happen - maybe it's caused by openrpc? // there is no way of gently handling errors here, but returned value is fine for us - isIncluded, _ := c.client.Blob.Included(ctx, height, c.Namespace, proofs[i], commitment) + isIncluded, _ := c.client.Blob.Included(ctx, id.Height, c.Namespace, proofs[i], id.Commitment) included = append(included, isIncluded) } return included, nil } -// heightLen is a length (in bytes) of serialized height. -// -// This is 8 as uint64 consist of 8 bytes. -const heightLen = 8 -const txHashLen = 32 - -func makeID(height uint64, commitment da.Commitment, txHash []byte) da.ID { - id := make([]byte, heightLen+txHashLen+len(commitment)) - binary.LittleEndian.PutUint64(id, height) - copy(id[heightLen:heightLen+txHashLen], txHash) - copy(id[heightLen+txHashLen:], commitment) - return id -} - -func SplitID(id da.ID) (uint64, []byte, da.Commitment) { - if len(id) <= heightLen { - return 0, nil, nil - } - // Return only height and commitment if ID is smaller to keep it backward compatible - if len(id) <= heightLen+txHashLen { - return 0, nil, id[heightLen : heightLen+txHashLen] - } - return binary.LittleEndian.Uint64(id[:heightLen]), id[heightLen : heightLen+txHashLen], id[heightLen+txHashLen:] +type ID struct { + Height uint64 + Commitment []byte + TxHash []byte } diff --git a/celestiada/verify/bindings/BlobstreamVerifier/BlobstreamVerifier.go b/celestia/verify/bindings/BlobstreamVerifier/BlobstreamVerifier.go similarity index 100% rename from celestiada/verify/bindings/BlobstreamVerifier/BlobstreamVerifier.go rename to celestia/verify/bindings/BlobstreamVerifier/BlobstreamVerifier.go diff --git a/celestiada/verify/bindings/BlobstreamX/BlobstreamX.go b/celestia/verify/bindings/BlobstreamX/BlobstreamX.go similarity index 100% rename from celestiada/verify/bindings/BlobstreamX/BlobstreamX.go rename to celestia/verify/bindings/BlobstreamX/BlobstreamX.go diff --git a/celestiada/verify/commitment.go b/celestia/verify/commitment.go similarity index 97% rename from celestiada/verify/commitment.go rename to celestia/verify/commitment.go index 8fd0893..f8efe30 100644 --- a/celestiada/verify/commitment.go +++ b/celestia/verify/commitment.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/stackrlabs/go-daash/celestiada/verify/bindings/blobstreamx" + "github.com/stackrlabs/go-daash/celestia/verify/bindings/blobstreamx" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" diff --git a/celestiada/verify/contracts/.github/workflows/test.yml b/celestia/verify/contracts/.github/workflows/test.yml similarity index 100% rename from celestiada/verify/contracts/.github/workflows/test.yml rename to celestia/verify/contracts/.github/workflows/test.yml diff --git a/celestiada/verify/contracts/.gitignore 
b/celestia/verify/contracts/.gitignore similarity index 100% rename from celestiada/verify/contracts/.gitignore rename to celestia/verify/contracts/.gitignore diff --git a/celestiada/verify/contracts/README.md b/celestia/verify/contracts/README.md similarity index 100% rename from celestiada/verify/contracts/README.md rename to celestia/verify/contracts/README.md diff --git a/celestiada/verify/contracts/foundry.toml b/celestia/verify/contracts/foundry.toml similarity index 100% rename from celestiada/verify/contracts/foundry.toml rename to celestia/verify/contracts/foundry.toml diff --git a/celestiada/verify/contracts/lib/blobstream-contracts b/celestia/verify/contracts/lib/blobstream-contracts similarity index 100% rename from celestiada/verify/contracts/lib/blobstream-contracts rename to celestia/verify/contracts/lib/blobstream-contracts diff --git a/celestiada/verify/contracts/src/BlobstreamVerifier.sol b/celestia/verify/contracts/src/BlobstreamVerifier.sol similarity index 100% rename from celestiada/verify/contracts/src/BlobstreamVerifier.sol rename to celestia/verify/contracts/src/BlobstreamVerifier.sol diff --git a/celestiada/verify/convert.go b/celestia/verify/convert.go similarity index 97% rename from celestiada/verify/convert.go rename to celestia/verify/convert.go index 80e07e8..ec0a54d 100644 --- a/celestiada/verify/convert.go +++ b/celestia/verify/convert.go @@ -3,7 +3,7 @@ package verify import ( "math/big" - bv "github.com/stackrlabs/go-daash/celestiada/verify/bindings/blobstreamverifier" + bv "github.com/stackrlabs/go-daash/celestia/verify/bindings/blobstreamverifier" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" diff --git a/celestiada/verify/proof.go b/celestia/verify/proof.go similarity index 96% rename from celestiada/verify/proof.go rename to celestia/verify/proof.go index d7f6dd6..3e70596 100644 --- a/celestiada/verify/proof.go +++ b/celestia/verify/proof.go @@ -6,7 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" - bv "github.com/stackrlabs/go-daash/celestiada/verify/bindings/blobstreamverifier" + bv "github.com/stackrlabs/go-daash/celestia/verify/bindings/blobstreamverifier" "github.com/tendermint/tendermint/rpc/client/http" ) diff --git a/celestiada/verify/verifier.go b/celestia/verify/verifier.go similarity index 88% rename from celestiada/verify/verifier.go rename to celestia/verify/verifier.go index 43aab8e..2f10814 100644 --- a/celestiada/verify/verifier.go +++ b/celestia/verify/verifier.go @@ -12,18 +12,18 @@ import ( "github.com/celestiaorg/celestia-app/pkg/square" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" - bv "github.com/stackrlabs/go-daash/celestiada/verify/bindings/blobstreamverifier" + bv "github.com/stackrlabs/go-daash/celestia/verify/bindings/blobstreamverifier" "github.com/tendermint/tendermint/rpc/client/http" ) -type DAVerifier struct { +type Verifier struct { ethClient *ethclient.Client tRPCClient *http.HTTP verifierContract common.Address blobstreamXContract common.Address } -func NewDAVerifier(ethEndpoint string, tRPCEndpoint string, verifierContract string, blobstreamXContract string) (*DAVerifier, error) { +func NewVerifier(ethEndpoint string, tRPCEndpoint string, verifierContract string, blobstreamXContract string) (*Verifier, error) { ethClient, err := ethclient.Dial(ethEndpoint) if err != nil { return nil, err @@ -32,7 +32,7 @@ func 
NewDAVerifier(ethEndpoint string, tRPCEndpoint string, verifierContract str if err != nil { return nil, err } - return &DAVerifier{ + return &Verifier{ ethClient: ethClient, tRPCClient: trpc, verifierContract: common.HexToAddress(verifierContract), @@ -40,7 +40,7 @@ func NewDAVerifier(ethEndpoint string, tRPCEndpoint string, verifierContract str }, nil } -func (d *DAVerifier) VerifyDataAvailable(txHash string) (bool, error) { +func (d *Verifier) VerifyDataAvailable(txHash string) (bool, error) { shareRange, err := d.GetSharePointer(txHash) if err != nil { return false, fmt.Errorf("failed to get share range: %w", err) @@ -92,7 +92,7 @@ func (d *DAVerifier) VerifyDataAvailable(txHash string) (bool, error) { return true, nil } -func (d *DAVerifier) GetSharePointer(txHash string) (SharePointer, error) { +func (d *Verifier) GetSharePointer(txHash string) (SharePointer, error) { txHashBytes, err := hex.DecodeString(txHash) if err != nil { return SharePointer{}, fmt.Errorf("failed to decode transaction hash: %w", err) diff --git a/celestiada/verify/contracts/lib/forge-std b/celestiada/verify/contracts/lib/forge-std deleted file mode 160000 index 978ac6f..0000000 --- a/celestiada/verify/contracts/lib/forge-std +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 978ac6fadb62f5f0b723c996f64be52eddba6801 diff --git a/cmd/blob-server/main.go b/cmd/blob-server/main.go index 696575e..37d2d8c 100644 --- a/cmd/blob-server/main.go +++ b/cmd/blob-server/main.go @@ -11,16 +11,16 @@ import ( "github.com/cenkalti/backoff/v4" "github.com/gin-gonic/gin" - "github.com/rollkit/go-da" "github.com/stackrlabs/go-daash" - "github.com/stackrlabs/go-daash/availda" - availVerify "github.com/stackrlabs/go-daash/availda/verify" - celestiaVerify "github.com/stackrlabs/go-daash/celestiada/verify" + "github.com/stackrlabs/go-daash/avail" + availVerify "github.com/stackrlabs/go-daash/avail/verify" + celestiaVerify "github.com/stackrlabs/go-daash/celestia/verify" + "github.com/stackrlabs/go-daash/da" ) type BlobServer struct { queue chan Job - Daasher *daash.DABuilder + Daasher *daash.ClientBuilder Jobs map[string]Job // map of job ID to job sync.Mutex } @@ -29,7 +29,7 @@ func NewBlobServer() *BlobServer { return &BlobServer{ queue: make(chan Job, 10), Jobs: make(map[string]Job), - Daasher: daash.NewDABuilder(), + Daasher: daash.NewClientBuilder(), } } @@ -123,7 +123,7 @@ func main() { router.Run() } -func postToDA(c context.Context, data []byte, DAClient da.DA) ([]da.ID, []da.Proof, error) { +func postToDA(c context.Context, data []byte, DAClient da.Client) ([]da.ID, []da.Proof, error) { daProofs := make([]da.Proof, 1) daIDs := make([]da.ID, 1) err := backoff.Retry(func() error { @@ -142,7 +142,8 @@ func postToDA(c context.Context, data []byte, DAClient da.DA) ([]da.ID, []da.Pro return daIDs, daProofs, nil } -func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.DABuilder) { +func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.ClientBuilder) { + var success bool switch layer { case daash.Celestia: txHash, ok := c.GetQuery("txHash") @@ -152,7 +153,7 @@ func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.DABuilder) { }) return } - verifier, err := celestiaVerify.NewDAVerifier( + verifier, err := celestiaVerify.NewVerifier( chainMetadata["sepolia"]["rpcUrl"], celestiaRpcUrl, chainMetadata["sepolia"]["blobstreamverifierAddress"], @@ -164,17 +165,13 @@ func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.DABuilder) { }) return } - success, err := verifier.VerifyDataAvailable(txHash) + 
success, err = verifier.VerifyDataAvailable(txHash) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{ "message": fmt.Sprintf("failed to verify data: %v", err), }) return } - c.JSON(http.StatusOK, gin.H{ - "success": success, - "message": "data verified onchain!", - }) case daash.Avail: blockHeight, ok := c.GetQuery("blockHeight") if !ok { @@ -205,12 +202,12 @@ func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.DABuilder) { return } verifier, err := availVerify.NewVerifier( - daasher.Clients[daash.Avail].(*availda.DAClient), + daasher.Clients[daash.Avail].(*avail.Client), chainMetadata["sepolia"]["rpcUrl"], chainMetadata["sepolia"]["availBridgeAddress"], chainMetadata["sepolia"]["vectorVerifierAddress"], chainMetadata["sepolia"]["vectorXAddress"], - daasher.Clients[daash.Avail].(*availda.DAClient).Config.Network, + daasher.Clients[daash.Avail].(*avail.Client).Config.Network, ) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{ @@ -218,7 +215,7 @@ func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.DABuilder) { }) return } - success, err := verifier.IsDataIncluded(blockHeightUint, extIndexUint) + success, err = verifier.IsDataIncluded(avail.ID{Height: blockHeightUint, ExtIndex: uint32(extIndexUint)}) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{ "success": false, @@ -226,21 +223,24 @@ func verifyDA(c *gin.Context, layer daash.DALayer, daasher *daash.DABuilder) { }) return } - if !success { - c.JSON(http.StatusOK, gin.H{ - "success": success, - "message": "data availability cannot be verified onchain!", - }) - return - } - c.JSON(http.StatusOK, gin.H{ - "success": success, - "message": "data availability succesfully verified onchain!", - }) + default: c.JSON(http.StatusBadRequest, gin.H{ "message": fmt.Sprintf("DA %s not supported yet", layer), }) return } + + if !success { + c.JSON(http.StatusOK, gin.H{ + "success": success, + "message": "data availability cannot be verified onchain!", + }) + return + } + c.JSON(http.StatusOK, gin.H{ + "success": success, + "message": "data availability succesfully verified onchain!", + }) + } diff --git a/da/interface.go b/da/interface.go new file mode 100644 index 0000000..5d7024d --- /dev/null +++ b/da/interface.go @@ -0,0 +1,44 @@ +package da + +import "context" + +// Generic interface to interact with any Data Availability layers. +// Modified fork of https://github.com/rollkit/go-da +type Client interface { + // MaxBlobSize returns the max blob size + MaxBlobSize(ctx context.Context) (uint64, error) + + // Get returns Blob for each given ID, or an error. + // + // Error should be returned if ID is not formatted properly, there is no Blob for given ID or any other client-level + // error occurred (dropped connection, timeout, etc). + Get(ctx context.Context, ids []ID) ([]Blob, error) + + // GetIDs returns IDs of all Blobs located in DA at given height. + GetIDs(ctx context.Context, height uint64) ([]ID, error) + + // Commit creates a Commitment for each given Blob. + Commit(ctx context.Context, blobs []Blob) ([]Commitment, error) + + // Submit submits the Blobs to Data Availability layer. + // + // This method is synchronous. Upon successful submission to Data Availability layer, it returns ID identifying blob + // in DA and Proof of inclusion. + // If options is nil, default options are used. + Submit(ctx context.Context, blobs []Blob, gasPrice float64) ([]ID, []Proof, error) + + // Validate validates Commitments against the corresponding Proofs. 
This should be possible without retrieving the Blobs. + Validate(ctx context.Context, ids []ID, proofs []Proof) ([]bool, error) +} + +// Blob is the data submitted/received from DA interface. +type Blob = []byte + +// ID should holds data required by the implementation to find blob in Data Availability layer. +type ID = any + +// Commitment should contain serialized cryptographic commitment to Blob value. +type Commitment = []byte + +// Proof should contain a proof of inclusion (publication) of Blob in Data Availability layer. +type Proof = []byte diff --git a/daash.go b/daash.go index 164efd8..5a86785 100644 --- a/daash.go +++ b/daash.go @@ -10,11 +10,11 @@ import ( "time" "github.com/cenkalti/backoff" - "github.com/rollkit/go-da" - "github.com/rollkit/go-da/test" - "github.com/stackrlabs/go-daash/availda" - "github.com/stackrlabs/go-daash/celestiada" - "github.com/stackrlabs/go-daash/eigenda" + "github.com/stackrlabs/go-daash/avail" + "github.com/stackrlabs/go-daash/celestia" + "github.com/stackrlabs/go-daash/da" + "github.com/stackrlabs/go-daash/eigen" + "github.com/stackrlabs/go-daash/mock" ) type DALayer string @@ -35,18 +35,18 @@ func IsValidDA(layer DALayer) bool { return false } -type DABuilder struct { - Clients map[DALayer]da.DA +type ClientBuilder struct { + Clients map[DALayer]da.Client } -func NewDABuilder() *DABuilder { - return &DABuilder{ - Clients: make(map[DALayer]da.DA), +func NewClientBuilder() *ClientBuilder { + return &ClientBuilder{ + Clients: make(map[DALayer]da.Client), } } // Initiates a new DAManager with clients from the sepcified DA layers -func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConfigPath string, celestiaAuthToken string, celestiaLightClientUrl string) (*DABuilder, error) { +func (d *ClientBuilder) InitClients(ctx context.Context, layers []DALayer, availConfigPath string, celestiaAuthToken string, celestiaLightClientUrl string) (*ClientBuilder, error) { if len(layers) == 0 { return nil, fmt.Errorf("no da layers provided") } @@ -54,10 +54,10 @@ func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConf for _, layer := range layers { switch layer { case Avail: - var avail da.DA + var availClient da.Client var err error err = backoff.Retry(func() error { - avail, err = availda.New(availConfigPath) + availClient, err = avail.NewClient(availConfigPath) return err //nolint: wrapcheck }, backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)) if err != nil { @@ -65,7 +65,7 @@ func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConf return nil, fmt.Errorf(" Failed to create avail client: %v", err) } log.Println("🟢 Avail DA client initialised") - d.Clients[Avail] = avail + d.Clients[Avail] = availClient case Celestia: if celestiaAuthToken == "" { @@ -74,7 +74,7 @@ func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConf } // We use a random pre-set hex string for namespace rn namespace := "9cb73e106b03d1050a13" - celestia, err := celestiada.New(ctx, celestiaLightClientUrl, celestiaAuthToken, namespace, -1) + celestia, err := celestia.NewClient(ctx, celestiaLightClientUrl, celestiaAuthToken, namespace, -1) if err != nil { return nil, err } @@ -82,7 +82,7 @@ func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConf d.Clients[Celestia] = celestia case Eigen: - eigen, err := eigenda.New("disperser-goerli.eigenda.xyz:443", time.Second*90, time.Second*5) + eigen, err := eigen.NewClient("disperser-goerli.eigenda.xyz:443", time.Second*90, 
time.Second*5) if err != nil { return nil, err } @@ -90,7 +90,7 @@ func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConf log.Println("🟢 Eigen DA client initialised") case Mock: - d.Clients[Mock] = test.NewDummyDA() + d.Clients[Mock] = mock.NewDummyDA() log.Println("🟢 Mock DA client initialised") default: @@ -103,36 +103,39 @@ func (d *DABuilder) InitClients(ctx context.Context, layers []DALayer, availConf func GetHumanReadableID(id da.ID, daLayer DALayer) any { switch daLayer { case Avail: - blockHeight, extIdx := availda.SplitID(id) - return struct { - BlockHeight uint32 `json:"blockHeight"` - ExtIdx uint32 `json:"extIdx"` - }{ - BlockHeight: blockHeight, - ExtIdx: extIdx, + availID, ok := id.(avail.ID) + if !ok { + return "" } + return availID case Celestia: - blockHeight, txHash, commitment := celestiada.SplitID(id) + id, ok := id.(celestia.ID) + if !ok { + return "" + } return struct { BlockHeight uint64 `json:"blockHeight"` TxHash string `json:"txHash"` Commitment da.Commitment `json:"commitment"` }{ - BlockHeight: blockHeight, - TxHash: hex.EncodeToString(txHash), - Commitment: commitment, + BlockHeight: id.Height, + TxHash: hex.EncodeToString(id.TxHash), + Commitment: id.Commitment, } default: return "" } } -func GetExplorerLink(client da.DA, ids []da.ID) (string, error) { +func GetExplorerLink(client da.Client, ids []da.ID) (string, error) { switch daClient := client.(type) { - case *celestiada.DAClient: - _, txHash, _ := celestiada.SplitID(ids[0]) - return fmt.Sprintf("https://mocha-4.celenium.io/tx/%s", hex.EncodeToString(txHash)), nil - case *availda.DAClient: + case *celestia.Client: + id, ok := ids[0].(celestia.ID) + if !ok { + return "", fmt.Errorf("invalid ID") + } + return fmt.Sprintf("https://mocha-4.celenium.io/tx/%s", hex.EncodeToString(id.TxHash)), nil + case *avail.Client: ext, err := daClient.GetExtrinsic(ids[0]) if err != nil { return "", err diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..565e377 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,32 @@ +version: "3.8" + +services: + celestia-light-client-init: + image: ghcr.io/celestiaorg/celestia-node:v0.12.4 + user: "10001:10001" + environment: + NODE_TYPE: ${NODE_TYPE} # Ensure these environment variables are set in your shell or a .env file + P2P_NETWORK: ${NETWORK} + volumes: + - ${NODE_STORE_DIR}:/home/celestia + command: celestia ${NODE_TYPE} init --p2p.network ${NETWORK} + restart: on-failure + + celestia-light-client: + image: ghcr.io/celestiaorg/celestia-node:v0.12.4 + user: "10001:10001" + networks: [go-daash] + environment: + NODE_TYPE: ${NODE_TYPE} + P2P_NETWORK: ${NETWORK} + volumes: + - ${NODE_STORE_DIR}:/home/celestia + ports: + - "26658:26658" + command: celestia ${NODE_TYPE} start --core.ip ${CELESTIA_RPC_URL} --p2p.network ${NETWORK} --rpc.addr 0.0.0.0 + depends_on: + - celestia-light-client-init + restart: always + +networks: + go-daash: diff --git a/eigenda/eigenda.go b/eigen/client.go similarity index 86% rename from eigenda/eigenda.go rename to eigen/client.go index 47f2940..f9fed74 100644 --- a/eigenda/eigenda.go +++ b/eigen/client.go @@ -1,15 +1,14 @@ -package eigenda +package eigen import ( "context" "encoding/base64" - "encoding/binary" "encoding/hex" "fmt" "log" "time" - "github.com/rollkit/go-da" + "github.com/stackrlabs/go-daash/da" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) @@ -31,8 +30,8 @@ type Client struct { DAStatusQueryRetryInterval time.Duration } -// New returns a new instance of the EigenDA 
client. -func New(daRpc string, daStatusQueryTimeout time.Duration, daStatusQueryRetryInterval time.Duration) (*Client, error) { +// NewClient returns a new instance of the EigenDA client. +func NewClient(daRpc string, daStatusQueryTimeout time.Duration, daStatusQueryRetryInterval time.Duration) (*Client, error) { conn, err := grpc.Dial(daRpc, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) if err != nil { fmt.Println("Unable to connect to EigenDA, aborting", "err", err) @@ -65,15 +64,21 @@ func (e *Client) Submit(ctx context.Context, daBlobs []da.Blob, gasPrice float64 if err != nil { return nil, nil, fmt.Errorf("failed to disperse blob: %v", err) } - blobID := e.makeID(blobInfo.BlobVerificationProof.BlobIndex, blobInfo.BlobVerificationProof.BatchMetadata.BatchHeaderHash) + blobID := ID{ + BlobIndex: blobInfo.BlobVerificationProof.BlobIndex, + BatchHeaderHash: blobInfo.BlobVerificationProof.BatchMetadata.BatchHeaderHash, + } return []da.ID{blobID}, []da.Proof{blobInfo.BlobVerificationProof.InclusionProof}, nil } func (e *Client) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { - blobIndex, batchHeaderHash := e.splitID(ids[0]) + blobID, ok := ids[0].(ID) + if !ok { + return nil, fmt.Errorf("invalid ID type") + } resp, err := e.disperserClient.RetrieveBlob(ctx, &RetrieveBlobRequest{ - BlobIndex: blobIndex, - BatchHeaderHash: batchHeaderHash, + BlobIndex: blobID.BlobIndex, + BatchHeaderHash: blobID.BatchHeaderHash, }) if err != nil { return nil, fmt.Errorf("failed to retrieve blob: %v", err) @@ -93,15 +98,9 @@ func (e *Client) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof) ( return nil, nil } -func (e *Client) makeID(blobIndex uint32, batchHeaderHash []byte) da.ID { - idBytes := make([]byte, 4+len(batchHeaderHash)) - binary.BigEndian.PutUint32(idBytes[:4], blobIndex) - copy(idBytes[4:], batchHeaderHash) - return idBytes -} - -func (e *Client) splitID(id da.ID) (uint32, []byte) { - return binary.BigEndian.Uint32(id[:4]), id[4:] +type ID struct { + BlobIndex uint32 + BatchHeaderHash []byte } func (e *Client) disperseBlob(ctx context.Context, txData []byte) (*BlobInfo, error) { diff --git a/eigenda/disperser.pb.go b/eigen/disperser.pb.go similarity index 99% rename from eigenda/disperser.pb.go rename to eigen/disperser.pb.go index 32a26a7..6d07e46 100644 --- a/eigenda/disperser.pb.go +++ b/eigen/disperser.pb.go @@ -4,7 +4,7 @@ // protoc v4.25.2 // source: api/proto/disperser/disperser.proto -package eigenda +package eigen import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" diff --git a/eigenda/disperser_grpc.pb.go b/eigen/disperser_grpc.pb.go similarity index 99% rename from eigenda/disperser_grpc.pb.go rename to eigen/disperser_grpc.pb.go index e51b2cf..b4ba3a1 100644 --- a/eigenda/disperser_grpc.pb.go +++ b/eigen/disperser_grpc.pb.go @@ -4,7 +4,7 @@ // - protoc v4.25.2 // source: api/proto/disperser/disperser.proto -package eigenda +package eigen import ( context "context" diff --git a/go.mod b/go.mod index 0796635..26e3f3e 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,6 @@ require ( github.com/ethereum/go-ethereum v1.13.10 github.com/gin-gonic/gin v1.9.1 github.com/joho/godotenv v1.5.1 - github.com/rollkit/go-da v0.2.0 github.com/tendermint/tendermint v0.34.29 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.21.0 diff --git a/go.sum b/go.sum index 3b3f708..1080022 100644 --- a/go.sum +++ b/go.sum @@ -2120,8 +2120,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE 
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rollkit/go-da v0.2.0 h1:rNpWBa2inczgZ955ky3wy8FbrMajzVbm0UfbBGzm5UE= -github.com/rollkit/go-da v0.2.0/go.mod h1:Kef0XI5ecEKd3TXzI8S+9knAUJnZg0svh2DuXoCsPlM= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= diff --git a/mock/client.go b/mock/client.go new file mode 100644 index 0000000..1955366 --- /dev/null +++ b/mock/client.go @@ -0,0 +1,153 @@ +package mock + +import ( + "bytes" + "context" + "crypto/ed25519" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "errors" + "sync" + + "github.com/stackrlabs/go-daash/da" +) + +// DefaultMaxBlobSize is the default max blob size +const DefaultMaxBlobSize = 64 * 64 * 482 + +// DummyDA is a simple implementation of in-memory DA. Not production ready! Intended only for testing! +// +// Data is stored in a map, where key is a serialized sequence number. This key is returned as ID. +// Commitments are simply hashes, and proofs are ED25519 signatures. +type DummyDA struct { + mu *sync.Mutex // protects data and height + data map[uint64][]kvp + maxBlobSize uint64 + height uint64 + privKey ed25519.PrivateKey + pubKey ed25519.PublicKey +} + +type kvp struct { + key, value []byte +} + +// NewDummyDA create new instance of DummyDA +func NewDummyDA(opts ...func(*DummyDA) *DummyDA) *DummyDA { + da := &DummyDA{ + mu: new(sync.Mutex), + data: make(map[uint64][]kvp), + maxBlobSize: DefaultMaxBlobSize, + } + for _, f := range opts { + da = f(da) + } + da.pubKey, da.privKey, _ = ed25519.GenerateKey(rand.Reader) + return da +} + +// MaxBlobSize returns the max blob size in bytes. +func (d *DummyDA) MaxBlobSize(ctx context.Context) (uint64, error) { + return d.maxBlobSize, nil +} + +// Get returns Blobs for given IDs. +func (d *DummyDA) Get(ctx context.Context, ids []da.ID) ([]da.Blob, error) { + d.mu.Lock() + defer d.mu.Unlock() + blobs := make([]da.Blob, len(ids)) + for i, id := range ids { + id, ok := id.(ID) + if !ok { + return nil, errors.New("invalid ID") + } + if len(id) < 8 { + return nil, errors.New("invalid ID") + } + height := binary.LittleEndian.Uint64(id) + found := false + for j := 0; !found && j < len(d.data[height]); j++ { + if bytes.Equal(d.data[height][j].key, id) { + blobs[i] = d.data[height][j].value + found = true + } + } + if !found { + return nil, errors.New("no blob for given ID") + } + } + return blobs, nil +} + +// GetIDs returns IDs of Blobs at given DA height. +func (d *DummyDA) GetIDs(ctx context.Context, height uint64) ([]da.ID, error) { + d.mu.Lock() + defer d.mu.Unlock() + kvps := d.data[height] + ids := make([]da.ID, len(kvps)) + for i, kv := range kvps { + ids[i] = kv.key + } + return ids, nil +} + +// Commit returns cryptographic Commitments for given blobs. +func (d *DummyDA) Commit(ctx context.Context, blobs []da.Blob) ([]da.Commitment, error) { + commits := make([]da.Commitment, len(blobs)) + for i, blob := range blobs { + commits[i] = d.getHash(blob) + } + return commits, nil +} + +// Submit stores blobs in DA layer. 
+func (d *DummyDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64) ([]da.ID, []da.Proof, error) { + d.mu.Lock() + defer d.mu.Unlock() + ids := make([]da.ID, len(blobs)) + proofs := make([]da.Proof, len(blobs)) + d.height += 1 + for i, blob := range blobs { + ids[i] = append(d.nextID(), d.getHash(blob)...) + proofs[i] = d.getProof(ids[i].(ID), blob) + + d.data[d.height] = append(d.data[d.height], kvp{ids[i].(ID), blob}) + } + + return ids, proofs, nil +} + +// Validate checks the Proofs for given IDs. +func (d *DummyDA) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof) ([]bool, error) { + if len(ids) != len(proofs) { + return nil, errors.New("number of IDs doesn't equal to number of proofs") + } + results := make([]bool, len(ids)) + for i := 0; i < len(ids); i++ { + results[i] = ed25519.Verify(d.pubKey, ids[i].(ID)[8:], proofs[i]) + } + return results, nil +} + +func (d *DummyDA) nextID() []byte { + return d.getID(d.height) +} + +type ID = []byte + +func (d *DummyDA) getID(cnt uint64) []byte { + id := make([]byte, 8) + binary.LittleEndian.PutUint64(id, cnt) + return id +} + +func (d *DummyDA) getHash(blob []byte) []byte { + sha := sha256.Sum256(blob) + return sha[:] +} + +func (d *DummyDA) getProof(id ID, blob []byte) []byte { + sign, _ := d.privKey.Sign(rand.Reader, d.getHash(blob), &ed25519.Options{}) + return sign +}
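For reference, a minimal usage sketch of the new da.Client interface introduced by this patch. It assumes the import paths shown in the diff (github.com/stackrlabs/go-daash and its da subpackage) and sticks to the Mock layer so it runs without an external DA node; the empty strings passed to InitClients stand in for the Avail/Celestia arguments that the mock backend does not use.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/stackrlabs/go-daash"
	"github.com/stackrlabs/go-daash/da"
)

func main() {
	ctx := context.Background()

	// Build only the mock client; the Avail config path and Celestia
	// auth token / light-client URL are unused for the Mock layer.
	builder, err := daash.NewClientBuilder().InitClients(ctx, []daash.DALayer{daash.Mock}, "", "", "")
	if err != nil {
		log.Fatal(err)
	}

	// Every backend now satisfies the same da.Client interface.
	var client da.Client = builder.Clients[daash.Mock]

	// Submit a blob; the mock backend ignores gasPrice.
	ids, proofs, err := client.Submit(ctx, []da.Blob{[]byte("hello daash")}, -1)
	if err != nil {
		log.Fatal(err)
	}

	// IDs are plain Go values (da.ID = any); each backend defines its own ID type.
	blobs, err := client.Get(ctx, ids)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("retrieved %d blob(s), %d proof(s)\n", len(blobs), len(proofs))
}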
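A second sketch of how the struct-based IDs added in this patch replace the old byte-packed MakeID/SplitID helpers: callers construct an avail.ID directly and pass it through the generic da.ID parameter. FetchAvailBlob is a hypothetical helper written for illustration, not part of the patch.

package example

import (
	"context"
	"fmt"

	"github.com/stackrlabs/go-daash/avail"
	"github.com/stackrlabs/go-daash/da"
)

// FetchAvailBlob retrieves a single blob from Avail by block height and
// extrinsic index, using the typed avail.ID instead of encoded bytes.
func FetchAvailBlob(ctx context.Context, client *avail.Client, height uint64, extIndex uint32) (da.Blob, error) {
	id := avail.ID{Height: height, ExtIndex: extIndex}
	blobs, err := client.Get(ctx, []da.ID{id})
	if err != nil {
		return nil, fmt.Errorf("failed to get blob: %w", err)
	}
	// avail.Client.Get currently handles a single ID, so the first blob is the result.
	return blobs[0], nil
}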