diff --git a/blob/blob.go b/blob/blob.go
index 8ff7c39eec..89177b713e 100644
--- a/blob/blob.go
+++ b/blob/blob.go
@@ -31,8 +31,9 @@ func (com Commitment) Equal(c Commitment) bool {
 	return bytes.Equal(com, c)
 }
 
-// Proof is a collection of nmt.Proofs that verifies the inclusion of the data.
-// Proof proves the WHOLE namespaced data for the particular row.
+// Proof is a set of nmt proofs that can be verified only through
+// the Included method (due to an nmt limitation: https://github.com/celestiaorg/nmt/issues/218).
+// Proof proves the WHOLE namespaced data to the row roots.
 // TODO (@vgonkivs): rework `Proof` in order to prove a particular blob.
 // https://github.com/celestiaorg/celestia-node/issues/2303
 type Proof []*nmt.Proof
diff --git a/blob/parser.go b/blob/parser.go
index 0d148ddde1..6c103698b0 100644
--- a/blob/parser.go
+++ b/blob/parser.go
@@ -7,21 +7,20 @@ import (
 	"github.com/celestiaorg/celestia-app/pkg/shares"
 )
 
-// parser is a helper struct that allows collecting shares and transforming them into the blob.
-// it contains all necessary information that is needed to build the blob:
-// * position of the blob inside the EDS;
-// * blob's length;
-// * shares needed to build the blob;
-// * extra condition to verify the final blob.
+// parser collects shares and transforms them into a blob.
+// It can handle only one blob at a time.
 type parser struct {
-	index    int
-	length   int
+	// index is the position of the blob inside the EDS.
+	index int
+	// length is the amount of shares needed to build the blob.
+	length int
+	// shares is the set of shares needed to build the blob.
 	shares   []shares.Share
 	verifyFn func(blob *Blob) bool
 }
 
-// NOTE: passing shares here needed to detect padding shares(as we do not need this check in
-// addShares)
+// set tries to find the first share of the blob by skipping padding shares and
+// sets the metadata of the blob (index and length).
 func (p *parser) set(index int, shrs []shares.Share) ([]shares.Share, error) {
 	if len(shrs) == 0 {
 		return nil, errEmptyShares
@@ -47,9 +46,9 @@ func (p *parser) set(index int, shrs []shares.Share) ([]shares.Share, error) {
 	return shrs, nil
 }
 
-// addShares sets shares until the blob is completed and returns extra shares back.
-// we do not need here extra condition to check padding shares as we do not expect it here.
-// it is possible only between two blobs.
+// addShares sets shares until the blob is complete and returns the remaining shares.
+// It assumes that the shares required to complete the blob are correct and
+// do not include padding shares.
 func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComplete bool) {
 	index := -1
 	for i, sh := range shares {
@@ -71,7 +70,7 @@ func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComple
 	return shares[index+1:], true
 }
 
-// parse parses shares and creates the Blob.
+// parse ensures that the correct amount of shares was collected and creates a blob from the existing shares.
 func (p *parser) parse() (*Blob, error) {
 	if p.length != len(p.shares) {
 		return nil, fmt.Errorf("invalid shares amount. want:%d, have:%d", p.length, len(p.shares))
@@ -111,7 +110,8 @@ func (p *parser) parse() (*Blob, error) {
 	return blob, nil
 }
 
-// skipPadding skips first share in the range if this share is the Padding share.
+// skipPadding iterates through the shares until a non-padding share is found. It guarantees that
+// the returned set of shares starts with a non-padding share (or is empty).
 func (p *parser) skipPadding(shares []shares.Share) ([]shares.Share, error) {
 	if len(shares) == 0 {
 		return nil, errEmptyShares
@@ -147,6 +147,7 @@ func (p *parser) isEmpty() bool {
 	return p.index == 0 && p.length == 0 && len(p.shares) == 0
 }
 
+// reset cleans up the parser so it can be re-used within the same verification flow.
func (p *parser) reset() {
 	p.index = 0
 	p.length = 0
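For readers new to the parser, the sketch below (not part of the diff) shows how the pieces described above are meant to fit together: `set` locates the first blob share and records its metadata, `addShares` accumulates shares until the blob is complete, `parse` builds the blob, and `reset` lets the same parser keep scanning when `verifyFn` rejects the result. The `blobFromShares` helper is hypothetical and simplified: it scans a flat slice of shares and ignores the index bookkeeping that the real retrieve performs per row.

package blob

import "github.com/celestiaorg/celestia-app/pkg/shares"

// blobFromShares is a hypothetical, simplified driver for the parser.
func blobFromShares(shrs []shares.Share, commitment Commitment) (*Blob, error) {
	p := &parser{verifyFn: func(b *Blob) bool {
		return b.compareCommitments(commitment)
	}}

	for len(shrs) > 0 {
		// set skips leading padding shares and records the blob's index and length;
		// the index argument is simplified here.
		rest, err := p.set(0, shrs)
		if err != nil {
			return nil, err
		}

		// addShares accumulates shares until the blob is complete and
		// returns the shares left over after it.
		rest, isComplete := p.addShares(rest)
		if !isComplete {
			break
		}

		blob, err := p.parse()
		if err != nil {
			return nil, err
		}
		if p.verifyFn(blob) {
			// the commitment matched: stop scanning the namespace.
			return blob, nil
		}

		// wrong blob: clean up the parser and continue with the leftover shares.
		p.reset()
		shrs = rest
	}
	return nil, ErrBlobNotFound
}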
diff --git a/blob/service.go b/blob/service.go
index d75e3a3bfd..25bf48fd1f 100644
--- a/blob/service.go
+++ b/blob/service.go
@@ -111,6 +111,8 @@ func (s *Service) Submit(ctx context.Context, blobs []*Blob, gasPrice GasPrice)
 }
 
 // Get retrieves all the blobs for given namespaces at the given height by commitment.
+// Get collects all namespaced data from the EDS, constructs blobs
+// and compares commitments. `ErrBlobNotFound` is returned if the blob is not found.
 func (s *Service) Get(
 	ctx context.Context,
 	height uint64,
@@ -135,7 +137,8 @@ func (s *Service) Get(
 }
 
 // GetProof retrieves all blobs in the given namespaces at the given height by commitment
-// and returns their Proof.
+// and returns their Proof. It collects all namespaced data from the EDS, constructs blobs
+// and compares commitments.
 func (s *Service) GetProof(
 	ctx context.Context,
 	height uint64,
@@ -227,13 +230,6 @@ func (s *Service) Included(
 	)
 
 	// In the current implementation, LNs will have to download all shares to recompute the commitment.
-	// To achieve 1. we need to modify Proof structure and to store all subtree roots, that were
-	// involved in commitment creation and then call `merkle.HashFromByteSlices`(tendermint package).
-	// nmt.Proof is verifying share inclusion by recomputing row roots, so, theoretically, we can do
-	// the same but using subtree roots. For this case, we need an extra method in nmt.Proof
-	// that will perform all reconstructions,
-	// but we have to guarantee that all our stored subtree roots will be on the same height(e.g. one
-	// level above shares).
 	// TODO(@vgonkivs): rework the implementation to perform all verification without network requests.
 	sharesParser := &parser{verifyFn: func(blob *Blob) bool {
 		return blob.compareCommitments(commitment)
@@ -250,8 +246,8 @@ func (s *Service) Included(
 }
 
 // retrieve retrieves blobs and their proofs by requesting the whole namespace and
-// comparing Commitments with each blob.
-// Retrieving is stopped once the requested blob/proof is found.
+// comparing Commitments.
+// Retrieving is stopped once the `verifyFn` condition in sharesParser is met.
 func (s *Service) retrieve(
 	ctx context.Context,
 	height uint64,
@@ -284,6 +280,7 @@ func (s *Service) retrieve(
 
 	getCtx, getSharesSpan := tracer.Start(ctx, "get-shares-by-namespace")
 
+	// collect shares for the requested namespace
 	namespacedShares, err := s.shareGetter.GetSharesByNamespace(getCtx, header, namespace)
 	if err != nil {
 		if errors.Is(err, share.ErrNotFound) {
@@ -338,19 +335,20 @@ func (s *Service) retrieve(
 				return nil, nil, err
 			}
 
+			// update index and shares if padding shares were detected.
 			if len(appShares) != len(shrs) {
-				// update index and shares if a padding share was detected.
 				index += len(appShares) - len(shrs)
 				appShares = shrs
 			}
 		}
 
 		shrs, isComplete = sharesParser.addShares(appShares)
+		// move to the next row if the blob is incomplete
 		if !isComplete {
 			appShares = nil
 			break
 		}
-
+		// otherwise construct the blob
 		blob, err := sharesParser.parse()
 		if err != nil {
 			return nil, nil, err
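The `index += len(appShares) - len(shrs)` adjustment above is easy to misread; the toy helper below (hypothetical, not part of the change) spells out the arithmetic: when leading padding shares are skipped from a row, the blob's starting index inside the EDS advances by exactly the number of skipped shares.

package blob

import "github.com/celestiaorg/celestia-app/pkg/shares"

// advanceIndex is a hypothetical illustration of the index adjustment in retrieve:
// rowShares is the row as returned for the namespace, withoutPadding is the same row
// after its leading padding shares were skipped.
func advanceIndex(index int, rowShares, withoutPadding []shares.Share) int {
	// e.g. a row of 5 shares with 2 leading padding shares:
	// len(rowShares) == 5, len(withoutPadding) == 3, so index advances by 2.
	return index + (len(rowShares) - len(withoutPadding))
}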
diff --git a/blob/service_test.go b/blob/service_test.go
index 46c8794682..b1d9c78998 100644
--- a/blob/service_test.go
+++ b/blob/service_test.go
@@ -9,15 +9,16 @@ import (
 	"testing"
 	"time"
 
-	"github.com/celestiaorg/celestia-app/pkg/appconsts"
-	"github.com/celestiaorg/celestia-app/pkg/shares"
-	"github.com/celestiaorg/go-header/store"
 	ds "github.com/ipfs/go-datastore"
 	ds_sync "github.com/ipfs/go-datastore/sync"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 
+	"github.com/celestiaorg/celestia-app/pkg/appconsts"
+	"github.com/celestiaorg/celestia-app/pkg/shares"
+	"github.com/celestiaorg/go-header/store"
+
 	"github.com/celestiaorg/celestia-node/blob/blobtest"
 	"github.com/celestiaorg/celestia-node/header"
 	"github.com/celestiaorg/celestia-node/header/headertest"