feat(shwap): Integrate shrex into shwap #3554

Draft
wants to merge 2 commits into base: shwap
2 changes: 1 addition & 1 deletion go.mod
@@ -67,6 +67,7 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.26.0
go.opentelemetry.io/otel/trace v1.26.0
go.opentelemetry.io/proto/otlp v1.2.0
go.uber.org/atomic v1.11.0
go.uber.org/fx v1.22.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.24.0
@@ -324,7 +325,6 @@ require (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/dig v1.17.1 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
13 changes: 13 additions & 0 deletions libs/utils/close.go
@@ -0,0 +1,13 @@
package utils

import (
"io"

logging "github.com/ipfs/go-log/v2"
)

func CloseAndLog(log logging.StandardLogger, name string, closer io.Closer) {
if err := closer.Close(); err != nil {
log.Warnf("closing %s: %s", name, err)
}
}
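
A minimal usage sketch for the new helper, assuming a go-log logger; the file name and logger name are hypothetical:

package main

import (
	"os"

	logging "github.com/ipfs/go-log/v2"

	"github.com/celestiaorg/celestia-node/libs/utils"
)

var log = logging.Logger("example")

func main() {
	f, err := os.Open("headers.dat") // hypothetical file
	if err != nil {
		log.Fatal(err)
	}
	// CloseAndLog logs a warning with the given name if Close returns an error.
	defer utils.CloseAndLog(log, "headers file", f)
	// ... read from f ...
}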
4 changes: 2 additions & 2 deletions nodebuilder/blob/blob.go
@@ -23,8 +23,8 @@ type Module interface {
// If all blobs were found without any errors, the user will receive a list of blobs.
// If the BlobService couldn't find any blobs under the requested namespaces,
// the user will receive an empty list of blobs along with an empty error.
// If some of the requested namespaces were not found, the user will receive all the found blobs and an empty error.
// If there were internal errors during some of the requests,
// If some of the requested namespaces were not found, the user will receive all the found blobs
// and an empty error. If there were internal errors during some of the requests,
// the user will receive all found blobs along with a combined error message.
//
// All blobs will preserve the order of the namespaces that were requested.
2 changes: 1 addition & 1 deletion nodebuilder/share/opts.go
@@ -5,10 +5,10 @@ import (

"github.com/celestiaorg/celestia-node/share/eds"
"github.com/celestiaorg/celestia-node/share/getters"
disc "github.com/celestiaorg/celestia-node/share/p2p/discovery"
"github.com/celestiaorg/celestia-node/share/p2p/peers"
"github.com/celestiaorg/celestia-node/share/p2p/shrexeds"
"github.com/celestiaorg/celestia-node/share/p2p/shrexnd"
disc "github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery"
)

// WithPeerManagerMetrics is a utility function to turn on peer manager metrics and that is
2 changes: 1 addition & 1 deletion share/availability/full/testing.go
@@ -13,7 +13,7 @@ import (
"github.com/celestiaorg/celestia-node/share/eds"
"github.com/celestiaorg/celestia-node/share/getters"
"github.com/celestiaorg/celestia-node/share/ipld"
"github.com/celestiaorg/celestia-node/share/p2p/discovery"
"github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery"
)

// GetterWithRandSquare provides a share.Getter filled with 'n' NMT
2 changes: 2 additions & 0 deletions share/new_eds/accessor.go
@@ -14,6 +14,8 @@ import (
type Accessor interface {
// Size returns square size of the Accessor.
Size(ctx context.Context) int
// DataHash returns the data hash of the Accessor.
DataHash(ctx context.Context) (share.DataHash, error)
// Sample returns share and corresponding proof for row and column indices. Implementation can
// choose which axis to use for proof. Chosen axis for proof should be indicated in the returned
// Sample.
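
A minimal sketch of how a consumer might use the new Accessor method; logDataHash is a hypothetical helper living alongside the interface:

// logDataHash prints the square size and data hash of an Accessor.
func logDataHash(ctx context.Context, acc Accessor) error {
	dh, err := acc.DataHash(ctx)
	if err != nil {
		return fmt.Errorf("getting data hash: %w", err)
	}
	size := acc.Size(ctx)
	fmt.Printf("accessor %dx%d, data hash: %X\n", size, size, dh)
	return nil
}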
8 changes: 8 additions & 0 deletions share/new_eds/close_once.go
@@ -42,6 +42,14 @@ func (c *closeOnce) Size(ctx context.Context) int {
return c.f.Size(ctx)
}

// DataHash returns the data hash of the Accessor.
func (c *closeOnce) DataHash(ctx context.Context) (share.DataHash, error) {
if c.closed.Load() {
return nil, errAccessorClosed
}
return c.f.DataHash(ctx)
}

func (c *closeOnce) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) {
if c.closed.Load() {
return shwap.Sample{}, errAccessorClosed
4 changes: 4 additions & 0 deletions share/new_eds/close_once_test.go
@@ -50,6 +50,10 @@ func (s *stubEdsAccessorCloser) Size(context.Context) int {
return 0
}

func (s *stubEdsAccessorCloser) DataHash(context.Context) (share.DataHash, error) {
return nil, nil
}

func (s *stubEdsAccessorCloser) Sample(context.Context, int, int) (shwap.Sample, error) {
return shwap.Sample{}, nil
}
5 changes: 5 additions & 0 deletions share/new_eds/proofs_cache.go
@@ -77,6 +77,11 @@ func (c *proofsCache) Size(ctx context.Context) int {
return int(size)
}

// DataHash returns the data hash of the Accessor.
func (c *proofsCache) DataHash(ctx context.Context) (share.DataHash, error) {
return c.inner.DataHash(ctx)
}

func (c *proofsCache) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) {
axisType, axisIdx, shrIdx := rsmt2d.Row, rowIdx, colIdx
ax, err := c.axisWithProofs(ctx, axisType, axisIdx)
7 changes: 7 additions & 0 deletions share/new_eds/rsmt2d.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"

"github.com/celestiaorg/celestia-app/pkg/da"
"github.com/celestiaorg/celestia-app/pkg/wrapper"
"github.com/celestiaorg/rsmt2d"

@@ -24,6 +25,12 @@ func (eds *Rsmt2D) Size(context.Context) int {
return int(eds.Width())
}

// DataHash returns the data hash of the Accessor.
func (eds *Rsmt2D) DataHash(context.Context) (share.DataHash, error) {
dah, err := da.NewDataAvailabilityHeader(eds.ExtendedDataSquare)
if err != nil {
return nil, fmt.Errorf("creating DataAvailabilityHeader: %w", err)
}
return dah.Hash(), nil
}

// Sample returns share and corresponding proof for row and column indices.
func (eds *Rsmt2D) Sample(
_ context.Context,
123 changes: 123 additions & 0 deletions share/shwap/namespace_data_id.go
@@ -0,0 +1,123 @@
package shwap

import (
"encoding/binary"
"fmt"

"github.com/celestiaorg/celestia-node/share"
)

// NamespaceDataIDSize defines the total size of a NamespaceDataID in bytes, combining the
// size of an EdsID, two uint16 row indices, and the size of a Namespace.
const NamespaceDataIDSize = EdsIDSize + 4 + share.NamespaceSize

// NamespaceDataID uniquely identifies namespaced data spanning a range of rows within an
// Extended Data Square (EDS).
type NamespaceDataID struct {
// Embedding EdsID to include the block height.
EdsID
// FromRowIndex and ToRowIndex specify the range of rows within the data square.
FromRowIndex, ToRowIndex int
// DataNamespace is the namespace of the identified data.
DataNamespace share.Namespace
}

// NewNamespaceDataID creates a new NamespaceDataID with the specified parameters. It
// validates the NamespaceDataID against the provided EDS size before returning.
func NewNamespaceDataID(
height uint64,
fromRowIndex, toRowIndex int,
namespace share.Namespace,
edsSize int,
) (NamespaceDataID, error) {
ndid := NamespaceDataID{
EdsID: EdsID{
Height: height,
},
FromRowIndex: fromRowIndex,
ToRowIndex: toRowIndex,
DataNamespace: namespace,
}

if err := ndid.Verify(edsSize); err != nil {
return NamespaceDataID{}, err
}
return ndid, nil
}

// NamespaceDataIDFromBinary deserializes a NamespaceDataID from its binary form. It returns
// an error if the binary data's length does not match the expected size.
func NamespaceDataIDFromBinary(data []byte) (NamespaceDataID, error) {
if len(data) != NamespaceDataIDSize {
return NamespaceDataID{},
fmt.Errorf("invalid RowNamespaceDataID length: expected %d, got %d", RowNamespaceDataIDSize, len(data))
}

edsID, err := EdsIDFromBinary(data[:EdsIDSize])
if err != nil {
return NamespaceDataID{}, fmt.Errorf("error unmarshaling RowID: %w", err)
}

fromRowIndex := int(binary.BigEndian.Uint16(data[EdsIDSize:]))
toRowIndex := int(binary.BigEndian.Uint16(data[EdsIDSize+2:]))
ns := share.Namespace(data[EdsIDSize+4:])
if err := ns.ValidateForData(); err != nil {
return NamespaceDataID{}, fmt.Errorf("error validating DataNamespace: %w", err)
}

return NamespaceDataID{
EdsID: edsID,
FromRowIndex: fromRowIndex,
ToRowIndex: toRowIndex,
DataNamespace: ns,
}, nil
}

// MarshalBinary encodes NamespaceDataID into binary form.
// NOTE: Proto is avoided because
// * Its size is not deterministic, and IPLD requires deterministic sizes.
// * No support for uint16
func (ndid NamespaceDataID) MarshalBinary() ([]byte, error) {
data := make([]byte, 0, NamespaceDataIDSize)
return ndid.appendTo(data), nil
}

// Verify checks the validity of NamespaceDataID's fields, ensuring that the row range fits
// within the given EDS size before running Validate.
func (ndid NamespaceDataID) Verify(edsSize int) error {
if ndid.FromRowIndex >= edsSize {
return fmt.Errorf("FromRowIndex: %w: %d >= %d", ErrOutOfBounds, ndid.FromRowIndex, edsSize)
}
if ndid.ToRowIndex >= edsSize {
return fmt.Errorf("ToRowIndex: %w: %d >= %d", ErrOutOfBounds, ndid.ToRowIndex, edsSize)
}
return ndid.Validate()
}

// Validate performs basic consistency checks on the NamespaceDataID fields without requiring
// the EDS size.
func (ndid NamespaceDataID) Validate() error {
if err := ndid.EdsID.Validate(); err != nil {
return fmt.Errorf("error validating RowID: %w", err)
}
if ndid.FromRowIndex > ndid.ToRowIndex {
return fmt.Errorf("%w: FromRowIndex %d is greater than ToRowIndex %d",
ErrInvalidShwapID, ndid.FromRowIndex, ndid.ToRowIndex)
}
if ndid.FromRowIndex < 0 {
return fmt.Errorf("%w: FromRowIndex %d", ErrInvalidShwapID, ndid.FromRowIndex)
}
if ndid.ToRowIndex < 0 {
return fmt.Errorf("%w: ToRowIndex %d", ErrInvalidShwapID, ndid.ToRowIndex)
}
if err := ndid.DataNamespace.ValidateForData(); err != nil {
return fmt.Errorf("%w: error validating DataNamespace: %w", ErrInvalidShwapID, err)
}
return nil
}

// appendTo appends the binary form of the row range and DataNamespace to the serialized EdsID data.
func (ndid NamespaceDataID) appendTo(data []byte) []byte {
data = ndid.EdsID.appendTo(data)
data = binary.BigEndian.AppendUint16(data, uint16(ndid.FromRowIndex))
data = binary.BigEndian.AppendUint16(data, uint16(ndid.ToRowIndex))
return append(data, ndid.DataNamespace...)
}
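
For reference, the byte layout that appendTo and NamespaceDataIDFromBinary agree on, derived from the code above:

// Serialized NamespaceDataID layout (offsets from the start of the buffer):
//
//	[0, EdsIDSize)                      EdsID (block height)
//	[EdsIDSize, EdsIDSize+2)            FromRowIndex, big-endian uint16
//	[EdsIDSize+2, EdsIDSize+4)          ToRowIndex, big-endian uint16
//	[EdsIDSize+4, NamespaceDataIDSize)  DataNamespace (share.NamespaceSize bytes)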
28 changes: 28 additions & 0 deletions share/shwap/namespace_data_id_test.go
@@ -0,0 +1,28 @@
package shwap

import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/celestiaorg/celestia-node/share/sharetest"
)

func TestNamespaceDataID(t *testing.T) {
odsSize := 4
ns := sharetest.RandV0Namespace()

id, err := NewNamespaceDataID(1, 1, 2, ns, odsSize*2)
require.NoError(t, err)

data, err := id.MarshalBinary()
require.NoError(t, err)

sidOut, err := NamespaceDataIDFromBinary(data)
require.NoError(t, err)
assert.EqualValues(t, id, sidOut)

err = sidOut.Verify(odsSize * 2)
require.NoError(t, err)
}
116 changes: 116 additions & 0 deletions share/shwap/p2p/discovery/backoff.go
@@ -0,0 +1,116 @@
package discovery

import (
"context"
"errors"
"sync"
"time"

"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
)

const (
// gcInterval is the default period after which disconnected peers will be removed from the cache
gcInterval = time.Minute
// connectTimeout is the timeout used for dialing peers and discovering peer addresses.
connectTimeout = time.Minute * 2
)

var (
defaultBackoffFactory = backoff.NewFixedBackoff(time.Minute * 10)
errBackoffNotEnded = errors.New("share/discovery: backoff period has not ended")
)

// backoffConnector wraps a libp2p host.Host to establish connections with peers,
// adding a delay before the next connection attempt to the same peer.
type backoffConnector struct {
h host.Host
backoff backoff.BackoffFactory

cacheLk sync.Mutex
cacheData map[peer.ID]backoffData
}

// backoffData stores the time of the next allowed connection attempt with the remote peer.
type backoffData struct {
nexttry time.Time
backoff backoff.BackoffStrategy
}

func newBackoffConnector(h host.Host, factory backoff.BackoffFactory) *backoffConnector {
return &backoffConnector{
h: h,
backoff: factory,
cacheData: make(map[peer.ID]backoffData),
}
}

// Connect records the peer in the backoff cache and tries to establish a connection with it.
func (b *backoffConnector) Connect(ctx context.Context, p peer.AddrInfo) error {
if b.HasBackoff(p.ID) {
return errBackoffNotEnded
}

ctx, cancel := context.WithTimeout(ctx, connectTimeout)
defer cancel()

err := b.h.Connect(ctx, p)
// we don't want to add backoff when the context is canceled.
if !errors.Is(err, context.Canceled) {
b.Backoff(p.ID)
}
return err
}

// Backoff adds or extends the backoff delay for the peer.
func (b *backoffConnector) Backoff(p peer.ID) {
b.cacheLk.Lock()
defer b.cacheLk.Unlock()

data, ok := b.cacheData[p]
if !ok {
data = backoffData{}
data.backoff = b.backoff()
b.cacheData[p] = data
}

data.nexttry = time.Now().Add(data.backoff.Delay())
b.cacheData[p] = data
}

// HasBackoff reports whether the peer is currently in backoff.
func (b *backoffConnector) HasBackoff(p peer.ID) bool {
b.cacheLk.Lock()
cache, ok := b.cacheData[p]
b.cacheLk.Unlock()
return ok && time.Now().Before(cache.nexttry)
}

// GC is a perpetual garbage-collection loop that removes expired backoff entries until the context is canceled.
func (b *backoffConnector) GC(ctx context.Context) {
ticker := time.NewTicker(gcInterval)
defer ticker.Stop()

for {
select {
case <-ctx.Done():
return
case <-ticker.C:
b.cacheLk.Lock()
for id, cache := range b.cacheData {
if cache.nexttry.Before(time.Now()) {
delete(b.cacheData, id)
}
}
b.cacheLk.Unlock()
}
}
}

// Size returns the number of peers currently tracked in the backoff cache.
func (b *backoffConnector) Size() int {
b.cacheLk.Lock()
defer b.cacheLk.Unlock()
return len(b.cacheData)
}
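
A minimal usage sketch from inside the discovery package; connectWithBackoff is a hypothetical helper, with h and p standing in for a libp2p host and a discovered peer:

// connectWithBackoff dials the peer once and relies on the connector to
// throttle repeated attempts to the same peer.
func connectWithBackoff(ctx context.Context, h host.Host, p peer.AddrInfo) {
	conn := newBackoffConnector(h, defaultBackoffFactory)
	go conn.GC(ctx) // prune expired backoff entries in the background

	if err := conn.Connect(ctx, p); err != nil {
		if errors.Is(err, errBackoffNotEnded) {
			return // peer is still in backoff; retry later
		}
		// The failed attempt was recorded, so the next Connect call for
		// this peer is delayed by the backoff strategy.
	}
}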