test: mock e2e tests
dndll committed Apr 16, 2024
1 parent f08b47f commit a59d5b4
Showing 4 changed files with 224 additions and 94 deletions.
123 changes: 69 additions & 54 deletions dataavailability/near/near.go
@@ -87,70 +87,94 @@ type Sequence struct {
Batches [][]byte
}

func (s *Sequence) encode() (*sidecar.Blob, error) {
var buf bytes.Buffer
encoder := gob.NewEncoder(&buf)
err := encoder.Encode(s)
if err != nil {
return nil, fmt.Errorf("error encoding sequence for NEAR: %s", err)
}
blob := sidecar.Blob{
Data: buf.Bytes(),
}

return &blob, err
}

func decode(b *sidecar.Blob) (*Sequence, error) {
var s Sequence

buf := bytes.NewReader(b.Data)
codec := gob.NewDecoder(buf)
err := codec.Decode(&s)
if len(s.Batches) == 0 {
return nil, fmt.Errorf("sequence is empty")
}
return &s, err
}
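The encode and decode helpers above are a plain encoding/gob round trip: the Sequence is serialised into the blob body on the way out and deserialised on the way back. A minimal, self-contained sketch of that round trip, using a local stand-in for sidecar.Blob (which is defined in the sidecar client package, not here):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// Stand-in types for illustration; the real Sequence and sidecar.Blob live in
// this repository and the NEAR DA sidecar client respectively.
type Sequence struct{ Batches [][]byte }
type Blob struct{ Data []byte }

func main() {
	seq := Sequence{Batches: [][]byte{{0x01, 0x02}, {0x03}}}

	// Serialise the sequence into the blob body, as (*Sequence).encode does.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&seq); err != nil {
		log.Fatalf("encode: %v", err)
	}
	blob := Blob{Data: buf.Bytes()}

	// Deserialise it again, as decode does, recovering the original batches.
	var out Sequence
	if err := gob.NewDecoder(bytes.NewReader(blob.Data)).Decode(&out); err != nil {
		log.Fatalf("decode: %v", err)
	}
	fmt.Println(len(out.Batches)) // 2
}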

func hexEncode(b [][]byte) string {
var str string = "0x"
var bytes []byte
for _, batch := range b {
str += fmt.Sprintf("%x", batch)
bytes = append(bytes, batch...)
}
return str
return common.Bytes2Hex(bytes)
}

func hexEncode2(b []byte) string {
return fmt.Sprintf("0x%x", b)
return common.Bytes2Hex(b)
}
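One behavioural detail of this change: the old helpers prefixed their output with "0x", whereas go-ethereum's common.Bytes2Hex returns bare hex (it behaves like the standard library's hex.EncodeToString), so the debug logs lose the prefix. A quick stdlib illustration of the difference:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	b := []byte{0xde, 0xad, 0xbe, 0xef}
	fmt.Printf("0x%x\n", b)            // old style: 0xdeadbeef
	fmt.Println(hex.EncodeToString(b)) // new style, like common.Bytes2Hex: deadbeef
}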

const MaxBatchSize = 4 * 1024 * 1024 // Max batch size is 4MB

// PostSequence posts a sequence of batches to the Near blockchain.
// It takes a context and a slice of byte slices representing the batches data.
// It returns the transaction ID of the submitted sequence and any error encountered.
func (n *NearProtocolBackend) PostSequence(ctx context.Context, batches [][]byte) ([]byte, error) {
const maxBatchSize = 4 * 1024 * 1024 // Max batch size is 4MB
log.Debugf("submitting batches %s", hexEncode(batches))
// count the size of all batches, overflowing batches into multiple sequences
sequences := ChunkedSequences(batches)
log.Debugf("Submitting %d sequences", len(sequences))

var blobRefs []byte
// For each sequence, encode it and submit it as a blob, collecting the returned refs
for _, seq := range sequences {
blob, err := seq.encode()
if err != nil {
return nil, err
}

blobRef, err := n.Client.SubmitBlob(*blob)
if err != nil {
return nil, fmt.Errorf("error submitting data to NEAR: %s", err)
}
log.Debugf("Blob ref: %s", hexEncode2(blobRef.Deref()))
blobRefs = append(blobRefs, blobRef.Deref()...)
}
return blobRefs, nil
}
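In line with the commit title, the submit/get contract that PostSequence builds on is easy to exercise against an in-memory mock instead of a live NEAR node. A self-contained sketch with hypothetical stand-ins for the sidecar client and blob ref (the ref is assumed to be 32 bytes, the size the old GetSequence hard-coded):

package main

import (
	"crypto/sha256"
	"fmt"
)

// Hypothetical stand-ins for sidecar.Blob, the blob ref, and the client.
type Blob struct{ Data []byte }
type BlobRef [32]byte

type mockClient struct{ store map[BlobRef]Blob }

func (m *mockClient) SubmitBlob(b Blob) BlobRef {
	ref := BlobRef(sha256.Sum256(b.Data)) // content-addressed for the mock
	m.store[ref] = b
	return ref
}

func (m *mockClient) GetBlob(ref BlobRef) (Blob, bool) {
	b, ok := m.store[ref]
	return b, ok
}

func main() {
	c := &mockClient{store: map[BlobRef]Blob{}}

	// Submit two blobs and concatenate their refs, as PostSequence does
	// to build the data availability message.
	var daMessage []byte
	for _, data := range [][]byte{[]byte("seq-0"), []byte("seq-1")} {
		ref := c.SubmitBlob(Blob{Data: data})
		daMessage = append(daMessage, ref[:]...)
	}

	// Split the message back into 32-byte refs and fetch each blob,
	// as GetSequence does.
	for off := 0; off < len(daMessage); off += 32 {
		var ref BlobRef
		copy(ref[:], daMessage[off:off+32])
		blob, ok := c.GetBlob(ref)
		fmt.Println(ok, string(blob.Data))
	}
}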

func ChunkedSequences(batches [][]byte) []Sequence {
// count the size of all batches, overflowing batches into multiple sequences
var sequences []Sequence
sequences = append(sequences, Sequence{})

size := 0
seqIndex := 0
for _, batch := range batches {
if len(sequences) == 0 {
sequences = append(sequences, Sequence{})
}

size += len(batch)
if size > maxBatchSize {
size = len(batch)

if size > MaxBatchSize {
seqIndex++
size = len(batch)

var seq Sequence
seq.Batches = append(seq.Batches, batch)
sequences = append(sequences, seq)
} else {
sequences[seqIndex].Batches = append(sequences[seqIndex].Batches, batch)
}
log.Debugf("Sequence %s", hexEncode(sequences[seqIndex].Batches))
}
log.Debugf("Submitting %d sequences", len(sequences))

var blobRefs []byte
for _, seq := range sequences {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err := enc.Encode(seq)
if err != nil {
return nil, fmt.Errorf("error encoding sequence for NEAR: %s", err)
}

blob := sidecar.Blob{
Data: buf.Bytes(),
}
log.Debugf("Blob: %s", hexEncode2(blob.Data))

blobRef, err := n.Client.SubmitBlob(blob)
log.Debugf("Blob ref: %s", hexEncode2(blobRef.Deref()))
if err != nil {
return nil, fmt.Errorf("error submitting data to NEAR: %s", err)
}
blobRefs = append(blobRefs, blobRef.Deref()...)
}
return blobRefs, nil
return sequences
}
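A property worth pinning down in a unit test next to the mocked e2e tests: chunking may split batches across sequences, but it should never drop, duplicate, or reorder them. A sketch of such a test, assuming it lives in the same package as ChunkedSequences (the package name near is an assumption):

package near

import (
	"bytes"
	"testing"
)

func TestChunkedSequencesPreservesBatches(t *testing.T) {
	// A mix of small batches and one larger than the 4MB cap.
	batches := [][]byte{
		bytes.Repeat([]byte{0x01}, 1024),
		bytes.Repeat([]byte{0x02}, 5*1024*1024),
		bytes.Repeat([]byte{0x03}, 2*1024*1024),
	}

	// Flatten the chunked sequences back into a single list of batches.
	var flattened [][]byte
	for _, seq := range ChunkedSequences(batches) {
		flattened = append(flattened, seq.Batches...)
	}

	if len(flattened) != len(batches) {
		t.Fatalf("expected %d batches, got %d", len(batches), len(flattened))
	}
	for i := range batches {
		if !bytes.Equal(flattened[i], batches[i]) {
			t.Fatalf("batch %d was altered by chunking", i)
		}
	}
}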

// GetSequence retrieves a sequence of batches from the Near blockchain.
@@ -159,41 +183,32 @@ func (n *NearProtocolBackend) PostSequence(ctx context.Context, batches [][]byte
func (n *NearProtocolBackend) GetSequence(ctx context.Context, batchHashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error) {
var batchData [][]byte

log.Debugf("Retrieving %d batches from dataAvailabilityMessage %s", len(batchHashes), hexEncode2(dataAvailabilityMessage))
log.Debugf("Retrieving %d batches from dataAvailabilityMessage %s: %d", len(batchHashes), hexEncode2(dataAvailabilityMessage), len(dataAvailabilityMessage))

// FIXME: define the size of the ref in the library
// Chunk the da message into references
for _, ref := range chunks(dataAvailabilityMessage, 32) {
blobRef, err := sidecar.NewBlobRef(ref)
for _, blobRef := range Chunks(dataAvailabilityMessage, sidecar.EncodedBlobRefSize) {
blobRef, err := sidecar.NewBlobRef(blobRef)
if err != nil {
return nil, fmt.Errorf("error reading blob: %s", err)
return nil, fmt.Errorf("error reading blob reference: %s", err)
}

log.Debugf("Retrieving %s from %s", hexEncode2(blobRef.Deref()), n.host)
blob, err := n.Client.GetBlob(*blobRef)
if err != nil {
return nil, fmt.Errorf("error getting data from NEAR: %s", err)
return nil, fmt.Errorf("error fetching blob %s: %s", hexEncode2(blobRef.Deref()), err)
}
log.Debugf("Retrieved blob %s", hexEncode2(blob.Data))

buf := bytes.NewReader(blob.Data)
codec := gob.NewDecoder(buf)

var seq Sequence
err = codec.Decode(&seq)
seq, err := decode(blob)
if err != nil {
return nil, fmt.Errorf("error encoding sequence for NEAR: %s", err)
return nil, fmt.Errorf("error decoding sequence from NEAR: %s", err)
}
log.Debugf("Decoded sequence %s", seq)

batchData = append(batchData, seq.Batches...)
}

log.Debugf("Retrieved batches %s", batchData)
return batchData, nil
}

func chunks(message []byte, chunkSize int) [][]byte {
func Chunks(message []byte, chunkSize int) [][]byte {
var chunks [][]byte
for {
if len(message) == 0 {
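The body of Chunks is cut off in this view. For reference, a fixed-size splitter of this shape typically looks like the sketch below — an illustration of the pattern GetSequence relies on (fixed-size blob refs concatenated into the DA message), not necessarily the exact code behind the fold:

package main

import "fmt"

// chunksSketch splits message into consecutive pieces of at most chunkSize
// bytes; the final piece may be shorter. Illustrative only.
func chunksSketch(message []byte, chunkSize int) [][]byte {
	var out [][]byte
	for len(message) > 0 {
		n := chunkSize
		if n > len(message) {
			n = len(message)
		}
		out = append(out, message[:n])
		message = message[n:]
	}
	return out
}

func main() {
	refs := chunksSketch(make([]byte, 96), 32)
	fmt.Println(len(refs)) // 3 refs of 32 bytes each
}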
