availability.go
package light

import (
	"context"
	"errors"
	"sync"

	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/autobatch"
	"github.com/ipfs/go-datastore/namespace"
	ipldFormat "github.com/ipfs/go-ipld-format"
	logging "github.com/ipfs/go-log/v2"

	"github.com/celestiaorg/celestia-node/header"
	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/getters"
)

var (
	log                     = logging.Logger("share/light")
	cacheAvailabilityPrefix = datastore.NewKey("sampling_result")
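	// writeBatchSize is the write batch size for the autobatching datastore
	// that persists sampling results.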
	writeBatchSize = 2048
)

// ShareAvailability implements share.Availability using the Data Availability Sampling technique.
// It is light because it does not require downloading all the data to verify
// its availability. It is assumed that there are many light availability instances
// on the network doing sampling over the same Root to collectively verify its availability.
type ShareAvailability struct {
	getter share.Getter
	params Parameters

	// TODO(@Wondertan): Once we come to parallelized DASer, this lock becomes a contention point
	// Related to #483
	// TODO: Striped locks? :D
	dsLk sync.RWMutex
	ds   *autobatch.Datastore
}

// NewShareAvailability creates a new light Availability.
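//
// A minimal usage sketch, assuming a share.Getter and a datastore.Batching are
// constructed elsewhere (getter, ds, ctx, and eh are illustrative names):
//
//	avail := light.NewShareAvailability(getter, ds)
//	if err := avail.SharesAvailable(ctx, eh); err != nil {
//		// data is not available (or sampling failed)
//	}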
func NewShareAvailability(
	getter share.Getter,
	ds datastore.Batching,
	opts ...Option,
) *ShareAvailability {
	params := DefaultParameters()
	ds = namespace.Wrap(ds, cacheAvailabilityPrefix)
	autoDS := autobatch.NewAutoBatching(ds, writeBatchSize)

	for _, opt := range opts {
		opt(&params)
	}

	return &ShareAvailability{
		getter: getter,
		params: params,
		ds:     autoDS,
	}
}

// SharesAvailable randomly samples `params.SampleAmount` Shares committed to the given
// ExtendedHeader. This way SharesAvailable subjectively verifies that Shares are available.
func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error {
	dah := header.DAH
	// short-circuit if the given root is the minimum DAH of an empty data square
	if share.DataHash(dah.Hash()).IsEmptyRoot() {
		return nil
	}

	// do not sample over a Root that has already been sampled
	key := rootKey(dah)
	la.dsLk.RLock()
	exists, err := la.ds.Has(ctx, key)
	la.dsLk.RUnlock()
	if err != nil || exists {
		return err
	}
log.Debugw("validate availability", "root", dah.String())
// We assume the caller of this method has already performed basic validation on the
// given dah/root. If for some reason this has not happened, the node should panic.
if err := dah.ValidateBasic(); err != nil {
log.Errorw("availability validation cannot be performed on a malformed DataAvailabilityHeader",
"err", err)
panic(err)
}
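
	// pick random coordinates within the square to sample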
	samples, err := SampleSquare(len(dah.RowRoots), int(la.params.SampleAmount))
	if err != nil {
		return err
	}

	// indicate to the share.Getter that a blockservice session should be created. This
	// functionality is optional and must be supported by the used share.Getter.
	ctx = getters.WithSession(ctx)

	log.Debugw("starting sampling session", "root", dah.String())
	errs := make(chan error, len(samples))
	for _, s := range samples {
		go func(s Sample) {
			log.Debugw("fetching share", "root", dah.String(), "row", s.Row, "col", s.Col)
			_, err := la.getter.GetShare(ctx, header, s.Row, s.Col)
			if err != nil {
				log.Debugw("error fetching share", "root", dah.String(), "row", s.Row, "col", s.Col)
			}
			// we don't really care about Share bodies at this point;
			// a successful fetch also means the Share is now saved in local storage
			select {
			case errs <- err:
			case <-ctx.Done():
			}
		}(s)
	}
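
	// collect the results, returning on the first error encountered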
	for range samples {
		var err error
		select {
		case err = <-errs:
		case <-ctx.Done():
			err = ctx.Err()
		}

		if err != nil {
			if errors.Is(err, context.Canceled) {
				return err
			}
			log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error())
			if ipldFormat.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) {
				return share.ErrNotAvailable
			}
			return err
		}
	}
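
	// record the successfully sampled Root so that subsequent calls short-circuit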
	la.dsLk.Lock()
	err = la.ds.Put(ctx, key, []byte{})
	la.dsLk.Unlock()
	if err != nil {
		log.Errorw("storing root of successful SharesAvailable request to disk", "err", err)
	}

	return nil
}
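
// rootKey derives the datastore key under which a successfully sampled Root is cached.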
func rootKey(root *share.Root) datastore.Key {
	return datastore.NewKey(root.String())
}

// Close flushes all queued writes to disk.
func (la *ShareAvailability) Close(ctx context.Context) error {
	return la.ds.Flush(ctx)
}