-
Notifications
You must be signed in to change notification settings - Fork 5
/
util.go
158 lines (138 loc) · 5.32 KB
/
util.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
package dtsync
import (
"context"
"fmt"
"strings"
"time"
dt "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-data-transfer/v2/channelmonitor"
datatransfer "github.com/filecoin-project/go-data-transfer/v2/impl"
dtnetwork "github.com/filecoin-project/go-data-transfer/v2/network"
gstransport "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-graphsync"
gsimpl "github.com/ipfs/go-graphsync/impl"
gsnet "github.com/ipfs/go-graphsync/network"
"github.com/ipld/go-ipld-prime"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
)
// Time to wait for datatransfer to gracefully stop before canceling.
const datatransferStopTimeout = time.Minute

// Default caps on the number of in-progress graphsync requests. These are
// applied in makeDataTransfer when the caller passes zero for the
// corresponding limit.
const (
	defaultGsMaxInReq  = 1024
	defaultGsMaxOutReq = 1024
)

// dtCloseFunc shuts down a datatransfer manager created by makeDataTransfer
// and returns any error encountered while stopping it.
type dtCloseFunc func() error
// configureDataTransferForDagsync configures an existing data transfer
// instance to serve dagsync requests from given linksystem (publisher only).
// configureDataTransferForDagsync configures an existing data transfer
// instance to serve dagsync requests from given linksystem (publisher only).
// It registers the dagsync voucher type (tolerating an already-registered
// type) and installs a transport configurer that serves data from lsys.
func configureDataTransferForDagsync(ctx context.Context, dtManager dt.Manager, lsys ipld.LinkSystem, allowPeer func(peer.ID) bool) error {
	if err := registerVoucher(dtManager, allowPeer); err != nil {
		return err
	}
	storeConfig := dagsyncStorageConfiguration{lsys}
	err := dtManager.RegisterTransportConfigurer(LegsVoucherType, storeConfig.configureTransport)
	if err != nil {
		return fmt.Errorf("failed to register datatransfer TransportConfigurer: %w", err)
	}
	return nil
}
// dagsyncStorageConfiguration carries the link system used to configure the
// graphsync transport store for dagsync data-transfer channels.
type dagsyncStorageConfiguration struct {
	linkSystem ipld.LinkSystem
}
// configureTransport returns the transport options applied to a dagsync
// channel: a single option directing graphsync to use lsc's link system as
// the store. The channel ID and voucher are not consulted.
func (lsc dagsyncStorageConfiguration) configureTransport(_ dt.ChannelID, _ dt.TypedVoucher) []dt.TransportOption {
	opts := make([]dt.TransportOption, 0, 1)
	opts = append(opts, gstransport.UseStore(lsc.linkSystem))
	return opts
}
// registerVoucher registers the dagsync voucher type with the given
// datatransfer manager, backed by a dagsyncValidator holding allowPeer
// (presumably used to filter which peers' requests are accepted — confirm
// against dagsyncValidator). If the voucher type is already registered, the
// error is logged and swallowed, since that legitimately happens when a host
// is both a publisher and a subscriber.
func registerVoucher(dtManager dt.Manager, allowPeer func(peer.ID) bool) error {
	val := &dagsyncValidator{
		allowPeer: allowPeer,
	}
	err := dtManager.RegisterVoucherType(LegsVoucherType, val)
	if err != nil {
		// This can happen if a host is both a publisher and a subscriber.
		if strings.Contains(err.Error(), "identifier already registered: "+string(LegsVoucherType)) {
			// Matching the error string is the best we can do until datatransfer exposes some handles
			// to either check for types or re-register vouchers.
			// Fixed typo in the log message: "datatrasfer" -> "datatransfer".
			log.Warn("voucher type already registered; skipping datatransfer voucher registration", "type", LegsVoucherType)
			return nil
		}
		return fmt.Errorf("failed to register dagsync validator voucher type: %w", err)
	}
	return nil
}
// makeDataTransfer builds a graphsync exchange and a datatransfer manager on
// the given libp2p host, registers the dagsync voucher type, starts the
// manager, and waits until it reports ready. It returns the manager, the
// graphsync exchange, and a close function that gracefully stops the manager,
// waiting up to datatransferStopTimeout before giving up.
//
// gsMaxInReq and gsMaxOutReq bound the number of in-progress incoming and
// outgoing graphsync requests; zero selects the package defaults.
func makeDataTransfer(host host.Host, ds datastore.Batching, lsys ipld.LinkSystem, allowPeer func(peer.ID) bool, gsMaxInReq, gsMaxOutReq uint64) (dt.Manager, graphsync.GraphExchange, dtCloseFunc, error) {
	gsNet := gsnet.NewFromLibp2pHost(host)
	ctx, cancel := context.WithCancel(context.Background())
	if gsMaxInReq == 0 {
		gsMaxInReq = defaultGsMaxInReq
	}
	if gsMaxOutReq == 0 {
		gsMaxOutReq = defaultGsMaxOutReq
	}
	gs := gsimpl.New(ctx, gsNet, lsys, gsimpl.MaxInProgressIncomingRequests(gsMaxInReq), gsimpl.MaxInProgressOutgoingRequests(gsMaxOutReq))
	dtNet := dtnetwork.NewFromLibp2pHost(host)
	tp := gstransport.NewTransport(host.ID(), gs)
	dtRestartConfig := datatransfer.ChannelRestartConfig(channelmonitor.Config{
		AcceptTimeout:   30 * time.Second,
		CompleteTimeout: time.Minute,
		// When an error occurs, wait a little while until all related errors
		// have fired before sending a restart message.
		RestartDebounce: 10 * time.Second,
		// After sending a restart, wait at least this long before sending another.
		RestartBackoff: 30 * time.Second,
		// After trying to restart this many times, give up and fail the transfer.
		MaxConsecutiveRestarts: 1,
	})
	dtManager, err := datatransfer.NewDataTransfer(ds, dtNet, tp, dtRestartConfig)
	if err != nil {
		cancel()
		return nil, nil, nil, fmt.Errorf("failed to instantiate datatransfer: %w", err)
	}
	err = registerVoucher(dtManager, allowPeer)
	if err != nil {
		cancel()
		return nil, nil, nil, fmt.Errorf("failed to register voucher: %w", err)
	}
	// Tell datatransfer to notify when ready. The channel is buffered so that
	// an OnReady callback firing after this function has already returned
	// (e.g. because Start failed below) does not block forever on the send.
	dtReady := make(chan error, 1)
	dtManager.OnReady(func(e error) {
		dtReady <- e
	})
	// Start datatransfer. Canceling ctx aborts Start, e.g. if fsm migration
	// takes too long; no separate timeout is applied here.
	if err = dtManager.Start(ctx); err != nil {
		cancel()
		return nil, nil, nil, fmt.Errorf("failed to start datatransfer: %w", err)
	}
	log.Info("Started data transfer manager successfully.")
	// Wait for datatransfer to be ready.
	log.Info("Awaiting data transfer manager to become ready...")
	err = <-dtReady
	if err != nil {
		log.Errorw("Failed while waiting for data transfer manager to become ready", "err", err)
		cancel()
		return nil, nil, nil, err
	}
	log.Info("Data transfer manager is ready.")
	// closeFunc stops the manager in a goroutine so the caller is never
	// blocked longer than datatransferStopTimeout, then cancels the context
	// shared by graphsync and datatransfer.
	closeFunc := func() error {
		errCh := make(chan error, 1)
		stopCtx, stopCancel := context.WithTimeout(context.Background(), datatransferStopTimeout)
		go func() {
			errCh <- dtManager.Stop(stopCtx)
		}()
		var err error
		select {
		case err = <-errCh:
			if err != nil {
				err = fmt.Errorf("failed to stop datatransfer manager: %w", err)
			}
		case <-stopCtx.Done():
			log.Errorw("Timeout waiting to stop datatransfer manager", "timeout", datatransferStopTimeout.String())
		}
		stopCancel()
		cancel()
		return err
	}
	return dtManager, gs, closeFunc, nil
}