workers.go
package bitswap

import (
	"time"

	inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect"
	process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
)

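// startWorkers launches the long-running bitswap goroutines on the given
// goprocess so they share its lifetime and are torn down when it closes.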
func (bs *bitswap) startWorkers(px process.Process, ctx context.Context) {
	// Start up a worker to handle block requests this node is making
	px.Go(func(px process.Process) {
		bs.clientWorker(ctx)
	})

	// Start up a worker to handle requests from other nodes for the data on this node
	px.Go(func(px process.Process) {
		bs.taskWorker(ctx)
	})

	// Start up a worker to manage periodically resending our wantlist out to peers
	px.Go(func(px process.Process) {
		bs.rebroadcastWorker(ctx)
	})

	// Spawn multiple workers to handle incoming blocks; consider increasing
	// the number if providing blocks bottlenecks file transfers.
	for i := 0; i < provideWorkers; i++ {
		px.Go(func(px process.Process) {
			bs.provideWorker(ctx)
		})
	}
}

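// taskWorker drains the decision engine's outbox, delivering each envelope
// (a message of blocks destined for a particular peer) over the network.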
func (bs *bitswap) taskWorker(ctx context.Context) {
	defer log.Info("bitswap task worker shutting down...")
	for {
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer)
				bs.send(ctx, envelope.Peer, envelope.Message)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

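// provideWorker announces each newly received block to the network so that
// other peers can discover this node as a provider for it.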
func (bs *bitswap) provideWorker(ctx context.Context) {
	for {
		select {
		case blk, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}
			// Bound each provide announcement by provideTimeout, and release
			// the timeout's resources as soon as the call returns.
			provideCtx, cancel := context.WithTimeout(ctx, provideTimeout)
			err := bs.network.Provide(provideCtx, blk.Key())
			cancel()
			if err != nil {
				log.Error(err)
			}
		case <-ctx.Done():
			return
		}
	}
}

// TODO ensure only one active request per key
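// clientWorker turns batch block requests from this node into wantlist
// entries, finds providers for the first requested key, and sends the
// wantlist to them.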
func (bs *bitswap) clientWorker(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")
	for {
		select {
		case req := <-bs.batchRequests:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			for i, k := range keys {
				bs.wantlist.Add(k, kMaxPriority-i)
			}

			done := make(chan struct{})
			go func() {
				bs.wantNewBlocks(req.ctx, keys)
				close(done)
			}()

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in
			// almost every situation, but the assumption may not hold later.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			err := bs.sendWantlistToPeers(req.ctx, providers)
			cancel()
			if err != nil {
				log.Debugf("error sending wantlist: %s", err)
			}

			// Wait for wantNewBlocks to finish
			<-done
		case <-parent.Done():
			return
		}
	}
}

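// rebroadcastWorker periodically logs the wantlist size and, on a delay
// configured by rebroadcastDelay, resends any unfulfilled wantlist entries
// to providers.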
func (bs *bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.After(rebroadcastDelay.Get())
	// Create the status ticker once, outside the loop, so a new ticker is not
	// leaked on every iteration.
	tick := time.Tick(10 * time.Second)

	for {
		select {
		case <-tick:
			n := bs.wantlist.Len()
			if n > 0 {
				log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist")
			}
		case <-broadcastSignal: // resend unfulfilled wantlist keys
			entries := bs.wantlist.Entries()
			if len(entries) > 0 {
				bs.sendWantlistToProviders(ctx, entries)
			}
			broadcastSignal = time.After(rebroadcastDelay.Get())
		case <-parent.Done():
			return
		}
	}
}
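
For orientation, here is a minimal, self-contained sketch of the goprocess pattern that startWorkers relies on; the worker body, timings, and printed messages are illustrative assumptions, not taken from go-ipfs.

package main

import (
	"fmt"
	"time"

	process "github.com/jbenet/goprocess"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Parent process; Close() waits for every child spawned via Go.
	px := process.WithTeardown(func() error {
		fmt.Println("teardown complete")
		return nil
	})

	// Spawn a worker under px, the same way startWorkers does.
	px.Go(func(px process.Process) {
		for {
			select {
			case <-time.After(100 * time.Millisecond):
				fmt.Println("worker tick")
			case <-ctx.Done():
				fmt.Println("worker shutting down")
				return
			}
		}
	})

	time.Sleep(300 * time.Millisecond)
	cancel()   // the workers in this file stop via their context...
	px.Close() // ...and the process tree is closed afterwards
}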