handlers.go
package dht

import (
	"errors"
	"fmt"

	proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto"
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	key "github.com/ipfs/go-ipfs/blocks/key"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	pb "github.com/ipfs/go-ipfs/routing/dht/pb"
	lgbl "github.com/ipfs/go-ipfs/util/eventlog/loggables"
)

// CloserPeerCount is the number of closer peers to send on requests.
var CloserPeerCount = 4

// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)
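
// handlerForMsgType returns the handler registered for the given message
// type, or nil if the type is not recognized.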
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_GET_VALUE:
		return dht.handleGetValue
	case pb.Message_PUT_VALUE:
		return dht.handlePutValue
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_ADD_PROVIDER:
		return dht.handleAddProvider
	case pb.Message_GET_PROVIDERS:
		return dht.handleGetProviders
	case pb.Message_PING:
		return dht.handlePing
	default:
		return nil
	}
}
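
// handleGetValue answers a GET_VALUE request: it returns the record stored
// locally for the requested key (if any), along with closer peers to query.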
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := pmes.GetKey()
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := key.Key(k).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Debug("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent!
					[local:%s]
					[sending:%s]
					[remote:%s]`, dht.self, pi.ID, p)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}

// handlePutValue stores a value in this peer's local storage.
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handlePutValue", p).Done()
	dskey := key.Key(pmes.GetKey()).DsKey()

	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
		log.Debugf("Bad dht record in PUT from: %s. %s", key.Key(pmes.GetRecord().GetAuthor()), err)
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v", dht.self, dskey)
	return pmes, err
}
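
// handlePing replies to a PING request by echoing the message back to the sender.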
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}
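
// handleFindPeer answers a FIND_NODE request with the closest known peers
// that have addresses in the peerstore.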
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
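
// handleGetProviders answers a GET_PROVIDERS request with the providers known
// for the requested key (adding this node if it holds the value locally),
// plus closer peers to query.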
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if providers != nil && len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
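
// handleAddProvider records the sender as a provider for the given key,
// ignoring provider entries that do not come from the sender itself.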
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}

		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}