reactor.go
package evidence

import (
	"context"
	"fmt"
	"runtime/debug"
	"sync"
	"time"

	clist "github.com/anchorageoss/sei-tendermint/internal/libs/clist"
	"github.com/anchorageoss/sei-tendermint/internal/p2p"
	"github.com/anchorageoss/sei-tendermint/libs/log"
	"github.com/anchorageoss/sei-tendermint/libs/service"
	tmproto "github.com/anchorageoss/sei-tendermint/proto/tendermint/types"
	"github.com/anchorageoss/sei-tendermint/types"
)

var _ service.Service = (*Reactor)(nil)

const (
	EvidenceChannel = p2p.ChannelID(0x38)

	maxMsgSize = 4194304 // 4MB; TODO: make this configurable

	// broadcastEvidenceIntervalS sets how often all uncommitted evidence is
	// rebroadcast, i.e. when the reactor goes back to the start of the list
	// and begins sending the evidence again. Most evidence should be committed
	// in the very next block, which is why we wait just over the block
	// production rate before sending evidence again.
	broadcastEvidenceIntervalS = 10
)
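
// As an illustration (the numbers here are assumptions, not protocol
// constants): with block times of a few seconds, a 10s rebroadcast interval
// gives newly gossiped evidence at least one full block in which to be
// committed before it is sent to the same peers again.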

// GetChannelDescriptor produces an instance of a descriptor for this
// package's required channels.
func GetChannelDescriptor() *p2p.ChannelDescriptor {
	return &p2p.ChannelDescriptor{
		ID:                  EvidenceChannel,
		MessageType:         new(tmproto.Evidence),
		Priority:            6,
		RecvMessageCapacity: maxMsgSize,
		RecvBufferCapacity:  32,
		Name:                "evidence",
	}
}
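
// A minimal sketch of how a caller might obtain the reactor's channel from
// this descriptor. The router value and the exact OpenChannel signature are
// assumptions for illustration and may differ in this fork:
//
//	ch, err := router.OpenChannel(ctx, evidence.GetChannelDescriptor())
//	if err != nil {
//		// handle the error
//	}
//	reactor.SetChannel(ch)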

// Reactor handles evpool evidence broadcasting amongst peers.
type Reactor struct {
	service.BaseService
	logger log.Logger

	evpool     *Pool
	peerEvents p2p.PeerEventSubscriber

	mtx          sync.Mutex
	peerRoutines map[types.NodeID]context.CancelFunc
	channel      *p2p.Channel
}

// NewReactor returns a reference to a new evidence reactor, which implements
// the service.Service interface. The reactor's evidence channel, which carries
// individual tmproto.Evidence messages, must be injected via SetChannel before
// the reactor is started.
func NewReactor(
	logger log.Logger,
	peerEvents p2p.PeerEventSubscriber,
	evpool *Pool,
) *Reactor {
	r := &Reactor{
		logger:       logger,
		evpool:       evpool,
		peerEvents:   peerEvents,
		peerRoutines: make(map[types.NodeID]context.CancelFunc),
	}

	r.BaseService = *service.NewBaseService(logger, "Evidence", r)

	return r
}

// SetChannel sets the evidence channel the reactor receives from and sends on.
func (r *Reactor) SetChannel(ch *p2p.Channel) {
	r.channel = ch
}
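
// A minimal lifecycle sketch, assuming the logger, peer-event subscriber,
// evidence pool, and channel were constructed elsewhere (all names here are
// illustrative):
//
//	r := evidence.NewReactor(logger, peerEvents, pool)
//	r.SetChannel(evidenceCh)
//	if err := r.Start(ctx); err != nil {
//		return err
//	}
//	// ... later, cancel ctx to shut the reactor down; OnStop closes the pool.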

// OnStart starts separate goroutines that listen for envelopes on the
// evidence channel and for peer updates, handling messages on each
// accordingly. The caller must be sure to execute OnStop to ensure the
// outbound p2p Channels are closed. No error is returned.
func (r *Reactor) OnStart(ctx context.Context) error {
	go r.processEvidenceCh(ctx, r.channel)
	go r.processPeerUpdates(ctx, r.peerEvents(ctx), r.channel)

	return nil
}

// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() { r.evpool.Close() }

// handleEvidenceMessage handles envelopes sent from peers on the
// EvidenceChannel. It returns an error only if the Envelope.Message is unknown
// for this channel or if the given evidence is invalid. This should never be
// called outside of handleMessage.
func (r *Reactor) handleEvidenceMessage(ctx context.Context, envelope *p2p.Envelope) error {
	logger := r.logger.With("peer", envelope.From)

	switch msg := envelope.Message.(type) {
	case *tmproto.Evidence:
		// Process the evidence received from a peer. Evidence is sent and
		// received one piece at a time.
		ev, err := types.EvidenceFromProto(msg)
		if err != nil {
			logger.Error("failed to convert evidence", "err", err)
			return err
		}

		if err := r.evpool.AddEvidence(ctx, ev); err != nil {
			// If we're given invalid evidence by the peer, notify the router that
			// we should remove this peer by returning an error.
			if _, ok := err.(*types.ErrInvalidEvidence); ok {
				return err
			}
		}

	default:
		return fmt.Errorf("received unknown message: %T", msg)
	}

	return nil
}

// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

	r.logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)

	switch envelope.ChannelID {
	case EvidenceChannel:
		err = r.handleEvidenceMessage(ctx, envelope)
	default:
		err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope)
	}

	return err
}

// processEvidenceCh implements a blocking event loop where we listen for p2p
// Envelope messages from the evidence channel.
func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel) {
	iter := evidenceCh.Receive(ctx)
	for iter.Next(ctx) {
		envelope := iter.Envelope()
		if err := r.handleMessage(ctx, envelope); err != nil {
			r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err)
			if serr := evidenceCh.SendError(ctx, p2p.PeerError{
				NodeID: envelope.From,
				Err:    err,
			}); serr != nil {
				return
			}
		}
	}
}

// processPeerUpdate processes a PeerUpdate. For new or live peers it will check
// if an evidence broadcasting goroutine needs to be started. For down or
// removed peers, it will check if an evidence broadcasting goroutine
// exists and signal that it should exit.
//
// FIXME: The peer may be behind in which case it would simply ignore the
// evidence and treat it as invalid. This would cause the peer to disconnect.
// The peer may also receive the same piece of evidence multiple times if it
// connects/disconnects frequently from the broadcasting peer(s).
//
// REF: https://github.com/anchorageoss/sei-tendermint/issues/4727
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh *p2p.Channel) {
	r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)

	r.mtx.Lock()
	defer r.mtx.Unlock()

	switch peerUpdate.Status {
	case p2p.PeerStatusUp:
		// Do not allow starting new evidence broadcast loops after reactor shutdown
		// has been initiated. This can happen after we've manually closed all
		// peer broadcast loops, but the router still sends in-flight peer updates.
		if !r.IsRunning() {
			return
		}

		// Check if we've already started a goroutine for this peer. If not, we
		// create a cancelable child context so we can explicitly stop the
		// goroutine if the peer is later removed, record its cancel function,
		// and start the goroutine to broadcast evidence to that peer.
		if _, ok := r.peerRoutines[peerUpdate.NodeID]; !ok {
			pctx, pcancel := context.WithCancel(ctx)
			r.peerRoutines[peerUpdate.NodeID] = pcancel
			go r.broadcastEvidenceLoop(pctx, peerUpdate.NodeID, evidenceCh)
		}

	case p2p.PeerStatusDown:
		// Check if we've started an evidence broadcasting goroutine for this
		// peer. If we have, cancel its context to terminate it; the goroutine's
		// deferred cleanup removes the peer from the peerRoutines map.
		if closer, ok := r.peerRoutines[peerUpdate.NodeID]; ok {
			closer()
		}
	}
}

// processPeerUpdates initiates a blocking process where we listen for and
// handle PeerUpdate messages. The loop exits gracefully when the given
// context is canceled, i.e. when the reactor is stopped.
func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh *p2p.Channel) {
	for {
		select {
		case peerUpdate := <-peerUpdates.Updates():
			r.processPeerUpdate(ctx, peerUpdate, evidenceCh)

		case <-ctx.Done():
			return
		}
	}
}

// broadcastEvidenceLoop starts a blocking process that continuously reads
// pieces of evidence off of a linked-list and sends the evidence in a p2p
// Envelope to the given peer by ID. This should be invoked in a goroutine per
// unique peer ID via an appropriate PeerUpdate. The goroutine exits gracefully
// when its context is canceled, either because the peer went down or because
// the reactor is stopping.
//
// TODO: This should be refactored so that we do not blindly gossip evidence
// that the peer has already received or may not be ready for.
//
// REF: https://github.com/anchorageoss/sei-tendermint/issues/4727
func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh *p2p.Channel) {
	var next *clist.CElement

	defer func() {
		r.mtx.Lock()
		delete(r.peerRoutines, peerID)
		r.mtx.Unlock()

		if e := recover(); e != nil {
			r.logger.Error(
				"recovering from broadcasting evidence loop",
				"err", e,
				"stack", string(debug.Stack()),
			)
		}
	}()

	timer := time.NewTimer(0)
	defer timer.Stop()

	for {
		// next is nil on the first pass, after the rebroadcast timer fires, or
		// because the CElement we were looking at got garbage collected
		// (removed): its NextWaitChan fired but .Next() returned nil. In all of
		// these cases we start again from the front of the list.
		if next == nil {
			select {
			case <-r.evpool.EvidenceWaitChan(): // wait until the next evidence is available
				if next = r.evpool.EvidenceFront(); next == nil {
					continue
				}

			case <-ctx.Done():
				return
			}
		}

		ev := next.Value.(types.Evidence)
		evProto, err := types.EvidenceToProto(ev)
		if err != nil {
			panic(fmt.Errorf("failed to convert evidence: %w", err))
		}

		// Send the evidence to the corresponding peer. Note, the peer may be
		// behind and thus would not be able to process the evidence correctly.
		// Also, the peer may receive this piece of evidence multiple times if
		// it connects to and disconnects from the broadcasting peer frequently.
		if err := evidenceCh.Send(ctx, p2p.Envelope{
			To:      peerID,
			Message: evProto,
		}); err != nil {
			return
		}

		r.logger.Debug("gossiped evidence to peer", "evidence", ev, "peer", peerID)

		select {
		case <-timer.C:
			// start from the beginning after broadcastEvidenceIntervalS seconds
			timer.Reset(time.Second * broadcastEvidenceIntervalS)
			next = nil

		case <-next.NextWaitChan():
			next = next.Next()
			timer.Stop()

		case <-ctx.Done():
			return
		}
	}
}