/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package state
import (
"bytes"
"sync"
"sync/atomic"
"time"
pb "github.com/golang/protobuf/proto"
vsccErrors "github.com/hyperledger/fabric/common/errors"
"github.com/hyperledger/fabric/gossip/api"
"github.com/hyperledger/fabric/gossip/comm"
common2 "github.com/hyperledger/fabric/gossip/common"
"github.com/hyperledger/fabric/gossip/discovery"
"github.com/hyperledger/fabric/gossip/metrics"
"github.com/hyperledger/fabric/gossip/util"
"github.com/hyperledger/fabric/protos/common"
proto "github.com/hyperledger/fabric/protos/gossip"
"github.com/hyperledger/fabric/protos/ledger/rwset"
"github.com/hyperledger/fabric/protos/transientstore"
"github.com/pkg/errors"
)
// GossipStateProvider is the interface to acquire sequences of the ledger blocks,
// capable of fulfilling missing blocks by running state replication and
// sending requests for missing blocks to other nodes
type GossipStateProvider interface {
// AddPayload adds a payload into the local state sync buffer
AddPayload(payload *proto.Payload) error
// Stop terminates state transfer object
Stop()
}
const (
DefAntiEntropyInterval = 10 * time.Second
DefAntiEntropyStateResponseTimeout = 3 * time.Second
DefAntiEntropyBatchSize = 10
DefChannelBufferSize = 100
DefAntiEntropyMaxRetries = 3
DefMaxBlockDistance = 100
Blocking = true
NonBlocking = false
enqueueRetryInterval = time.Millisecond * 100
)
// Configuration keeps state transfer configuration parameters
type Configuration struct {
AntiEntropyInterval time.Duration
AntiEntropyStateResponseTimeout time.Duration
AntiEntropyBatchSize uint64
MaxBlockDistance int
AntiEntropyMaxRetries int
ChannelBufferSize int
EnableStateTransfer bool
BlockingMode bool
}
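
// The helper below is an illustrative sketch and not part of the original
// Fabric source: it only shows how the Def* constants above could be wired
// into a Configuration (blocking mode, state transfer enabled). Real peers
// derive these values from their configuration instead.
func defaultConfigurationSketch() *Configuration {
	return &Configuration{
		AntiEntropyInterval:             DefAntiEntropyInterval,
		AntiEntropyStateResponseTimeout: DefAntiEntropyStateResponseTimeout,
		AntiEntropyBatchSize:            DefAntiEntropyBatchSize,
		MaxBlockDistance:                DefMaxBlockDistance,
		AntiEntropyMaxRetries:           DefAntiEntropyMaxRetries,
		ChannelBufferSize:               DefChannelBufferSize,
		EnableStateTransfer:             true,
		BlockingMode:                    Blocking,
	}
}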
// GossipAdapter defines the gossip/communication interface required by the state provider
type GossipAdapter interface {
// Send sends a message to remote peers
Send(msg *proto.GossipMessage, peers ...*comm.RemotePeer)
// Accept returns a dedicated read-only channel for messages sent by other nodes that match a certain predicate.
// If passThrough is false, the messages are processed by the gossip layer beforehand.
// If passThrough is true, the gossip layer doesn't intervene and the messages
// can be used to send a reply back to the sender
Accept(acceptor common2.MessageAcceptor, passThrough bool) (<-chan *proto.GossipMessage, <-chan proto.ReceivedMessage)
// UpdateLedgerHeight updates the ledger height the peer
// publishes to other peers in the channel
UpdateLedgerHeight(height uint64, chainID common2.ChainID)
// PeersOfChannel returns the NetworkMembers considered alive
// and also subscribed to the channel given
PeersOfChannel(common2.ChainID) []discovery.NetworkMember
}
// MCSAdapter is an adapter of the message crypto service interface, bounding the
// specific APIs required by the state transfer service
type MCSAdapter interface {
// VerifyBlock returns nil if the block is properly signed and the claimed seqNum is the
// sequence number that the block's header contains;
// otherwise it returns an error
VerifyBlock(chainID common2.ChainID, seqNum uint64, signedBlock []byte) error
// VerifyByChannel checks that the signature is a valid signature of the message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeds, VerifyByChannel returns nil, meaning no error occurred.
// If peerIdentity is nil, then the verification fails.
VerifyByChannel(chainID common2.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error
}
// ledgerResources defines abilities that the ledger provides
type ledgerResources interface {
// StoreBlock delivers a new block with the underlying private data
// and returns the missing transaction ids
StoreBlock(block *common.Block, data util.PvtDataCollections) error
// StorePvtData is used to persist private data into the transient store
StorePvtData(txid string, privData *transientstore.TxPvtReadWriteSetWithConfigInfo, blckHeight uint64) error
// GetPvtDataAndBlockByNum gets the block by number and also returns all related private data.
// The order of private data in the PvtDataCollections slice doesn't imply the order of
// transactions in the block related to these private data; to get the correct placement,
// read the TxPvtData.SeqInBlock field
GetPvtDataAndBlockByNum(seqNum uint64, peerAuthInfo common.SignedData) (*common.Block, util.PvtDataCollections, error)
// LedgerHeight returns the current height of the ledger
LedgerHeight() (uint64, error)
// Close ledgerResources
Close()
}
// ServicesMediator is an aggregated adapter compounding all mediators
// required by state transfer into a single struct
type ServicesMediator struct {
GossipAdapter
MCSAdapter
}
// GossipStateProviderImpl is the implementation of the GossipStateProvider interface,
// the struct that handles an in-memory sliding window of
// new ledger blocks to be acquired and committed to the ledger
type GossipStateProviderImpl struct {
// Chain id
chainID string
mediator *ServicesMediator
// Channel to read gossip messages from
gossipChan <-chan *proto.GossipMessage
commChan <-chan proto.ReceivedMessage
// Queue of payloads which weren't acquired yet
payloads PayloadsBuffer
ledger ledgerResources
stateResponseCh chan proto.ReceivedMessage
stateRequestCh chan proto.ReceivedMessage
stopCh chan struct{}
done sync.WaitGroup
once sync.Once
stateTransferActive int32
requestValidator *stateRequestValidator
config *Configuration
stateMetrics *metrics.StateMetrics
}
var logger = util.GetLogger(util.StateLogger, "")
// stateRequestValidator facilitates validation of the state request messages
type stateRequestValidator struct {
}
// validate checks for RemoteStateRequest message validity
func (v *stateRequestValidator) validate(request *proto.RemoteStateRequest, batchSize uint64) error {
if request.StartSeqNum > request.EndSeqNum {
return errors.Errorf("Invalid sequence interval [%d...%d).", request.StartSeqNum, request.EndSeqNum)
}
if request.EndSeqNum > batchSize+request.StartSeqNum {
return errors.Errorf("Requesting blocks range [%d-%d) greater than configured allowed"+
" (%d) batching size for anti-entropy.", request.StartSeqNum, request.EndSeqNum, batchSize)
}
return nil
}
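// For example (illustrative numbers), with batchSize = 10 a request for the
// interval [100...110] passes validation, [100...111] is rejected because its
// end exceeds StartSeqNum+batchSize, and [110...100] is rejected as an
// inverted interval.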
// NewGossipStateProvider creates state provider with coordinator instance
// to orchestrate arrival of private rwsets and blocks before committing them into the ledger.
func NewGossipStateProvider(chainID string, services *ServicesMediator, ledger ledgerResources, stateMetrics *metrics.StateMetrics, config *Configuration) GossipStateProvider {
gossipChan, _ := services.Accept(func(message interface{}) bool {
// Get only data messages
return message.(*proto.GossipMessage).IsDataMsg() &&
bytes.Equal(message.(*proto.GossipMessage).Channel, []byte(chainID))
}, false)
remoteStateMsgFilter := func(message interface{}) bool {
receivedMsg := message.(proto.ReceivedMessage)
msg := receivedMsg.GetGossipMessage()
if !(msg.IsRemoteStateMessage() || msg.GetPrivateData() != nil) {
return false
}
// Ensure we deal only with messages that belong to this channel
if !bytes.Equal(msg.Channel, []byte(chainID)) {
return false
}
connInfo := receivedMsg.GetConnectionInfo()
authErr := services.VerifyByChannel(msg.Channel, connInfo.Identity, connInfo.Auth.Signature, connInfo.Auth.SignedData)
if authErr != nil {
logger.Warning("Got unauthorized request from", string(connInfo.Identity))
return false
}
return true
}
// Filter messages which are relevant only for state transfer
_, commChan := services.Accept(remoteStateMsgFilter, true)
height, err := ledger.LedgerHeight()
if err != nil {
logger.Error("Could not read ledger info to obtain current ledger height due to: ", errors.WithStack(err))
// Exiting as without the ledger it will be impossible
// to deliver new blocks
return nil
}
if height == 0 {
// Panic here since this is an indication of an invalid situation which should not happen in a normal
// code path.
logger.Panic("Committer height cannot be zero, ledger should include at least one block (genesis).")
}
s := &GossipStateProviderImpl{
// MessageCryptoService
mediator: services,
// Chain ID
chainID: chainID,
// Channel to read new messages from
gossipChan: gossipChan,
// Channel to read direct messages from other peers
commChan: commChan,
// Create a queue for payloads, wrapped in a metrics buffer
payloads: &metricsBuffer{
PayloadsBuffer: NewPayloadsBuffer(height),
sizeMetrics: stateMetrics.PayloadBufferSize,
chainID: chainID,
},
ledger: ledger,
stateResponseCh: make(chan proto.ReceivedMessage, config.ChannelBufferSize),
stateRequestCh: make(chan proto.ReceivedMessage, config.ChannelBufferSize),
stopCh: make(chan struct{}, 1),
stateTransferActive: 0,
once: sync.Once{},
requestValidator: &stateRequestValidator{},
config: config,
stateMetrics: stateMetrics,
}
logger.Infof("Updating metadata information for channel %s, "+
"current ledger sequence is at = %d, next expected block is = %d", chainID, height-1, s.payloads.Next())
logger.Debug("Updating gossip ledger height to", height)
services.UpdateLedgerHeight(height, common2.ChainID(s.chainID))
s.done.Add(4)
// Listen for incoming communication
go s.listen()
// Deliver in order messages into the incoming channel
go s.deliverPayloads()
if s.config.EnableStateTransfer {
// Execute anti entropy to fill missing gaps
go s.antiEntropy()
}
// Taking care of state request messages
go s.processStateRequests()
return s
}
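// Illustrative wiring sketch (the surrounding names are assumptions, not part
// of this file): the gossip service layer typically creates one provider per
// channel and tears it down when the channel is closed, e.g.
//
//	mediator := &ServicesMediator{GossipAdapter: gossipSvc, MCSAdapter: mcs}
//	provider := NewGossipStateProvider(chainID, mediator, coordinator, stateMetrics, cfg)
//	defer provider.Stop()
//
// where coordinator implements ledgerResources and cfg is a *Configuration.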
func (s *GossipStateProviderImpl) listen() {
defer s.done.Done()
for {
select {
case msg := <-s.gossipChan:
logger.Debug("Received new message via gossip channel")
go s.queueNewMessage(msg)
case msg := <-s.commChan:
logger.Debug("Dispatching a message", msg)
go s.dispatch(msg)
case <-s.stopCh:
s.stopCh <- struct{}{}
logger.Debug("Stop listening for new messages")
return
}
}
}
func (s *GossipStateProviderImpl) dispatch(msg proto.ReceivedMessage) {
// Check type of the message
if msg.GetGossipMessage().IsRemoteStateMessage() {
logger.Debug("Handling direct state transfer message")
// Got state transfer request response
s.directMessage(msg)
} else if msg.GetGossipMessage().GetPrivateData() != nil {
logger.Debug("Handling private data collection message")
// Handling private data replication message
s.privateDataMessage(msg)
}
}
func (s *GossipStateProviderImpl) privateDataMessage(msg proto.ReceivedMessage) {
if !bytes.Equal(msg.GetGossipMessage().Channel, []byte(s.chainID)) {
logger.Warning("Received state transfer request for channel",
string(msg.GetGossipMessage().Channel), "while expecting channel", s.chainID, "skipping request...")
return
}
gossipMsg := msg.GetGossipMessage()
pvtDataMsg := gossipMsg.GetPrivateData()
if pvtDataMsg.Payload == nil {
logger.Warning("Malformed private data message, no payload provided")
return
}
collectionName := pvtDataMsg.Payload.CollectionName
txID := pvtDataMsg.Payload.TxId
pvtRwSet := pvtDataMsg.Payload.PrivateRwset
if len(pvtRwSet) == 0 {
logger.Warning("Malformed private data message, no rwset provided, collection name = ", collectionName)
return
}
txPvtRwSet := &rwset.TxPvtReadWriteSet{
DataModel: rwset.TxReadWriteSet_KV,
NsPvtRwset: []*rwset.NsPvtReadWriteSet{{
Namespace: pvtDataMsg.Payload.Namespace,
CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{{
CollectionName: collectionName,
Rwset: pvtRwSet,
}}},
},
}
txPvtRwSetWithConfig := &transientstore.TxPvtReadWriteSetWithConfigInfo{
PvtRwset: txPvtRwSet,
CollectionConfigs: map[string]*common.CollectionConfigPackage{
pvtDataMsg.Payload.Namespace: pvtDataMsg.Payload.CollectionConfigs,
},
}
if err := s.ledger.StorePvtData(txID, txPvtRwSetWithConfig, pvtDataMsg.Payload.PrivateSimHeight); err != nil {
logger.Errorf("Wasn't able to persist private data for collection %s, due to %s", collectionName, err)
msg.Ack(err) // Sending NACK to indicate failure of storing the collection
return
}
msg.Ack(nil)
logger.Debug("Private data for collection", collectionName, "has been stored")
}
func (s *GossipStateProviderImpl) directMessage(msg proto.ReceivedMessage) {
logger.Debug("[ENTER] -> directMessage")
defer logger.Debug("[EXIT] -> directMessage")
if msg == nil {
logger.Error("Got nil message via end-to-end channel, should not happen!")
return
}
if !bytes.Equal(msg.GetGossipMessage().Channel, []byte(s.chainID)) {
logger.Warning("Received state transfer request for channel",
string(msg.GetGossipMessage().Channel), "while expecting channel", s.chainID, "skipping request...")
return
}
incoming := msg.GetGossipMessage()
if incoming.GetStateRequest() != nil {
if len(s.stateRequestCh) < s.config.ChannelBufferSize {
// Forward the state request to the channel; if there are too
// many state request messages, drop them to avoid flooding.
s.stateRequestCh <- msg
}
} else if incoming.GetStateResponse() != nil {
// If no state transfer procedure is active there is
// no reason to process the message
if atomic.LoadInt32(&s.stateTransferActive) == 1 {
// Send signal of state response message
s.stateResponseCh <- msg
}
}
}
func (s *GossipStateProviderImpl) processStateRequests() {
defer s.done.Done()
for {
select {
case msg := <-s.stateRequestCh:
s.handleStateRequest(msg)
case <-s.stopCh:
s.stopCh <- struct{}{}
return
}
}
}
// handleStateRequest handles a state request message: it validates the batch size, reads the current ledger state to
// obtain the required blocks, builds a response message and sends it back
func (s *GossipStateProviderImpl) handleStateRequest(msg proto.ReceivedMessage) {
if msg == nil {
return
}
request := msg.GetGossipMessage().GetStateRequest()
if err := s.requestValidator.validate(request, s.config.AntiEntropyBatchSize); err != nil {
logger.Errorf("State request validation failed, %s. Ignoring request...", err)
return
}
currentHeight, err := s.ledger.LedgerHeight()
if err != nil {
logger.Errorf("Cannot access to current ledger height, due to %+v", errors.WithStack(err))
return
}
if currentHeight < request.EndSeqNum {
logger.Warningf("Received state request to transfer blocks with sequence numbers higher [%d...%d] "+
"than available in ledger (%d)", request.StartSeqNum, request.StartSeqNum, currentHeight)
}
endSeqNum := min(currentHeight, request.EndSeqNum)
response := &proto.RemoteStateResponse{Payloads: make([]*proto.Payload, 0)}
for seqNum := request.StartSeqNum; seqNum <= endSeqNum; seqNum++ {
logger.Debug("Reading block ", seqNum, " with private data from the coordinator service")
connInfo := msg.GetConnectionInfo()
peerAuthInfo := common.SignedData{
Data: connInfo.Auth.SignedData,
Signature: connInfo.Auth.Signature,
Identity: connInfo.Identity,
}
block, pvtData, err := s.ledger.GetPvtDataAndBlockByNum(seqNum, peerAuthInfo)
if err != nil {
logger.Errorf("cannot read block number %d from ledger, because %+v, skipping...", seqNum, err)
continue
}
if block == nil {
logger.Errorf("Wasn't able to read block with sequence number %d from ledger, skipping....", seqNum)
continue
}
blockBytes, err := pb.Marshal(block)
if err != nil {
logger.Errorf("Could not marshal block: %+v", errors.WithStack(err))
continue
}
var pvtBytes [][]byte
if pvtData != nil {
// Marshal private data
pvtBytes, err = pvtData.Marshal()
if err != nil {
logger.Errorf("Failed to marshal private rwset for block %d due to %+v", seqNum, errors.WithStack(err))
continue
}
}
// Appending result to the response
response.Payloads = append(response.Payloads, &proto.Payload{
SeqNum: seqNum,
Data: blockBytes,
PrivateData: pvtBytes,
})
}
// Sending back response with missing blocks
msg.Respond(&proto.GossipMessage{
// Copy nonce field from the request, so it will be possible to match response
Nonce: msg.GetGossipMessage().Nonce,
Tag: proto.GossipMessage_CHAN_OR_ORG,
Channel: []byte(s.chainID),
Content: &proto.GossipMessage_StateResponse{StateResponse: response},
})
}
func (s *GossipStateProviderImpl) handleStateResponse(msg proto.ReceivedMessage) (uint64, error) {
max := uint64(0)
// Send signal that response for given nonce has been received
response := msg.GetGossipMessage().GetStateResponse()
// Extract payloads, verify and push into buffer
if len(response.GetPayloads()) == 0 {
return uint64(0), errors.New("Received state transfer response without payload")
}
for _, payload := range response.GetPayloads() {
logger.Debugf("Received payload with sequence number %d.", payload.SeqNum)
if err := s.mediator.VerifyBlock(common2.ChainID(s.chainID), payload.SeqNum, payload.Data); err != nil {
err = errors.WithStack(err)
logger.Warningf("Error verifying block with sequence number %d, due to %+v", payload.SeqNum, err)
return uint64(0), err
}
if max < payload.SeqNum {
max = payload.SeqNum
}
err := s.addPayload(payload, Blocking)
if err != nil {
logger.Warningf("Block [%d] received from block transfer wasn't added to payload buffer: %v", payload.SeqNum, err)
}
}
return max, nil
}
// Stop sends a halting signal to all goroutines
func (s *GossipStateProviderImpl) Stop() {
// Make sure stop won't be executed twice
// and stop channel won't be used again
s.once.Do(func() {
s.stopCh <- struct{}{}
// Make sure all goroutines have finished
s.done.Wait()
// Close all resources
s.ledger.Close()
close(s.stateRequestCh)
close(s.stateResponseCh)
close(s.stopCh)
})
}
// queueNewMessage enqueues the payload of a new data message received via gossip
func (s *GossipStateProviderImpl) queueNewMessage(msg *proto.GossipMessage) {
if !bytes.Equal(msg.Channel, []byte(s.chainID)) {
logger.Warning("Received enqueue for channel",
string(msg.Channel), "while expecting channel", s.chainID, "ignoring enqueue")
return
}
dataMsg := msg.GetDataMsg()
if dataMsg != nil {
if err := s.addPayload(dataMsg.GetPayload(), NonBlocking); err != nil {
logger.Warningf("Block [%d] received from gossip wasn't added to payload buffer: %v", dataMsg.Payload.SeqNum, err)
return
}
} else {
logger.Debug("Gossip message received is not of data message type, usually this should not happen.")
}
}
func (s *GossipStateProviderImpl) deliverPayloads() {
defer s.done.Done()
for {
select {
// Wait for notification that next seq has arrived
case <-s.payloads.Ready():
logger.Debugf("[%s] Ready to transfer payloads (blocks) to the ledger, next block number is = [%d]", s.chainID, s.payloads.Next())
// Collect all subsequent payloads
for payload := s.payloads.Pop(); payload != nil; payload = s.payloads.Pop() {
rawBlock := &common.Block{}
if err := pb.Unmarshal(payload.Data, rawBlock); err != nil {
logger.Errorf("Error getting block with seqNum = %d due to (%+v)...dropping block", payload.SeqNum, errors.WithStack(err))
continue
}
if rawBlock.Data == nil || rawBlock.Header == nil {
logger.Errorf("Block with claimed sequence %d has no header (%v) or data (%v)",
payload.SeqNum, rawBlock.Header, rawBlock.Data)
continue
}
logger.Debugf("[%s] Transferring block [%d] with %d transaction(s) to the ledger", s.chainID, payload.SeqNum, len(rawBlock.Data.Data))
// Read all private data into slice
var p util.PvtDataCollections
if payload.PrivateData != nil {
err := p.Unmarshal(payload.PrivateData)
if err != nil {
logger.Errorf("Wasn't able to unmarshal private data for block seqNum = %d due to (%+v)...dropping block", payload.SeqNum, errors.WithStack(err))
continue
}
}
if err := s.commitBlock(rawBlock, p); err != nil {
if executionErr, isExecutionErr := err.(*vsccErrors.VSCCExecutionFailureError); isExecutionErr {
logger.Errorf("Failed executing VSCC due to %v. Aborting chain processing", executionErr)
return
}
logger.Panicf("Cannot commit block to the ledger due to %+v", errors.WithStack(err))
}
}
case <-s.stopCh:
s.stopCh <- struct{}{}
logger.Debug("State provider has been stopped, finishing to push new blocks.")
return
}
}
}
func (s *GossipStateProviderImpl) antiEntropy() {
defer s.done.Done()
defer logger.Debug("State Provider stopped, stopping anti entropy procedure.")
for {
select {
case <-s.stopCh:
s.stopCh <- struct{}{}
return
case <-time.After(s.config.AntiEntropyInterval):
ourHeight, err := s.ledger.LedgerHeight()
if err != nil {
// Unable to read from ledger continue to the next round
logger.Errorf("Cannot obtain ledger height, due to %+v", errors.WithStack(err))
continue
}
if ourHeight == 0 {
logger.Error("Ledger reported block height of 0 but this should be impossible")
continue
}
maxHeight := s.maxAvailableLedgerHeight()
if ourHeight >= maxHeight {
continue
}
s.requestBlocksInRange(uint64(ourHeight), uint64(maxHeight)-1)
}
}
}
// maxAvailableLedgerHeight iterates over all available peers and checks advertised meta state to
// find maximum available ledger height across peers
func (s *GossipStateProviderImpl) maxAvailableLedgerHeight() uint64 {
max := uint64(0)
for _, p := range s.mediator.PeersOfChannel(common2.ChainID(s.chainID)) {
if p.Properties == nil {
logger.Debug("Peer", p.PreferredEndpoint(), "doesn't have properties, skipping it")
continue
}
peerHeight := p.Properties.LedgerHeight
if max < peerHeight {
max = peerHeight
}
}
return max
}
// requestBlocksInRange acquires blocks with sequence
// numbers in the range [start...end].
func (s *GossipStateProviderImpl) requestBlocksInRange(start uint64, end uint64) {
atomic.StoreInt32(&s.stateTransferActive, 1)
defer atomic.StoreInt32(&s.stateTransferActive, 0)
for prev := start; prev <= end; {
next := min(end, prev+s.config.AntiEntropyBatchSize)
gossipMsg := s.stateRequestMessage(prev, next)
responseReceived := false
tryCounts := 0
for !responseReceived {
if tryCounts > s.config.AntiEntropyMaxRetries {
logger.Warningf("Wasn't able to get blocks in range [%d...%d), after %d retries",
prev, next, tryCounts)
return
}
// Select peers to ask for blocks
peer, err := s.selectPeerToRequestFrom(next)
if err != nil {
logger.Warningf("Cannot send state request for blocks in range [%d...%d), due to %+v",
prev, next, errors.WithStack(err))
return
}
logger.Debugf("State transfer, with peer %s, requesting blocks in range [%d...%d), "+
"for chainID %s", peer.Endpoint, prev, next, s.chainID)
s.mediator.Send(gossipMsg, peer)
tryCounts++
// Wait until timeout or response arrival
select {
case msg := <-s.stateResponseCh:
if msg.GetGossipMessage().Nonce != gossipMsg.Nonce {
continue
}
// Got corresponding response for state request, can continue
index, err := s.handleStateResponse(msg)
if err != nil {
logger.Warningf("Wasn't able to process state response for "+
"blocks [%d...%d], due to %+v", prev, next, errors.WithStack(err))
continue
}
prev = index + 1
responseReceived = true
case <-time.After(s.config.AntiEntropyStateResponseTimeout):
case <-s.stopCh:
s.stopCh <- struct{}{}
return
}
}
}
}
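// Worked example (illustrative numbers): with AntiEntropyBatchSize = 10, a
// call to requestBlocksInRange(100, 125) first requests blocks [100...110];
// once a matching response delivers block 110, prev advances to 111 and the
// next requests cover [111...121] and finally [122...125].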
// stateRequestMessage generates state request message for given blocks in range [beginSeq...endSeq]
func (s *GossipStateProviderImpl) stateRequestMessage(beginSeq uint64, endSeq uint64) *proto.GossipMessage {
return &proto.GossipMessage{
Nonce: util.RandomUInt64(),
Tag: proto.GossipMessage_CHAN_OR_ORG,
Channel: []byte(s.chainID),
Content: &proto.GossipMessage_StateRequest{
StateRequest: &proto.RemoteStateRequest{
StartSeqNum: beginSeq,
EndSeqNum: endSeq,
},
},
}
}
// selectPeerToRequestFrom selects a peer which has the required blocks, to ask for the missing blocks from
func (s *GossipStateProviderImpl) selectPeerToRequestFrom(height uint64) (*comm.RemotePeer, error) {
// Filter peers which possess the required range of missing blocks
peers := s.filterPeers(s.hasRequiredHeight(height))
n := len(peers)
if n == 0 {
return nil, errors.New("there are no peers to ask for missing blocks from")
}
// Select peer to ask for blocks
return peers[util.RandomInt(n)], nil
}
// filterPeers returns the list of peers which satisfy the given predicate
func (s *GossipStateProviderImpl) filterPeers(predicate func(peer discovery.NetworkMember) bool) []*comm.RemotePeer {
var peers []*comm.RemotePeer
for _, member := range s.mediator.PeersOfChannel(common2.ChainID(s.chainID)) {
if predicate(member) {
peers = append(peers, &comm.RemotePeer{Endpoint: member.PreferredEndpoint(), PKIID: member.PKIid})
}
}
return peers
}
// hasRequiredHeight returns a predicate which filters peers whose advertised ledger height is at least the
// height given by the input parameter
func (s *GossipStateProviderImpl) hasRequiredHeight(height uint64) func(peer discovery.NetworkMember) bool {
return func(peer discovery.NetworkMember) bool {
if peer.Properties != nil {
return peer.Properties.LedgerHeight >= height
}
logger.Debug(peer.PreferredEndpoint(), "doesn't have properties")
return false
}
}
// AddPayload adds new payload into state.
func (s *GossipStateProviderImpl) AddPayload(payload *proto.Payload) error {
return s.addPayload(payload, s.config.BlockingMode)
}
// addPayload adds new payload into state. It may (or may not) block according to the
// given parameter. If it gets a block while in blocking mode - it would wait until
// the block is sent into the payloads buffer.
// Else - it may drop the block, if the payload buffer is too full.
func (s *GossipStateProviderImpl) addPayload(payload *proto.Payload, blockingMode bool) error {
if payload == nil {
return errors.New("Given payload is nil")
}
logger.Debugf("[%s] Adding payload to local buffer, blockNum = [%d]", s.chainID, payload.SeqNum)
height, err := s.ledger.LedgerHeight()
if err != nil {
return errors.Wrap(err, "Failed obtaining ledger height")
}
if !blockingMode && payload.SeqNum-height >= uint64(s.config.MaxBlockDistance) {
return errors.Errorf("Ledger height is at %d, cannot enqueue block with sequence of %d", height, payload.SeqNum)
}
for blockingMode && s.payloads.Size() > s.config.MaxBlockDistance*2 {
time.Sleep(enqueueRetryInterval)
}
s.payloads.Push(payload)
logger.Debugf("Blocks payloads buffer size for channel [%s] is %d blocks", s.chainID, s.payloads.Size())
return nil
}
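// Worked example (illustrative numbers): with MaxBlockDistance = 100 and a
// ledger height of 50, a non-blocking addPayload accepts block 149
// (149-50 < 100) but rejects block 150 (150-50 >= 100); in blocking mode the
// call instead sleeps while the buffer holds more than 200 payloads.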
func (s *GossipStateProviderImpl) commitBlock(block *common.Block, pvtData util.PvtDataCollections) error {
t1 := time.Now()
// Commit block with available private transactions
if err := s.ledger.StoreBlock(block, pvtData); err != nil {
logger.Errorf("Got error while committing(%+v)", errors.WithStack(err))
return err
}
sinceT1 := time.Since(t1)
s.stateMetrics.CommitDuration.With("channel", s.chainID).Observe(sinceT1.Seconds())
// Update ledger height
s.mediator.UpdateLedgerHeight(block.Header.Number+1, common2.ChainID(s.chainID))
logger.Debugf("[%s] Committed block [%d] with %d transaction(s)",
s.chainID, block.Header.Number, len(block.Data.Data))
s.stateMetrics.Height.With("channel", s.chainID).Set(float64(block.Header.Number + 1))
return nil
}
// min returns the smaller of a and b using a branchless bit trick: when a < b,
// a-b underflows so its most significant bit is set, the shift and negation
// produce an all-ones mask, and b ^ (a ^ b) selects a; otherwise the mask is
// zero and b is returned.
func min(a uint64, b uint64) uint64 {
return b ^ ((a ^ b) & (-(uint64(a-b) >> 63)))
}
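
// minBranching is an illustrative, branch-based equivalent of min above and is
// not part of the original file; it is included only to make the bit trick
// explicit.
func minBranching(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}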