-
Notifications
You must be signed in to change notification settings - Fork 199
/
hashValidatorShuffler.go
792 lines (661 loc) · 26.4 KB
/
hashValidatorShuffler.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
package nodesCoordinator
import (
"bytes"
"fmt"
"sort"
"sync"
"github.com/ElrondNetwork/elrond-go-core/core"
"github.com/ElrondNetwork/elrond-go-core/core/atomic"
"github.com/ElrondNetwork/elrond-go-core/hashing/sha256"
"github.com/ElrondNetwork/elrond-go/config"
)
// compile-time check that *randHashShuffler satisfies the NodesShuffler interface
var _ NodesShuffler = (*randHashShuffler)(nil)
// NodesShufflerArgs defines the arguments required to create a nodes shuffler
type NodesShufflerArgs struct {
	// NodesShard is the target number of eligible nodes per shard
	NodesShard uint32
	// NodesMeta is the target number of eligible nodes on the metachain
	NodesMeta uint32
	// Hysteresis scales the node counts to obtain the reshard thresholds (see computeNewShards)
	Hysteresis float32
	// Adaptivity enables shard split/merge decisions
	Adaptivity bool
	// ShuffleBetweenShards selects cross-shard (true) or intra-shard (false) redistribution
	ShuffleBetweenShards bool
	// MaxNodesEnableConfig holds the per-epoch max-nodes-change configurations
	MaxNodesEnableConfig []config.MaxNodesChangeConfig
	// BalanceWaitingListsEnableEpoch is the epoch when waiting-list balancing activates
	BalanceWaitingListsEnableEpoch uint32
	// WaitingListFixEnableEpoch is the epoch when the waiting-list fix activates
	WaitingListFixEnableEpoch uint32
}
// shuffleNodesArg bundles all inputs needed by shuffleNodes for one shuffle step
type shuffleNodesArg struct {
	eligible               map[uint32][]Validator // current eligible validators per shard
	waiting                map[uint32][]Validator // current waiting validators per shard
	unstakeLeaving         []Validator            // validators leaving due to unstaking (processed first)
	additionalLeaving      []Validator            // extra leaving validators, deduplicated against unstakeLeaving
	newNodes               []Validator            // validators to be added to the waiting lists
	randomness             []byte                 // per-epoch randomness driving the deterministic shuffle
	distributor            ValidatorsDistributor  // strategy used to redistribute shuffled-out validators
	nodesMeta              uint32                 // target number of eligible nodes on the metachain
	nodesPerShard          uint32                 // target number of eligible nodes per shard
	nbShards               uint32                 // current number of shards (excluding metachain)
	maxNodesToSwapPerShard uint32                 // upper bound on shuffled-out validators per shard
	flagBalanceWaitingLists bool                  // true when waiting-list balancing is active
	flagWaitingListFix      bool                  // true when the waiting-list fix is active
}
// TODO: Decide if transaction load statistics will be used for limiting the number of shards

// randHashShuffler shuffles validators deterministically based on the hash of
// each validator's public key concatenated with a per-epoch random value.
type randHashShuffler struct {
	// TODO: remove the references to this constant and the distributor
	// when reinitialization of node in new shard is implemented
	shuffleBetweenShards bool

	// the fields below are written under mutShufflerParams
	adaptivity            bool
	nodesShard            uint32
	nodesMeta             uint32
	shardHysteresis       uint32
	metaHysteresis        uint32
	activeNodesConfig     config.MaxNodesChangeConfig
	availableNodesConfigs []config.MaxNodesChangeConfig
	mutShufflerParams     sync.RWMutex

	validatorDistributor ValidatorsDistributor

	// feature activation epochs and their corresponding runtime flags
	balanceWaitingListsEnableEpoch uint32
	flagBalanceWaitingLists        atomic.Flag
	waitingListFixEnableEpoch      uint32
	flagWaitingListFix             atomic.Flag
}
// NewHashValidatorsShuffler creates a validator shuffler that uses a hash between validator key and a given
// random number to do the shuffling.
// Returns ErrNilNodeShufflerArguments when args is nil.
func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, error) {
	if args == nil {
		return nil, ErrNilNodeShufflerArguments
	}

	var configs []config.MaxNodesChangeConfig

	// fix: the first message previously logged the whole config slice under an
	// "epoch" key, and the last one read "waiting waiting list"
	log.Debug("hashValidatorShuffler: max nodes change config", "config", args.MaxNodesEnableConfig)
	log.Debug("hashValidatorShuffler: enable epoch for balance waiting lists", "epoch", args.BalanceWaitingListsEnableEpoch)

	// keep a private copy of the configs so later caller mutations do not leak in
	if args.MaxNodesEnableConfig != nil {
		configs = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig))
		copy(configs, args.MaxNodesEnableConfig)
	}

	log.Debug("Shuffler created", "shuffleBetweenShards", args.ShuffleBetweenShards)
	rxs := &randHashShuffler{
		shuffleBetweenShards:           args.ShuffleBetweenShards,
		availableNodesConfigs:          configs,
		balanceWaitingListsEnableEpoch: args.BalanceWaitingListsEnableEpoch,
		waitingListFixEnableEpoch:      args.WaitingListFixEnableEpoch,
	}
	log.Debug("randHashShuffler: enable epoch for balance waiting list", "epoch", rxs.balanceWaitingListsEnableEpoch)
	log.Debug("randHashShuffler: enable epoch for waiting list fix", "epoch", rxs.waitingListFixEnableEpoch)

	rxs.UpdateParams(args.NodesShard, args.NodesMeta, args.Hysteresis, args.Adaptivity)

	if rxs.shuffleBetweenShards {
		rxs.validatorDistributor = &CrossShardValidatorDistributor{}
	} else {
		rxs.validatorDistributor = &IntraShardValidatorDistributor{}
	}

	rxs.sortConfigs()

	return rxs, nil
}
// UpdateParams updates the shuffler parameters
// Should be called when new params are agreed through governance
func (rhs *randHashShuffler) UpdateParams(
	nodesShard uint32,
	nodesMeta uint32,
	hysteresis float32,
	adaptivity bool,
) {
	// TODO: are there constraints we want to enforce? e.g min/max hysteresis
	rhs.mutShufflerParams.Lock()
	defer rhs.mutShufflerParams.Unlock()

	rhs.shardHysteresis = uint32(float32(nodesShard) * hysteresis)
	rhs.metaHysteresis = uint32(float32(nodesMeta) * hysteresis)
	rhs.nodesShard = nodesShard
	rhs.nodesMeta = nodesMeta
	rhs.adaptivity = adaptivity
}
// UpdateNodeLists shuffles the nodes and returns the lists with the new nodes configuration
// The function needs to ensure that:
//  1. Old eligible nodes list will have up to shuffleOutThreshold percent nodes shuffled out from each shard
//  2. The leaving nodes are checked against the eligible nodes and waiting nodes and removed if present from the
//     pools and leaving nodes list (if remaining nodes can still sustain the shard)
//  3. shuffledOutNodes = oldEligibleNodes + waitingListNodes - minNbNodesPerShard (for each shard)
//  4. Old waiting nodes list for each shard will be added to the remaining eligible nodes list
//  5. The new nodes are equally distributed among the existing shards into waiting lists
//  6. The shuffled out nodes are distributed among the existing shards into waiting lists.
//     We may have three situations:
//     a) In case (shuffled out nodes + new nodes) > (nbShards * perShardHysteresis + minNodesPerShard) then
//     we need to prepare for a split event, so a higher percentage of nodes need to be directed to the shard
//     that will be split.
//     b) In case (shuffled out nodes + new nodes) < (nbShards * perShardHysteresis) then we can immediately
//     execute the shard merge
//     c) No change in the number of shards then nothing extra needs to be done
func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNodes, error) {
	rhs.UpdateShufflerConfig(args.Epoch)
	eligibleAfterReshard := copyValidatorMap(args.Eligible)
	waitingAfterReshard := copyValidatorMap(args.Waiting)

	// unstake leaving takes priority: drop from additional leaving anything already unstaking
	args.AdditionalLeaving = removeDupplicates(args.UnStakeLeaving, args.AdditionalLeaving)
	totalLeavingNum := len(args.AdditionalLeaving) + len(args.UnStakeLeaving)

	newNbShards := rhs.computeNewShards(
		args.Eligible,
		args.Waiting,
		len(args.NewNodes),
		totalLeavingNum,
		args.NbShards,
	)

	rhs.mutShufflerParams.RLock()
	canSplit := rhs.adaptivity && newNbShards > args.NbShards
	canMerge := rhs.adaptivity && newNbShards < args.NbShards
	nodesPerShard := rhs.nodesShard
	nodesMeta := rhs.nodesMeta
	rhs.mutShufflerParams.RUnlock()

	if canSplit {
		eligibleAfterReshard, waitingAfterReshard = rhs.splitShards(args.Eligible, args.Waiting, newNbShards)
	}
	if canMerge {
		eligibleAfterReshard, waitingAfterReshard = rhs.mergeShards(args.Eligible, args.Waiting, newNbShards)
	}

	// NOTE(review): activeNodesConfig is read here without holding
	// mutShufflerParams — confirm UpdateShufflerConfig cannot run concurrently
	// with this read.
	return shuffleNodes(shuffleNodesArg{
		eligible:                eligibleAfterReshard,
		waiting:                 waitingAfterReshard,
		unstakeLeaving:          args.UnStakeLeaving,
		additionalLeaving:       args.AdditionalLeaving,
		newNodes:                args.NewNodes,
		randomness:              args.Rand,
		nodesMeta:               nodesMeta,
		nodesPerShard:           nodesPerShard,
		nbShards:                args.NbShards,
		distributor:             rhs.validatorDistributor,
		maxNodesToSwapPerShard:  rhs.activeNodesConfig.NodesToShufflePerShard,
		flagBalanceWaitingLists: rhs.flagBalanceWaitingLists.IsSet(),
		flagWaitingListFix:      rhs.flagWaitingListFix.IsSet(),
	})
}
// removeDupplicates returns a copy of additionalLeaving from which every
// validator also present (by public key) in unstake has been removed.
// The caller's additionalLeaving slice is left untouched; the relative order
// of the result follows the swap-with-last removal of removeValidatorFromList.
func removeDupplicates(unstake []Validator, additionalLeaving []Validator) []Validator {
	result := append(make([]Validator, 0, len(additionalLeaving)), additionalLeaving...)

	for _, unstaked := range unstake {
		for idx := len(result) - 1; idx >= 0; idx-- {
			if !bytes.Equal(unstaked.PubKey(), result[idx].PubKey()) {
				continue
			}
			result = removeValidatorFromList(result, idx)
		}
	}

	return result
}
// removeNodesFromMap removes leaving validators from existingNodes, shard by
// shard in ascending id order (for determinism), bounded on each shard by the
// numToRemove budget.
// Note: numToRemove is decremented in place by the count actually removed per
// shard, so the caller's map reflects the remaining budget afterwards.
// Returns the (mutated) existingNodes map and the leaving validators that were
// not removed.
func removeNodesFromMap(
	existingNodes map[uint32][]Validator,
	leavingNodes []Validator,
	numToRemove map[uint32]int,
) (map[uint32][]Validator, []Validator) {
	sortedShardIds := sortKeys(existingNodes)
	numRemoved := 0

	for _, shardId := range sortedShardIds {
		numToRemoveOnShard := numToRemove[shardId]
		leavingNodes, numRemoved = removeNodesFromShard(existingNodes, leavingNodes, shardId, numToRemoveOnShard)
		numToRemove[shardId] -= numRemoved
	}

	return existingNodes, leavingNodes
}
// removeNodesFromShard removes up to nbToRemove of the given leaving validators
// from the shard's list inside existingNodes (updating the map in place).
// Returns the leaving validators not yet removed and the number removed here.
func removeNodesFromShard(existingNodes map[uint32][]Validator, leavingNodes []Validator, shard uint32, nbToRemove int) ([]Validator, int) {
	toRemove := nbToRemove
	if toRemove > len(leavingNodes) {
		toRemove = len(leavingNodes)
	}

	updatedShardList, removedNodes := removeValidatorsFromList(existingNodes[shard], leavingNodes, toRemove)
	existingNodes[shard] = updatedShardList

	remainingLeaving, _ := removeValidatorsFromList(leavingNodes, removedNodes, len(removedNodes))
	return remainingLeaving, len(removedNodes)
}
// IsInterfaceNil verifies if the underlying object is nil
// (needed because a typed nil pointer stored in an interface is not equal to nil)
func (rhs *randHashShuffler) IsInterfaceNil() bool {
	return rhs == nil
}
// shuffleNodes performs one full shuffle step on the given configuration:
//  1. computes the per-shard surplus (numToRemove), capped at maxNodesToSwapPerShard
//  2. drops leaving validators present in neither eligible nor waiting
//  3. removes the remaining leaving validators from the pools (unstake first, then additional)
//  4. shuffles out numToRemove validators from each eligible list
//  5. promotes waiting validators into eligible up to the per-shard targets
//  6. distributes the new nodes and then the shuffled-out ones into the waiting lists
func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) {
	// NOTE(review): append may write into unstakeLeaving's backing array when it
	// has spare capacity — confirm the caller no longer uses that slice directly
	allLeaving := append(arg.unstakeLeaving, arg.additionalLeaving...)

	waitingCopy := copyValidatorMap(arg.waiting)
	eligibleCopy := copyValidatorMap(arg.eligible)

	createListsForAllShards(waitingCopy, arg.nbShards)

	numToRemove, err := computeNumToRemove(arg)
	if err != nil {
		return nil, err
	}

	// cap the per-shard shuffle-out count with the active nodes config
	for i, toRemove := range numToRemove {
		if toRemove > int(arg.maxNodesToSwapPerShard) {
			numToRemove[i] = int(arg.maxNodesToSwapPerShard)
		}
	}

	remainingUnstakeLeaving, _ := removeLeavingNodesNotExistingInEligibleOrWaiting(arg.unstakeLeaving, waitingCopy, eligibleCopy)
	remainingAdditionalLeaving, _ := removeLeavingNodesNotExistingInEligibleOrWaiting(arg.additionalLeaving, waitingCopy, eligibleCopy)

	newEligible, newWaiting, stillRemainingUnstakeLeaving := removeLeavingNodesFromValidatorMaps(
		eligibleCopy,
		waitingCopy,
		numToRemove,
		remainingUnstakeLeaving,
		int(arg.nodesMeta),
		int(arg.nodesPerShard),
		arg.flagWaitingListFix)
	newEligible, newWaiting, stillRemainingAdditionalLeaving := removeLeavingNodesFromValidatorMaps(
		newEligible,
		newWaiting,
		numToRemove,
		remainingAdditionalLeaving,
		int(arg.nodesMeta),
		int(arg.nodesPerShard),
		arg.flagWaitingListFix)

	stillRemainingInLeaving := append(stillRemainingUnstakeLeaving, stillRemainingAdditionalLeaving...)

	shuffledOutMap, newEligible := shuffleOutNodes(newEligible, numToRemove, arg.randomness)

	// distribution failures are logged but deliberately do not abort the shuffle
	err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard)
	if err != nil {
		log.Warn("moveNodesToMap failed", "error", err)
	}

	err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false)
	if err != nil {
		log.Warn("distributeValidators newNodes failed", "error", err)
	}

	err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists)
	if err != nil {
		log.Warn("distributeValidators shuffledOut failed", "error", err)
	}

	actualLeaving, _ := removeValidatorsFromList(allLeaving, stillRemainingInLeaving, len(stillRemainingInLeaving))

	return &ResUpdateNodes{
		Eligible:       newEligible,
		Waiting:        newWaiting,
		Leaving:        actualLeaving,
		StillRemaining: stillRemainingInLeaving,
	}, nil
}
// createListsForAllShards makes sure shardMap has a non-nil (possibly empty)
// validator list for every shard id in [0, shards) and for the metachain.
func createListsForAllShards(shardMap map[uint32][]Validator, shards uint32) {
	ensureList := func(shardId uint32) {
		if shardMap[shardId] == nil {
			shardMap[shardId] = make([]Validator, 0)
		}
	}

	for shardId := uint32(0); shardId < shards; shardId++ {
		ensureList(shardId)
	}
	ensureList(core.MetachainShardId)
}
// computeNumToRemove returns, per shard id (including the metachain), the
// number of validators exceeding the target shard size, i.e. how many may be
// shuffled out. Returns an error annotated with the shard id if any shard
// holds fewer validators than its target size. With zero shards an empty map
// is returned (the metachain is skipped too).
func computeNumToRemove(arg shuffleNodesArg) (map[uint32]int, error) {
	numToRemove := make(map[uint32]int)
	if arg.nbShards == 0 {
		return numToRemove, nil
	}

	for shardId := uint32(0); shardId < arg.nbShards; shardId++ {
		maxToRemove, err := computeNumToRemovePerShard(
			len(arg.eligible[shardId]),
			len(arg.waiting[shardId]),
			int(arg.nodesPerShard))
		if err != nil {
			return nil, fmt.Errorf("%w shard=%v", err, shardId)
		}
		numToRemove[shardId] = maxToRemove
	}

	maxToRemove, err := computeNumToRemovePerShard(
		len(arg.eligible[core.MetachainShardId]),
		len(arg.waiting[core.MetachainShardId]),
		int(arg.nodesMeta))
	if err != nil {
		return nil, fmt.Errorf("%w shard=%v", err, core.MetachainShardId)
	}
	numToRemove[core.MetachainShardId] = maxToRemove

	return numToRemove, nil
}
// computeNumToRemovePerShard returns how many validators can be removed from a
// shard currently holding numEligible+numWaiting validators while keeping at
// least nodesPerShard of them, or ErrSmallShardEligibleListSize when the shard
// is already below that target.
func computeNumToRemovePerShard(numEligible int, numWaiting int, nodesPerShard int) (int, error) {
	totalNodes := numEligible + numWaiting
	if totalNodes < nodesPerShard {
		return 0, ErrSmallShardEligibleListSize
	}

	return totalNodes - nodesPerShard, nil
}
// removeLeavingNodesNotExistingInEligibleOrWaiting filters out the leaving
// validators that are present in neither the waiting nor the eligible maps
// (they cannot be removed from pools they are not part of).
// Returns the filtered leaving list and the validators that were dropped.
func removeLeavingNodesNotExistingInEligibleOrWaiting(
	leavingValidators []Validator,
	waiting map[uint32][]Validator,
	eligible map[uint32][]Validator,
) ([]Validator, []Validator) {
	notFoundValidators := make([]Validator, 0)

	for _, v := range leavingValidators {
		found, _ := searchInMap(waiting, v.PubKey())
		if found {
			continue
		}
		found, _ = searchInMap(eligible, v.PubKey())
		if !found {
			log.Debug("Leaving validator not found in waiting or eligible", "pk", v.PubKey())
			notFoundValidators = append(notFoundValidators, v)
		}
	}

	return removeValidatorsFromList(leavingValidators, notFoundValidators, len(notFoundValidators))
}
// removeLeavingNodesFromValidatorMaps removes the leaving validators from the
// waiting and eligible maps, bounded per shard by numToRemove.
// When the waiting-list fix is disabled it keeps the legacy behavior: removal
// from waiting then eligible, sharing the same numToRemove budget. When
// enabled it delegates to removeLeavingNodes, which also enforces the minimum
// node counts per shard/meta.
// Returns the updated eligible map, the updated waiting map and the leaving
// validators that could not be removed.
func removeLeavingNodesFromValidatorMaps(
	eligible map[uint32][]Validator,
	waiting map[uint32][]Validator,
	numToRemove map[uint32]int,
	leaving []Validator,
	minNodesMeta int,
	minNodesPerShard int,
	waitingFixEnabled bool,
) (map[uint32][]Validator, map[uint32][]Validator, []Validator) {
	// work on a copy so the caller's leaving slice stays intact
	stillRemainingInLeaving := make([]Validator, len(leaving))
	copy(stillRemainingInLeaving, leaving)

	if !waitingFixEnabled {
		newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove)
		newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove)
		return newEligible, newWaiting, stillRemainingInLeaving
	}

	return removeLeavingNodes(eligible, waiting, numToRemove, stillRemainingInLeaving, minNodesMeta, minNodesPerShard)
}
// removeLeavingNodes removes leaving validators first from the waiting lists
// (bounded per shard by the surplus over the minimum node count) and then from
// the eligible lists (bounded by numToRemove, also capped to that surplus), so
// that no shard drops below its minimum number of nodes.
// numToRemove entries may be lowered in place. Returns the updated eligible
// and waiting maps plus the validators still not removed.
func removeLeavingNodes(
	eligible map[uint32][]Validator,
	waiting map[uint32][]Validator,
	numToRemove map[uint32]int,
	stillRemainingInLeaving []Validator,
	minNodesMeta int,
	minNodesPerShard int,
) (map[uint32][]Validator, map[uint32][]Validator, []Validator) {
	maxNumToRemoveFromWaiting := make(map[uint32]int)
	for shardId := range eligible {
		computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard)
		maxNumToRemoveFromWaiting[shardId] = computedMinNumberOfNodes
	}

	newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, maxNumToRemoveFromWaiting)

	// recompute the surplus after waiting removals and cap the eligible budget with it
	for shardId, toRemove := range numToRemove {
		computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard)
		if toRemove > computedMinNumberOfNodes {
			numToRemove[shardId] = computedMinNumberOfNodes
		}
	}

	newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove)
	return newEligible, newWaiting, stillRemainingInLeaving
}
// computeMinNumberOfNodes returns how many validators shard shardId could lose
// (from eligible+waiting combined) while still keeping the minimum required
// number of nodes for that shard type; never returns a negative value.
func computeMinNumberOfNodes(eligible map[uint32][]Validator, waiting map[uint32][]Validator, shardId uint32, minNodesMeta int, minNodesPerShard int) int {
	required := minNodesPerShard
	if shardId == core.MetachainShardId {
		required = minNodesMeta
	}

	surplus := len(eligible[shardId]) + len(waiting[shardId]) - required
	if surplus > 0 {
		return surplus
	}
	return 0
}
// computeNewShards determines the new number of shards based on the number of nodes in the network:
// above the split threshold (hysteresis-widened capacity of nbShards+1 shards plus meta) the shard
// count grows; below the merge threshold (exact capacity of nbShards shards plus meta) it shrinks
// by one; otherwise it stays the same.
func (rhs *randHashShuffler) computeNewShards(
	eligible map[uint32][]Validator,
	waiting map[uint32][]Validator,
	numNewNodes int,
	numLeavingNodes int,
	nbShards uint32,
) uint32 {
	nbEligible := 0
	nbWaiting := 0
	for shard := range eligible {
		nbEligible += len(eligible[shard])
		nbWaiting += len(waiting[shard])
	}

	// NOTE(review): if numLeavingNodes exceeded the node total this uint32
	// conversion would wrap to a huge value — confirm callers guarantee
	// numLeavingNodes <= nbEligible + nbWaiting + numNewNodes
	nodesNewEpoch := uint32(nbEligible + nbWaiting + numNewNodes - numLeavingNodes)

	rhs.mutShufflerParams.RLock()
	maxNodesMeta := rhs.nodesMeta + rhs.metaHysteresis
	maxNodesShard := rhs.nodesShard + rhs.shardHysteresis
	nodesForSplit := (nbShards+1)*maxNodesShard + maxNodesMeta
	nodesForMerge := nbShards*rhs.nodesShard + rhs.nodesMeta
	rhs.mutShufflerParams.RUnlock()

	nbShardsNew := nbShards
	if nodesNewEpoch > nodesForSplit {
		nbNodesWithoutMaxMeta := nodesNewEpoch - maxNodesMeta
		nbShardsNew = nbNodesWithoutMaxMeta / maxNodesShard

		return nbShardsNew
	}

	if nodesNewEpoch < nodesForMerge {
		return nbShardsNew - 1
	}

	return nbShardsNew
}
// shuffleOutNodes shuffles the list of eligible validators in each shard and returns the map of shuffled out
// validators together with the updated eligible lists. Shards are processed in
// ascending id order so the result is deterministic for a given randomness.
func shuffleOutNodes(
	eligible map[uint32][]Validator,
	numToShuffle map[uint32]int,
	randomness []byte,
) (map[uint32][]Validator, map[uint32][]Validator) {
	shuffledOutMap := make(map[uint32][]Validator)
	newEligible := make(map[uint32][]Validator)
	var shardShuffledOut []Validator

	sortedShardIds := sortKeys(eligible)
	for _, shardId := range sortedShardIds {
		validators := eligible[shardId]
		shardShuffledOut, validators = shuffleOutShard(validators, numToShuffle[shardId], randomness)
		shuffledOutMap[shardId] = shardShuffledOut

		// remaining eligible = shuffled list minus the shuffled-out prefix
		newEligible[shardId], _ = removeValidatorsFromList(validators, shardShuffledOut, len(shardShuffledOut))
	}

	return shuffledOutMap, newEligible
}
// shuffleOutShard selects the validators to be shuffled out from a shard:
// the validators are shuffled deterministically with the given randomness and
// the first validatorsToSelect entries (capped by the list length) are the
// shuffled-out ones; the remainder stays eligible.
func shuffleOutShard(
	validators []Validator,
	validatorsToSelect int,
	randomness []byte,
) ([]Validator, []Validator) {
	numToSelect := validatorsToSelect
	if numToSelect > len(validators) {
		numToSelect = len(validators)
	}

	shuffled := shuffleList(validators, randomness)
	return shuffled[:numToSelect], shuffled[numToSelect:]
}
// shuffleList returns a new list with the validators reordered
// deterministically by the sha256 hash of each validator's public key
// concatenated with the given randomness.
//
// Fixes over the previous implementation:
//   - the hash input is built in a fresh buffer; the old code appended
//     randomness directly to the slice returned by PubKey(), which could
//     write into the validator's own backing array when that slice had
//     spare capacity;
//   - validators are reordered through an index sort instead of a
//     hash-keyed map, so two validators that produce the same hash key
//     (e.g. duplicated public keys) are no longer collapsed into one entry.
func shuffleList(validators []Validator, randomness []byte) []Validator {
	hasher := sha256.NewSha256()

	keys := make([]string, len(validators))
	for i, v := range validators {
		pubKey := v.PubKey()
		concat := make([]byte, 0, len(pubKey)+len(randomness))
		concat = append(concat, pubKey...)
		concat = append(concat, randomness...)
		keys[i] = string(hasher.Compute(string(concat)))
	}

	// stable sort of indices by hash keeps the order deterministic even on ties
	indexes := make([]int, len(validators))
	for i := range indexes {
		indexes[i] = i
	}
	sort.SliceStable(indexes, func(a, b int) bool {
		return keys[indexes[a]] < keys[indexes[b]]
	})

	result := make([]Validator, len(validators))
	for i, idx := range indexes {
		result[i] = validators[idx]
	}

	return result
}
// removeValidatorsFromList removes up to maxToRemove validators (matched by
// public key) from validatorList, working on a copy so the caller's slice is
// untouched. For each validator to remove, the list is scanned from the end
// and the last matching occurrence is removed (via swap-with-last, so order is
// not preserved). Returns the resulting list and the removed validators.
func removeValidatorsFromList(
	validatorList []Validator,
	validatorsToRemove []Validator,
	maxToRemove int,
) ([]Validator, []Validator) {
	resultedList := make([]Validator, 0)
	resultedList = append(resultedList, validatorList...)
	removed := make([]Validator, 0)

	for _, valToRemove := range validatorsToRemove {
		if len(removed) == maxToRemove {
			break
		}

		for i := len(resultedList) - 1; i >= 0; i-- {
			val := resultedList[i]
			if bytes.Equal(val.PubKey(), valToRemove.PubKey()) {
				resultedList = removeValidatorFromList(resultedList, i)
				removed = append(removed, val)
				break
			}
		}
	}

	return resultedList, removed
}
// removeValidatorFromList replaces the element at the given index with the last
// element of the slice and shrinks the slice by one. The relative order of the
// remaining validators is NOT preserved — only determinism across validators
// matters here, and the swap removal is faster than shifting.
// Out-of-range indices return the list unchanged.
//
// Attention: the slice given as parameter has its element at position index
// swapped with the last element.
func removeValidatorFromList(validatorList []Validator, index int) []Validator {
	lastIndex := len(validatorList) - 1
	if index < 0 || index > lastIndex {
		return validatorList
	}

	validatorList[index] = validatorList[lastIndex]
	return validatorList[:lastIndex]
}
// removeValidatorFromListKeepOrder removes the element at the given index while
// preserving the relative order of the remaining validators. Out-of-range
// indices return the list unchanged.
//
// Attention: the append shifts elements left within the input slice's backing
// array, so the caller must not keep using the original slice afterwards.
func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator {
	indexNotOK := index > len(validatorList)-1 || index < 0
	if indexNotOK {
		return validatorList
	}
	return append(validatorList[:index], validatorList[index+1:]...)
}
// splitShards prepares for the shards split, or if already prepared does the split returning the resulting
// shards configuration for eligible and waiting lists.
// Not implemented yet: an error is logged and unchanged copies are returned.
func (rhs *randHashShuffler) splitShards(
	eligible map[uint32][]Validator,
	waiting map[uint32][]Validator,
	_ uint32,
) (map[uint32][]Validator, map[uint32][]Validator) {
	// TODO: do the split
	log.Error(ErrNotImplemented.Error())

	return copyValidatorMap(eligible), copyValidatorMap(waiting)
}
// mergeShards merges the required shards, returning the resulting shards configuration for eligible and waiting lists.
// Not implemented yet: an error is logged and unchanged copies are returned.
func (rhs *randHashShuffler) mergeShards(
	eligible map[uint32][]Validator,
	waiting map[uint32][]Validator,
	_ uint32,
) (map[uint32][]Validator, map[uint32][]Validator) {
	// TODO: do the merge
	log.Error(ErrNotImplemented.Error())

	return copyValidatorMap(eligible), copyValidatorMap(waiting)
}
// copyValidatorMap creates a copy of the validators map with a fresh slice per
// shard, so mutations on the copy's lists cannot affect the original.
func copyValidatorMap(validatorsMap map[uint32][]Validator) map[uint32][]Validator {
	copied := make(map[uint32][]Validator, len(validatorsMap))
	for shardId, validators := range validatorsMap {
		shardCopy := make([]Validator, 0, len(validators))
		shardCopy = append(shardCopy, validators...)
		copied[shardId] = shardCopy
	}

	return copied
}
// moveNodesToMap moves the validators in the source list to the corresponding
// destination list, emptying each source list afterwards.
// Returns ErrNilOrEmptyDestinationForDistribute when destination is nil.
func moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Validator) error {
	if destination == nil {
		return ErrNilOrEmptyDestinationForDistribute
	}

	for shardId := range source {
		destination[shardId] = append(destination[shardId], source[shardId]...)
		source[shardId] = make([]Validator, 0)
	}

	return nil
}
// moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list
// but adding just enough nodes so that at most numShard (or numMeta for the
// metachain) nodes end up in each destination list; moved validators are
// removed from the front of the source lists.
// Returns ErrNilOrEmptyDestinationForDistribute when destination is nil.
func moveMaxNumNodesToMap(
	destination map[uint32][]Validator,
	source map[uint32][]Validator,
	numMeta uint32,
	numShard uint32,
) error {
	if destination == nil {
		return ErrNilOrEmptyDestinationForDistribute
	}

	for shardId, validators := range source {
		maxNodes := numShard
		if shardId == core.MetachainShardId {
			maxNodes = numMeta
		}

		numNeededNodes := computeNeededNodes(destination[shardId], source[shardId], maxNodes)
		destination[shardId] = append(destination[shardId], validators[0:numNeededNodes]...)
		source[shardId] = validators[numNeededNodes:]
	}

	return nil
}
// computeNeededNodes returns how many validators should be taken from source so
// that destination reaches at most maxNumNodes entries, capped by the number of
// validators actually available in source.
func computeNeededNodes(destination []Validator, source []Validator, maxNumNodes uint32) uint32 {
	var needed uint32
	if current := uint32(len(destination)); maxNumNodes > current {
		needed = maxNumNodes - current
	}

	if available := uint32(len(source)); available < needed {
		return available
	}
	return needed
}
// distributeValidators distributes a list of validators to the given validators map:
// the validators are first shuffled deterministically with the given randomness,
// then assigned round-robin across the shard ids in ascending order. When
// balanced is set, shorter lists are first topped up to the current maximum
// list size before the round-robin pass.
// Returns ErrNilOrEmptyDestinationForDistribute when destLists is empty.
func distributeValidators(destLists map[uint32][]Validator, validators []Validator, randomness []byte, balanced bool) error {
	if len(destLists) == 0 {
		return ErrNilOrEmptyDestinationForDistribute
	}

	// if there was a split, or a merge, eligible map should already have a different nb of keys (shards)
	shuffledValidators := shuffleList(validators, randomness)
	var shardId uint32
	sortedShardIds := sortKeys(destLists)
	destLength := uint32(len(sortedShardIds))

	if balanced {
		shuffledValidators = equalizeValidatorsLists(destLists, shuffledValidators)
	}

	for i, v := range shuffledValidators {
		shardId = sortedShardIds[uint32(i)%destLength]
		destLists[shardId] = append(destLists[shardId], v)
	}

	return nil
}
// equalizeValidatorsLists fills the shorter destination lists (in ascending
// shard-id order) with validators until each reaches the size of the currently
// longest list or the validators run out. Returns the validators that were not
// distributed.
func equalizeValidatorsLists(destLists map[uint32][]Validator, validators []Validator) []Validator {
	log.Debug("equalizeValidatorsLists")

	maxListSize := getMaxListSize(destLists)
	indexValidators := 0
	remainingValidatorsNumber := len(validators)

	sortedShardIds := sortKeys(destLists)
	for _, shardID := range sortedShardIds {
		shardList := destLists[shardID]
		if len(shardList) < maxListSize {
			toMove := maxListSize - len(shardList)
			if toMove > remainingValidatorsNumber {
				toMove = remainingValidatorsNumber
			}

			destLists[shardID] = append(destLists[shardID], validators[indexValidators:indexValidators+toMove]...)

			remainingValidatorsNumber -= toMove
			indexValidators += toMove
		}
	}

	// indexValidators + remainingValidatorsNumber always equals len(validators),
	// so this is exactly the undistributed tail
	return validators[indexValidators : indexValidators+remainingValidatorsNumber]
}
// getMaxListSize returns the length of the longest validator list in the map
// (zero for an empty map).
func getMaxListSize(lists map[uint32][]Validator) int {
	maxSize := 0
	for _, shardList := range lists {
		if len(shardList) > maxSize {
			maxSize = len(shardList)
		}
	}

	return maxSize
}
// sortKeys returns the shard ids of the given map sorted ascending, providing
// a deterministic iteration order over the map.
func sortKeys(nodes map[uint32][]Validator) []uint32 {
	shardIds := make([]uint32, 0, len(nodes))
	for shardId := range nodes {
		shardIds = append(shardIds, shardId)
	}

	sort.SliceStable(shardIds, func(a, b int) bool {
		return shardIds[a] < shardIds[b]
	})

	return shardIds
}
// UpdateShufflerConfig updates the shuffler config according to the current epoch.
// availableNodesConfigs is kept sorted ascending by EpochEnable (see sortConfigs),
// so the loop below ends up selecting the latest config whose enable epoch has
// been reached. It also refreshes the balance-waiting-lists and waiting-list-fix
// activation flags for this epoch.
func (rhs *randHashShuffler) UpdateShufflerConfig(epoch uint32) {
	rhs.mutShufflerParams.Lock()
	defer rhs.mutShufflerParams.Unlock()
	// default shuffle budget when no config matches the epoch
	rhs.activeNodesConfig.NodesToShufflePerShard = rhs.nodesShard
	for _, maxNodesConfig := range rhs.availableNodesConfigs {
		if epoch >= maxNodesConfig.EpochEnable {
			rhs.activeNodesConfig = maxNodesConfig
		}
	}

	log.Debug("randHashShuffler: UpdateShufflerConfig",
		"epoch", epoch,
		"maxNumNodes", rhs.activeNodesConfig.MaxNumNodes,
		"epochEnable", rhs.activeNodesConfig.EpochEnable,
		"maxNodesToShufflePerShard", rhs.activeNodesConfig.NodesToShufflePerShard,
	)

	rhs.flagBalanceWaitingLists.SetValue(epoch >= rhs.balanceWaitingListsEnableEpoch)
	log.Debug("balanced waiting lists", "enabled", rhs.flagBalanceWaitingLists.IsSet())
	rhs.flagWaitingListFix.SetValue(epoch >= rhs.waitingListFixEnableEpoch)
	log.Debug("waiting list fix", "enabled", rhs.flagWaitingListFix.IsSet())
}
// sortConfigs orders the available max-nodes configs ascending by their enable
// epoch, so UpdateShufflerConfig can pick the latest applicable one.
func (rhs *randHashShuffler) sortConfigs() {
	rhs.mutShufflerParams.Lock()
	defer rhs.mutShufflerParams.Unlock()

	sort.Slice(rhs.availableNodesConfigs, func(i, j int) bool {
		return rhs.availableNodesConfigs[i].EpochEnable < rhs.availableNodesConfigs[j].EpochEnable
	})
}