Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Nodes coordinator with staking v4 #3883

Merged
Merged
Show file tree
Hide file tree
Changes from 24 commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
479692d
FEAT: Add first version
mariusmihaic Mar 7, 2022
8c1ed21
FEAT: ihnc with auction
mariusmihaic Mar 7, 2022
40805f9
Merge branch 'move-waiting-list-from-staking' into EN-11664-nodes-coo…
mariusmihaic Mar 7, 2022
d87f063
FEAT: Use flag to save with auction list
mariusmihaic Mar 7, 2022
fe9db50
FEAT: Use interface instead of *NodesCoordinatorRegistry
mariusmihaic Mar 8, 2022
34b4f01
FIX: Build
mariusmihaic Mar 8, 2022
9664050
FIX: Build 2
mariusmihaic Mar 8, 2022
54087d9
FEAT: Refactor LoadState to use interface
mariusmihaic Mar 8, 2022
55e09b3
FEAT: Use proto structs
mariusmihaic Mar 8, 2022
337a353
FEAT: Add generated proto file
mariusmihaic Mar 8, 2022
6e7b730
FIX: Refactor code structure
mariusmihaic Mar 8, 2022
d6cf445
FIX: Remove SetEpochsConfig interface func
mariusmihaic Mar 8, 2022
e63f85b
FEAT: Extract common code to getMinAndLastEpoch
mariusmihaic Mar 8, 2022
82bf91e
FEAT: Add CreateNodesCoordinatorRegistry
mariusmihaic Mar 8, 2022
3ca3f89
FEAT: Use CreateNodesCoordinatorRegistry in nodesCoord
mariusmihaic Mar 9, 2022
be3bc70
Merge branch 'move-waiting-list-from-staking' into EN-11664-nodes-coo…
mariusmihaic Mar 9, 2022
b4993df
FIX: Use SelectedFromAuctionList instead of AuctionList
mariusmihaic Mar 10, 2022
e306d99
FEAT: Add tmp test
mariusmihaic Mar 10, 2022
0c6ae5e
FEAT: Add nodes coord tests
mariusmihaic Mar 11, 2022
9815093
FEAT: Add node shuffler tests
mariusmihaic Mar 11, 2022
0807341
FIX: Small test refactor
mariusmihaic Mar 11, 2022
4fddf7b
Merge branch 'move-waiting-list-from-staking' into EN-11664-nodes-coo…
mariusmihaic Mar 11, 2022
044e8e7
Merge branch 'move-waiting-list-from-staking' into EN-11664-nodes-coo…
mariusmihaic Mar 17, 2022
8dbcf97
FIX: Merge conflicts
mariusmihaic Mar 17, 2022
068c23a
FEAT: Create nodesCoordinatorRegistryFactory.go
mariusmihaic Mar 22, 2022
ccea211
FIX: Test
mariusmihaic Mar 22, 2022
04b6888
FIX: CreateNodesCoordinator
mariusmihaic Mar 22, 2022
eca5854
FIX: Review findings
mariusmihaic Mar 22, 2022
09d6a4a
Merge branch 'feat/liquid-staking' into EN-11664-nodes-coordinator-st…
mariusmihaic Mar 22, 2022
fb6a3b9
FIX: Tests
mariusmihaic Mar 23, 2022
9f32944
FEAT: Save shuffled out in auction list + test
mariusmihaic Mar 23, 2022
8f17265
FIX: Test
mariusmihaic Mar 23, 2022
d58e550
FIX: Another test + typo
mariusmihaic Mar 23, 2022
7dd0593
FIX: Findings + tests
mariusmihaic Mar 23, 2022
6092f80
FIX: Review findings
mariusmihaic Mar 24, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion epochStart/bootstrap/baseStorageHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*blo

func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry(
metaBlock data.HeaderHandler,
nodesConfig *nodesCoordinator.NodesCoordinatorRegistry,
nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler,
) ([]byte, error) {
key := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), metaBlock.GetPrevRandSeed()...)

Expand Down
14 changes: 6 additions & 8 deletions epochStart/bootstrap/fromLocalStorage.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package bootstrap

import (
"bytes"
"encoding/json"
"fmt"
"strconv"

Expand Down Expand Up @@ -192,19 +191,19 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) {

func (e *epochStartBootstrap) checkIfShuffledOut(
pubKey []byte,
nodesConfig *nodesCoordinator.NodesCoordinatorRegistry,
nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler,
) (uint32, bool) {
epochIDasString := fmt.Sprint(e.baseData.lastEpoch)
epochConfig := nodesConfig.EpochsConfig[epochIDasString]
epochConfig := nodesConfig.GetEpochsConfig()[epochIDasString]

newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.WaitingValidators)
newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetWaitingValidators())
if isWaitingForShard {
isShuffledOut := newShardId != e.baseData.shardId
e.nodeType = core.NodeTypeValidator
return newShardId, isShuffledOut
}

newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.EligibleValidators)
newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetEligibleValidators())
if isEligibleForShard {
isShuffledOut := newShardId != e.baseData.shardId
e.nodeType = core.NodeTypeValidator
Expand Down Expand Up @@ -245,7 +244,7 @@ func checkIfValidatorIsInList(
return false
}

func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *nodesCoordinator.NodesCoordinatorRegistry, error) {
func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, nodesCoordinator.NodesCoordinatorRegistryHandler, error) {
bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.coreComponentsHolder.InternalMarshalizer(), storer)
if err != nil {
return nil, nil, err
Expand All @@ -264,8 +263,7 @@ func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*boot
return nil, nil, err
}

config := &nodesCoordinator.NodesCoordinatorRegistry{}
err = json.Unmarshal(d, config)
config, err := nodesCoordinator.CreateNodesCoordinatorRegistry(e.coreComponentsHolder.InternalMarshalizer(), d)
if err != nil {
return nil, nil, err
}
Expand Down
4 changes: 2 additions & 2 deletions epochStart/bootstrap/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ import (

// StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks
type StartOfEpochNodesConfigHandler interface {
NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, error)
NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, error)
IsInterfaceNil() bool
}

Expand All @@ -25,7 +25,7 @@ type EpochStartMetaBlockInterceptorProcessor interface {
// StartInEpochNodesCoordinator defines the methods to process and save nodesCoordinator information to storage
type StartInEpochNodesCoordinator interface {
EpochStartPrepare(metaHdr data.HeaderHandler, body data.BodyHandler)
NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry
NodesCoordinatorToRegistry() nodesCoordinator.NodesCoordinatorRegistryHandler
ShardIdForEpoch(epoch uint32) (uint32, error)
IsInterfaceNil() bool
}
Expand Down
6 changes: 3 additions & 3 deletions epochStart/bootstrap/process.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,15 +62,15 @@ type Parameters struct {
Epoch uint32
SelfShardId uint32
NumOfShards uint32
NodesConfig *nodesCoordinator.NodesCoordinatorRegistry
NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler
}

// ComponentsNeededForBootstrap holds the components which need to be initialized from network
type ComponentsNeededForBootstrap struct {
EpochStartMetaBlock data.MetaHeaderHandler
PreviousEpochStart data.MetaHeaderHandler
ShardHeader data.HeaderHandler
NodesConfig *nodesCoordinator.NodesCoordinatorRegistry
NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler
Headers map[string]data.HeaderHandler
ShardCoordinator sharding.Coordinator
PendingMiniBlocks map[string]*block.MiniBlock
Expand Down Expand Up @@ -126,7 +126,7 @@ type epochStartBootstrap struct {
epochStartMeta data.MetaHeaderHandler
prevEpochStartMeta data.MetaHeaderHandler
syncedHeaders map[string]data.HeaderHandler
nodesConfig *nodesCoordinator.NodesCoordinatorRegistry
nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler
baseData baseDataInStorage
startRound int64
nodeType core.NodeType
Expand Down
2 changes: 1 addition & 1 deletion epochStart/bootstrap/shardStorageHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo
return err
}

components.NodesConfig.CurrentEpoch = components.ShardHeader.GetEpoch()
components.NodesConfig.SetCurrentEpoch(components.ShardHeader.GetEpoch())
nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig)
if err != nil {
return err
Expand Down
4 changes: 2 additions & 2 deletions epochStart/bootstrap/syncValidatorStatus.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat
func (s *syncValidatorStatus) NodesConfigFromMetaBlock(
currMetaBlock data.HeaderHandler,
prevMetaBlock data.HeaderHandler,
) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, error) {
) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, error) {
if currMetaBlock.GetNonce() > 1 && !currMetaBlock.IsStartOfEpochBlock() {
return nil, 0, epochStart.ErrNotEpochStartBlock
}
Expand All @@ -155,7 +155,7 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock(
}

nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry()
nodesConfig.CurrentEpoch = currMetaBlock.GetEpoch()
nodesConfig.SetCurrentEpoch(currMetaBlock.GetEpoch())
return nodesConfig, selfShardId, nil
}

Expand Down
2 changes: 1 addition & 1 deletion factory/bootstrapParameters.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func (bph *bootstrapParams) NumOfShards() uint32 {
}

// NodesConfig returns the nodes coordinator config after bootstrap
func (bph *bootstrapParams) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry {
func (bph *bootstrapParams) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler {
return bph.bootstrapParams.NodesConfig
}

Expand Down
2 changes: 1 addition & 1 deletion factory/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ type BootstrapParamsHolder interface {
Epoch() uint32
SelfShardID() uint32
NumOfShards() uint32
NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry
NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler
IsInterfaceNil() bool
}

Expand Down
6 changes: 3 additions & 3 deletions factory/shardingFactory.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,15 +142,15 @@ func CreateNodesCoordinator(
if bootstrapParameters.NodesConfig() != nil {
nodeRegistry := bootstrapParameters.NodesConfig()
currentEpoch = bootstrapParameters.Epoch()
epochsConfig, ok := nodeRegistry.EpochsConfig[fmt.Sprintf("%d", currentEpoch)]
epochsConfig, ok := nodeRegistry.GetEpochsConfig()[fmt.Sprintf("%d", currentEpoch)]
if ok {
eligibles := epochsConfig.EligibleValidators
eligibles := epochsConfig.GetEligibleValidators()
eligibleValidators, err = nodesCoordinator.SerializableValidatorsToValidators(eligibles)
if err != nil {
return nil, err
}

waitings := epochsConfig.WaitingValidators
waitings := epochsConfig.GetWaitingValidators()
waitingValidators, err = nodesCoordinator.SerializableValidatorsToValidators(waitings)
if err != nil {
return nil, err
Expand Down
2 changes: 1 addition & 1 deletion node/nodeRunner.go
Original file line number Diff line number Diff line change
Expand Up @@ -799,7 +799,7 @@ func (nr *nodeRunner) logInformation(
log.Info("Bootstrap", "epoch", bootstrapComponents.EpochBootstrapParams().Epoch())
if bootstrapComponents.EpochBootstrapParams().NodesConfig() != nil {
log.Info("the epoch from nodesConfig is",
"epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().CurrentEpoch)
"epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().GetCurrentEpoch())
}

var shardIdString = core.GetShardIDString(bootstrapComponents.ShardCoordinator().SelfId())
Expand Down
39 changes: 39 additions & 0 deletions sharding/nodesCoordinator/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,11 @@ package nodesCoordinator

import (
"encoding/hex"
"encoding/json"
"strconv"

"github.com/ElrondNetwork/elrond-go-core/core"
"github.com/ElrondNetwork/elrond-go-core/marshal"
logger "github.com/ElrondNetwork/elrond-go-logger"
)

Expand Down Expand Up @@ -52,6 +54,7 @@ func displayNodesConfiguration(
waiting map[uint32][]Validator,
leaving map[uint32][]Validator,
actualRemaining map[uint32][]Validator,
shuffledOut map[uint32][]Validator,
nbShards uint32,
) {
for shard := uint32(0); shard <= nbShards; shard++ {
Expand All @@ -75,6 +78,10 @@ func displayNodesConfiguration(
pk := v.PubKey()
log.Debug("actually remaining", "pk", pk, "shardID", shardID)
}
for _, v := range shuffledOut[shardID] {
pk := v.PubKey()
log.Debug("shuffled out", "pk", pk, "shardID", shardID)
}
}
}

Expand Down Expand Up @@ -108,3 +115,35 @@ func SerializableShardValidatorListToValidatorList(shardValidators []*Serializab
}
return newValidators, nil
}

// CreateNodesCoordinatorRegistry creates a NodesCoordinatorRegistryHandler depending on the buffer. Old version uses
// NodesCoordinatorRegistry with a json marshaller; while the new version(from staking v4) uses NodesCoordinatorRegistryWithAuction
// with proto marshaller
func CreateNodesCoordinatorRegistry(marshaller marshal.Marshalizer, buff []byte) (NodesCoordinatorRegistryHandler, error) {
registry, err := createOldRegistry(buff)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do not unmarshall 2 times - push the epoch here - make a component which is NodesCoordinatorRegistryFactory - which knows depending on the epoch which registry has to be created

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Very good suggestion. I've done this similar to how CreateHeader is done, which creates either Header/HeaderV2, but your suggestion is way better.

Therefore, I've done a pretty big refactor and created a NodesCoordinatorRegistryFactory component

if err == nil {
return registry, nil
}

return createRegistryWithAuction(marshaller, buff)
}

// createOldRegistry attempts to decode buff as a json-serialized
// NodesCoordinatorRegistry (the registry format used before staking v4).
func createOldRegistry(buff []byte) (*NodesCoordinatorRegistry, error) {
	oldRegistry := &NodesCoordinatorRegistry{}
	if err := json.Unmarshal(buff, oldRegistry); err != nil {
		return nil, err
	}

	return oldRegistry, nil
}

// createRegistryWithAuction attempts to decode buff with the provided
// marshaller into a NodesCoordinatorRegistryWithAuction (the registry
// format introduced with staking v4).
func createRegistryWithAuction(marshaller marshal.Marshalizer, buff []byte) (*NodesCoordinatorRegistryWithAuction, error) {
	registryWithAuction := &NodesCoordinatorRegistryWithAuction{}
	if err := marshaller.Unmarshal(registryWithAuction, buff); err != nil {
		return nil, err
	}

	return registryWithAuction, nil
}
2 changes: 2 additions & 0 deletions sharding/nodesCoordinator/dtos.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ type ArgsUpdateNodes struct {
NewNodes []Validator
UnStakeLeaving []Validator
AdditionalLeaving []Validator
Auction []Validator
Rand []byte
NbShards uint32
Epoch uint32
Expand All @@ -16,6 +17,7 @@ type ArgsUpdateNodes struct {
type ResUpdateNodes struct {
Eligible map[uint32][]Validator
Waiting map[uint32][]Validator
ShuffledOut map[uint32][]Validator
Leaving []Validator
StillRemaining []Validator
}
27 changes: 24 additions & 3 deletions sharding/nodesCoordinator/hashValidatorShuffler.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ type NodesShufflerArgs struct {
MaxNodesEnableConfig []config.MaxNodesChangeConfig
BalanceWaitingListsEnableEpoch uint32
WaitingListFixEnableEpoch uint32
StakingV4EnableEpoch uint32
}

type shuffleNodesArg struct {
Expand All @@ -32,6 +33,7 @@ type shuffleNodesArg struct {
unstakeLeaving []Validator
additionalLeaving []Validator
newNodes []Validator
auction []Validator
randomness []byte
distributor ValidatorsDistributor
nodesMeta uint32
Expand All @@ -40,6 +42,7 @@ type shuffleNodesArg struct {
maxNodesToSwapPerShard uint32
flagBalanceWaitingLists bool
flagWaitingListFix bool
flagStakingV4 bool
}

// TODO: Decide if transaction load statistics will be used for limiting the number of shards
Expand All @@ -61,6 +64,8 @@ type randHashShuffler struct {
flagBalanceWaitingLists atomic.Flag
waitingListFixEnableEpoch uint32
flagWaitingListFix atomic.Flag
stakingV4EnableEpoch uint32
flagStakingV4 atomic.Flag
}

// NewHashValidatorsShuffler creates a validator shuffler that uses a hash between validator key and a given
Expand All @@ -85,10 +90,12 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro
availableNodesConfigs: configs,
balanceWaitingListsEnableEpoch: args.BalanceWaitingListsEnableEpoch,
waitingListFixEnableEpoch: args.WaitingListFixEnableEpoch,
stakingV4EnableEpoch: args.StakingV4EnableEpoch,
}

log.Debug("randHashShuffler: enable epoch for balance waiting list", "epoch", rxs.balanceWaitingListsEnableEpoch)
log.Debug("randHashShuffler: enable epoch for waiting waiting list", "epoch", rxs.waitingListFixEnableEpoch)
log.Debug("randHashShuffler: enable epoch for staking v4", "epoch", rxs.stakingV4EnableEpoch)

rxs.UpdateParams(args.NodesShard, args.NodesMeta, args.Hysteresis, args.Adaptivity)

Expand Down Expand Up @@ -176,6 +183,7 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo
unstakeLeaving: args.UnStakeLeaving,
additionalLeaving: args.AdditionalLeaving,
newNodes: args.NewNodes,
auction: args.Auction,
randomness: args.Rand,
nodesMeta: nodesMeta,
nodesPerShard: nodesPerShard,
Expand All @@ -184,6 +192,7 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo
maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard,
flagBalanceWaitingLists: rhs.flagBalanceWaitingLists.IsSet(),
flagWaitingListFix: rhs.flagWaitingListFix.IsSet(),
flagStakingV4: rhs.flagStakingV4.IsSet(),
})
}

Expand Down Expand Up @@ -288,16 +297,24 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) {
log.Warn("distributeValidators newNodes failed", "error", err)
}

err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists)
if err != nil {
log.Warn("distributeValidators shuffledOut failed", "error", err)
if arg.flagStakingV4 {
err = distributeValidators(newWaiting, arg.auction, arg.randomness, false)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it is hard to understand the difference between the 2 versions. Rename this function to distributeValidatorsUsingAuction - or something more clear.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I do not think that creating an extra function here would bring any benefit, since the only thing it would do is to call distributeValidators -> which already does its job.

I think the way to go here is to add an explanatory comment

if err != nil {
log.Warn("distributeValidators auction list failed", "error", err)
}
} else {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I do not see why there is log.Warn everywhere - these errors are critical - so they should return an error. Add a task to refactor this.
Plus I do not see the point of shuffleNodes - why is it like a function which gets a lot of arguments as input - I think it would be better that this is a component, with defined interface. Add a refactor task for this as well.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right, I was also thinking the same when I saw this, but decided to be consistent with the current implementation.
Added 2 tasks for this refactor !

err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists)
if err != nil {
log.Warn("distributeValidators shuffledOut failed", "error", err)
}
}

actualLeaving, _ := removeValidatorsFromList(allLeaving, stillRemainingInLeaving, len(stillRemainingInLeaving))

return &ResUpdateNodes{
Eligible: newEligible,
Waiting: newWaiting,
ShuffledOut: shuffledOutMap,
Leaving: actualLeaving,
StillRemaining: stillRemainingInLeaving,
}, nil
Expand Down Expand Up @@ -779,8 +796,12 @@ func (rhs *randHashShuffler) UpdateShufflerConfig(epoch uint32) {

rhs.flagBalanceWaitingLists.SetValue(epoch >= rhs.balanceWaitingListsEnableEpoch)
log.Debug("balanced waiting lists", "enabled", rhs.flagBalanceWaitingLists.IsSet())

rhs.flagWaitingListFix.SetValue(epoch >= rhs.waitingListFixEnableEpoch)
log.Debug("waiting list fix", "enabled", rhs.flagWaitingListFix.IsSet())

rhs.flagStakingV4.SetValue(epoch >= rhs.stakingV4EnableEpoch)
log.Debug("staking v4", "enabled", rhs.flagStakingV4.IsSet())
}

func (rhs *randHashShuffler) sortConfigs() {
Expand Down