
Commit

Merge pull request #4121 from ElrondNetwork/EN-12197-staking-v4-integration-tests-unstake

Staking v4: integration tests for unStake nodes
mariusmihaic committed Jun 16, 2022
2 parents 12c16be + 40e3c25 commit 45b5d03
Showing 7 changed files with 375 additions and 76 deletions.
15 changes: 2 additions & 13 deletions integrationTests/testInitializer.go
@@ -62,6 +62,7 @@ import (
dataRetrieverMock "github.com/ElrondNetwork/elrond-go/testscommon/dataRetriever"
"github.com/ElrondNetwork/elrond-go/testscommon/genesisMocks"
"github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks"
"github.com/ElrondNetwork/elrond-go/testscommon/stakingcommon"
statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler"
"github.com/ElrondNetwork/elrond-go/trie"
"github.com/ElrondNetwork/elrond-go/trie/hashesHolder"
@@ -98,7 +99,6 @@ const (
adaptivity = false
hysteresis = float32(0.2)
maxTrieLevelInMemory = uint(5)
delegationManagementKey = "delegationManagement"
delegationContractsList = "delegationContracts"
)

@@ -2550,18 +2550,7 @@ func SaveDelegationManagerConfig(nodes []*TestProcessorNode) {
continue
}

acc, _ := n.AccntState.LoadAccount(vm.DelegationManagerSCAddress)
userAcc, _ := acc.(state.UserAccountHandler)

managementData := &systemSmartContracts.DelegationManagement{
MinDeposit: big.NewInt(100),
LastAddress: vm.FirstDelegationSCAddress,
MinDelegationAmount: big.NewInt(1),
}
marshaledData, _ := TestMarshalizer.Marshal(managementData)
_ = userAcc.DataTrieTracker().SaveKeyValue([]byte(delegationManagementKey), marshaledData)
_ = n.AccntState.SaveAccount(userAcc)
_, _ = n.AccntState.Commit()
stakingcommon.SaveDelegationManagerConfig(n.AccntState, TestMarshalizer)
}
}

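For context, a minimal sketch of what the extracted stakingcommon.SaveDelegationManagerConfig helper presumably does, reconstructed from the inline code deleted above (key name and config values are taken from the deleted lines; the committed helper body in testscommon/stakingcommon may differ, and the package's usual imports — math/big, marshal, state, vm, systemSmartContracts — are assumed):

// SaveDelegationManagerConfig stores a default DelegationManagement config
// under the delegation manager system SC account and commits the state,
// mirroring the inline code removed above (sketch).
func SaveDelegationManagerConfig(accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer) {
	acc, _ := accountsDB.LoadAccount(vm.DelegationManagerSCAddress)
	userAcc, _ := acc.(state.UserAccountHandler)

	managementData := &systemSmartContracts.DelegationManagement{
		MinDeposit:          big.NewInt(100),
		LastAddress:         vm.FirstDelegationSCAddress,
		MinDelegationAmount: big.NewInt(1),
	}
	marshaledData, _ := marshaller.Marshal(managementData)
	_ = userAcc.DataTrieTracker().SaveKeyValue([]byte("delegationManagement"), marshaledData)
	_ = accountsDB.SaveAccount(userAcc)
	_, _ = accountsDB.Commit()
}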
59 changes: 35 additions & 24 deletions integrationTests/vm/staking/baseTestMetaProcessor.go
@@ -50,6 +50,7 @@ type nodesConfig struct {
shuffledOut map[uint32][][]byte
queue [][]byte
auction [][]byte
new [][]byte
}

// TestMetaProcessor -
@@ -88,6 +89,11 @@ func newTestMetaProcessor(
maxNodesConfig,
)

stakingcommon.SaveDelegationManagerConfig(
stateComponents.AccountsAdapter(),
coreComponents.InternalMarshalizer(),
)

gasScheduleNotifier := createGasScheduleNotifier()
blockChainHook := createBlockChainHook(
dataComponents,
@@ -176,6 +182,30 @@
}
}

func saveNodesConfig(
accountsDB state.AccountsAdapter,
marshaller marshal.Marshalizer,
nc nodesCoordinator.NodesCoordinator,
maxNodesConfig []config.MaxNodesChangeConfig,
) {
eligibleMap, _ := nc.GetAllEligibleValidatorsPublicKeys(0)
waitingMap, _ := nc.GetAllWaitingValidatorsPublicKeys(0)
allStakedNodes := int64(len(getAllPubKeys(eligibleMap)) + len(getAllPubKeys(waitingMap)))

maxNumNodes := allStakedNodes
if len(maxNodesConfig) > 0 {
maxNumNodes = int64(maxNodesConfig[0].MaxNumNodes)
}

stakingcommon.SaveNodesConfig(
accountsDB,
marshaller,
allStakedNodes,
1,
maxNumNodes,
)
}
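saveNodesConfig counts all genesis validators via getAllPubKeys before persisting the config. A minimal sketch of that helper as its usage here implies it — it simply flattens the per-shard key map into one slice (assumption; the committed version may differ in details):

// getAllPubKeys flattens a per-shard map of BLS public keys into one slice.
func getAllPubKeys(validatorsMap map[uint32][][]byte) [][]byte {
	allPubKeys := make([][]byte, 0)
	for _, pubKeys := range validatorsMap {
		allPubKeys = append(allPubKeys, pubKeys...)
	}
	return allPubKeys
}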

func createGasScheduleNotifier() core.GasScheduleNotifier {
gasSchedule := arwenConfig.MakeGasMapForTests()
defaults.FillGasMapInternal(gasSchedule, 1)
@@ -325,17 +355,22 @@ func (tmp *TestMetaProcessor) updateNodesConfig(epoch uint32) {
validatorsInfoMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash)

auction := make([][]byte, 0)
newList := make([][]byte, 0)
for _, validator := range validatorsInfoMap.GetAllValidatorsInfo() {
if validator.GetList() == string(common.AuctionList) {
auction = append(auction, validator.GetPublicKey())
}
if validator.GetList() == string(common.NewList) {
newList = append(newList, validator.GetPublicKey())
}
}

tmp.NodesConfig.eligible = eligible
tmp.NodesConfig.waiting = waiting
tmp.NodesConfig.shuffledOut = shuffledOut
tmp.NodesConfig.leaving = leaving
tmp.NodesConfig.auction = auction
tmp.NodesConfig.new = newList
tmp.NodesConfig.queue = tmp.getWaitingListKeys()
}

@@ -353,27 +388,3 @@ func generateAddress(identifier uint32) []byte {
uniqueIdentifier := fmt.Sprintf("address-%d", identifier)
return []byte(strings.Repeat("0", addressLength-len(uniqueIdentifier)) + uniqueIdentifier)
}

func saveNodesConfig(
accountsDB state.AccountsAdapter,
marshaller marshal.Marshalizer,
nc nodesCoordinator.NodesCoordinator,
maxNodesConfig []config.MaxNodesChangeConfig,
) {
eligibleMap, _ := nc.GetAllEligibleValidatorsPublicKeys(0)
waitingMap, _ := nc.GetAllWaitingValidatorsPublicKeys(0)
allStakedNodes := int64(len(getAllPubKeys(eligibleMap)) + len(getAllPubKeys(waitingMap)))

maxNumNodes := allStakedNodes
if len(maxNodesConfig) > 0 {
maxNumNodes = int64(maxNodesConfig[0].MaxNumNodes)
}

stakingcommon.SaveNodesConfig(
accountsDB,
marshaller,
allStakedNodes,
1,
maxNumNodes,
)
}
1 change: 1 addition & 0 deletions integrationTests/vm/staking/configDisplayer.go
@@ -66,6 +66,7 @@ func (tmp *TestMetaProcessor) displayConfig(config nodesConfig) {
headline := display.Headline("Nodes config", "", delimiter)
fmt.Printf("%s\n%s\n", headline, table)

tmp.displayValidators("New", config.new)
tmp.displayValidators("Auction", config.auction)
tmp.displayValidators("Queue", config.queue)

16 changes: 8 additions & 8 deletions integrationTests/vm/staking/stakingQueue.go
@@ -55,21 +55,21 @@ func createStakingQueueCustomNodes(
queue := make([][]byte, 0)

for owner, ownerStats := range owners {
stakingcommon.AddKeysToWaitingList(
stakingcommon.RegisterValidatorKeys(
accountsAdapter,
ownerStats.StakingQueueKeys,
marshaller,
[]byte(owner),
[]byte(owner),
ownerStats.StakingQueueKeys,
ownerStats.TotalStake,
marshaller,
)

stakingcommon.RegisterValidatorKeys(
stakingcommon.AddKeysToWaitingList(
accountsAdapter,
[]byte(owner),
[]byte(owner),
ownerStats.StakingQueueKeys,
ownerStats.TotalStake,
marshaller,
[]byte(owner),
[]byte(owner),
)

queue = append(queue, ownerStats.StakingQueueKeys...)
@@ -103,7 +103,7 @@ func (tmp *TestMetaProcessor) getWaitingListKeys() [][]byte {

allPubKeys := make([][]byte, 0)
for len(nextKey) != 0 && index <= waitingList.Length {
allPubKeys = append(allPubKeys, nextKey)
allPubKeys = append(allPubKeys, nextKey[2:]) // remove "w_" prefix

element, errGet := stakingcommon.GetWaitingListElement(stakingSCAcc, tmp.Marshaller, nextKey)
if errGet != nil {
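The one-line fix above reflects the staking system SC's key convention for waiting-list elements: each element is stored under a key built from a "w_" prefix plus the BLS public key, so trimming the first two bytes recovers the bare key the tests compare against. A minimal sketch of that convention (helper names are illustrative, not from the repo):

// Waiting-list elements are keyed as "w_" + BLS public key in the staking
// SC's data trie; these illustrative helpers show the round trip.
func toWaitingListElementKey(blsKey []byte) []byte {
	return append([]byte("w_"), blsKey...)
}

func fromWaitingListElementKey(elementKey []byte) []byte {
	return elementKey[2:] // drop the "w_" prefix
}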
194 changes: 193 additions & 1 deletion integrationTests/vm/staking/stakingV4_test.go
@@ -505,6 +505,7 @@ func TestStakingV4_StakeNewNodes(t *testing.T) {
queue = append(queue, newNodes0[newOwner0].BLSKeys...)
currNodesConfig = node.NodesConfig
require.Len(t, currNodesConfig.queue, 4)
requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue)

// NewOwner1 stakes 1 node with top up = 2*node price; should be sent to auction list
newOwner1 := "newOwner1"
@@ -553,4 +554,195 @@
requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys)
}
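The new test below leans on a few small assertion helpers (remove, requireSameSliceDifferentOrder, requireSliceContains, requireMapContains). They live elsewhere in this package, so the sketches below are assumptions inferred from how the tests use them, not the committed implementations:

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"
)

// containsKey reports whether the slice holds the given key (byte-wise).
func containsKey(slice [][]byte, key []byte) bool {
	for _, e := range slice {
		if bytes.Equal(e, key) {
			return true
		}
	}
	return false
}

// remove returns a copy of the slice without the given key.
func remove(slice [][]byte, key []byte) [][]byte {
	ret := make([][]byte, 0, len(slice))
	for _, e := range slice {
		if !bytes.Equal(e, key) {
			ret = append(ret, e)
		}
	}
	return ret
}

// requireSameSliceDifferentOrder asserts both slices hold the same keys, ignoring order.
func requireSameSliceDifferentOrder(t *testing.T, s1, s2 [][]byte) {
	require.Equal(t, len(s1), len(s2))
	for _, key := range s1 {
		require.True(t, containsKey(s2, key))
	}
}

// requireSliceContains asserts every key of the subset appears in the slice.
func requireSliceContains(t *testing.T, slice, subset [][]byte) {
	for _, key := range subset {
		require.True(t, containsKey(slice, key))
	}
}

// requireMapContains asserts every key of the subset appears in some shard of the map.
func requireMapContains(t *testing.T, m map[uint32][][]byte, subset [][]byte) {
	for _, key := range subset {
		require.True(t, containsKey(getAllPubKeys(m), key))
	}
}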

// TODO: test unstake with 1 owner -> 1 bls key in auction => numStakedNodes = 0
func TestStakingV4_UnStakeNodes(t *testing.T) {
pubKeys := generateAddresses(0, 20)

owner1 := "owner1"
owner1Stats := &OwnerStats{
EligibleBlsKeys: map[uint32][][]byte{
core.MetachainShardId: pubKeys[:2],
},
WaitingBlsKeys: map[uint32][][]byte{
0: pubKeys[2:4],
},
StakingQueueKeys: pubKeys[4:6],
TotalStake: big.NewInt(10 * nodePrice),
}

owner2 := "owner2"
owner2Stats := &OwnerStats{
EligibleBlsKeys: map[uint32][][]byte{
0: pubKeys[6:8],
},
WaitingBlsKeys: map[uint32][][]byte{
core.MetachainShardId: pubKeys[8:12],
},
StakingQueueKeys: pubKeys[12:15],
TotalStake: big.NewInt(10 * nodePrice),
}

owner3 := "owner3"
owner3Stats := &OwnerStats{
StakingQueueKeys: pubKeys[15:17],
TotalStake: big.NewInt(6 * nodePrice),
}

cfg := &InitialNodesConfig{
MetaConsensusGroupSize: 1,
ShardConsensusGroupSize: 1,
MinNumberOfEligibleShardNodes: 2,
MinNumberOfEligibleMetaNodes: 2,
NumOfShards: 1,
Owners: map[string]*OwnerStats{
owner1: owner1Stats,
owner2: owner2Stats,
owner3: owner3Stats,
},
MaxNodesChangeConfig: []config.MaxNodesChangeConfig{
{
EpochEnable: 0,
MaxNumNodes: 10,
NodesToShufflePerShard: 1,
},
},
}
node := NewTestMetaProcessorWithCustomNodes(cfg)
node.EpochStartTrigger.SetRoundsPerEpoch(4)

// 1. Check initial config is correct
currNodesConfig := node.NodesConfig
require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4)
require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6)
require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2)
require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4)
require.Len(t, currNodesConfig.eligible[0], 2)
require.Len(t, currNodesConfig.waiting[0], 2)
require.Empty(t, currNodesConfig.shuffledOut)
require.Empty(t, currNodesConfig.auction)

owner1StakingQueue := owner1Stats.StakingQueueKeys
owner2StakingQueue := owner2Stats.StakingQueueKeys
owner3StakingQueue := owner3Stats.StakingQueueKeys
queue := make([][]byte, 0)
queue = append(queue, owner1StakingQueue...)
queue = append(queue, owner2StakingQueue...)
queue = append(queue, owner3StakingQueue...)
require.Len(t, currNodesConfig.queue, 7)
requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue)

// 1.1 Owner2 unStakes one of his staking queue nodes. Node should be removed from staking queue list
node.ProcessUnStake(t, map[string][][]byte{
owner2: {owner2Stats.StakingQueueKeys[0]},
})
currNodesConfig = node.NodesConfig
queue = remove(queue, owner2Stats.StakingQueueKeys[0])
require.Len(t, currNodesConfig.queue, 6)
requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue)
require.Empty(t, currNodesConfig.new)
require.Empty(t, currNodesConfig.auction)

// 1.2 Owner2 unStakes one of his waiting list keys. First node from staking queue should be added to fill its place.
copy(queue, currNodesConfig.queue) // copy queue to local variable so we have the queue in same order
node.ProcessUnStake(t, map[string][][]byte{
owner2: {owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]},
})
currNodesConfig = node.NodesConfig
require.Len(t, currNodesConfig.new, 1)
require.Equal(t, currNodesConfig.new[0], queue[0])
require.Empty(t, currNodesConfig.auction)
queue = remove(queue, queue[0])
require.Len(t, currNodesConfig.queue, 5)
requireSameSliceDifferentOrder(t, queue, currNodesConfig.queue)

// 2. Check config after staking v4 init
node.Process(t, 3)
currNodesConfig = node.NodesConfig
require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4)
require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6)
// Owner2's node from waiting list which was unStaked in previous epoch is now leaving
require.Len(t, currNodesConfig.leaving, 1)
require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0])
require.Len(t, currNodesConfig.auction, 5)
// All nodes from queue have been moved to auction
requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction)

// 2.1 Owner3 unStakes one of his nodes from auction
node.ProcessUnStake(t, map[string][][]byte{
owner3: {owner3StakingQueue[1]},
})
unStakedNodesInStakingV4InitEpoch := make([][]byte, 0)
unStakedNodesInStakingV4InitEpoch = append(unStakedNodesInStakingV4InitEpoch, owner3StakingQueue[1])
currNodesConfig = node.NodesConfig
queue = remove(queue, owner3StakingQueue[1])
require.Len(t, currNodesConfig.auction, 4)
requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction)
require.Empty(t, currNodesConfig.queue)
require.Empty(t, currNodesConfig.new)

// 2.2 Owner1 unStakes 2 nodes: one from auction + one active
node.ProcessUnStake(t, map[string][][]byte{
owner1: {owner1StakingQueue[1], owner1Stats.WaitingBlsKeys[0][0]},
})
unStakedNodesInStakingV4InitEpoch = append(unStakedNodesInStakingV4InitEpoch, owner1StakingQueue[1])
unStakedNodesInStakingV4InitEpoch = append(unStakedNodesInStakingV4InitEpoch, owner1Stats.WaitingBlsKeys[0][0])
currNodesConfig = node.NodesConfig
queue = remove(queue, owner1StakingQueue[1])
require.Len(t, currNodesConfig.auction, 3)
requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction)
require.Empty(t, currNodesConfig.queue)
require.Empty(t, currNodesConfig.new)

// 3. Check config in epoch = staking v4 epoch
node.Process(t, 3)
currNodesConfig = node.NodesConfig
require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4)
require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4)
require.Len(t, getAllPubKeys(currNodesConfig.leaving), 3)
// All unStaked nodes in previous epoch are now leaving
requireMapContains(t, currNodesConfig.leaving, unStakedNodesInStakingV4InitEpoch)
// 3.1 Owner2 unStakes one of his nodes from auction
node.ProcessUnStake(t, map[string][][]byte{
owner2: {owner2StakingQueue[1]},
})
currNodesConfig = node.NodesConfig
queue = remove(queue, owner2StakingQueue[1])
shuffledOutNodes := getAllPubKeys(currNodesConfig.shuffledOut)
require.Len(t, currNodesConfig.auction, len(shuffledOutNodes)+len(queue))
requireSliceContains(t, currNodesConfig.auction, shuffledOutNodes)
requireSliceContains(t, currNodesConfig.auction, queue)

// 4. Check config after whole staking v4 chain is ready, when one of the owners unStakes a node
node.Process(t, 4)
currNodesConfig = node.NodesConfig
node.ProcessUnStake(t, map[string][][]byte{
owner2: {owner2Stats.EligibleBlsKeys[0][0]},
})
node.Process(t, 4)
currNodesConfig = node.NodesConfig
require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1)
requireMapContains(t, currNodesConfig.leaving, [][]byte{owner2Stats.EligibleBlsKeys[0][0]})
require.Empty(t, currNodesConfig.new)
require.Empty(t, currNodesConfig.queue)

// 4.1 NewOwner stakes 1 node, should be sent to auction
newOwner := "newOwner1"
newNode := map[string]*NodesRegisterData{
newOwner: {
BLSKeys: [][]byte{generateAddress(444)},
TotalStake: big.NewInt(2 * nodePrice),
},
}
node.ProcessStake(t, newNode)
currNodesConfig = node.NodesConfig
requireSliceContains(t, currNodesConfig.auction, newNode[newOwner].BLSKeys)

// 4.2 NewOwner unStakes his node, he should not be in auction anymore + set to leaving
node.ProcessUnStake(t, map[string][][]byte{
newOwner: {newNode[newOwner].BLSKeys[0]},
})
currNodesConfig = node.NodesConfig
requireSliceContainsNumOfElements(t, currNodesConfig.auction, newNode[newOwner].BLSKeys, 0)
node.Process(t, 3)
currNodesConfig = node.NodesConfig
requireMapContains(t, currNodesConfig.leaving, newNode[newOwner].BLSKeys)
}
