Drop and restart validators - property test (#2009)
* Drop and restart validators - property test

---------

Co-authored-by: Stefan Negovanović <stefan@ethernal.tech>
stana-miric and Stefan-Ethernal committed Oct 24, 2023
1 parent 2966d12 commit 8eaaef7
Showing 3 changed files with 87 additions and 17 deletions.
9 changes: 3 additions & 6 deletions command/secrets/output/secrets_output.go
@@ -38,24 +38,21 @@ func setFlags(cmd *cobra.Command) {
 		&params.outputBLS,
 		blsFlag,
 		false,
-		"output only the BLS public key "+
-			"from the provided secrets manager",
+		"output only the BLS public key from the provided secrets manager",
 	)
 
 	cmd.Flags().BoolVar(
 		&params.outputNodeID,
 		nodeIDFlag,
 		false,
-		"output only the node id "+
-			"from the provided secrets manager",
+		"output only the node id from the provided secrets manager",
 	)
 
 	cmd.Flags().BoolVar(
 		&params.outputValidator,
 		validatorFlag,
 		false,
-		"output only the validator key address "+
-			"from the provided secrets manager",
+		"output only the validator key address from the provided secrets manager",
 	)
 
 	cmd.MarkFlagsMutuallyExclusive(dataDirFlag, configFlag)
16 changes: 10 additions & 6 deletions consensus/polybft/proposer_calculator.go
@@ -25,6 +25,11 @@ type PrioritizedValidator struct {
 	ProposerPriority *big.Int
 }
 
+func (pv PrioritizedValidator) String() string {
+	return fmt.Sprintf("[%v, voting power %v, priority %v]", pv.Metadata.Address.String(),
+		pv.Metadata.VotingPower, pv.ProposerPriority)
+}
+
 // ProposerSnapshot represents snapshot of one proposer calculation
 type ProposerSnapshot struct {
 	Height uint64
@@ -211,9 +216,7 @@ func (pc *ProposerCalculator) GetSnapshot() (*ProposerSnapshot, bool) {
 // PostBlock is called on every insert of finalized block (either from consensus or syncer)
 // It will update priorities and save the updated snapshot to db
 func (pc *ProposerCalculator) PostBlock(req *PostBlockRequest) error {
-	blockNumber := req.FullBlock.Block.Number()
-
-	return pc.update(blockNumber, req.DBTx)
+	return pc.update(req.FullBlock.Block.Number(), req.DBTx)
 }
 
 func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error {
@@ -229,15 +232,16 @@ func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error {
 			return err
 		}
 
-		pc.logger.Debug("Proposers snapshot has been updated", "current block", blockNumber+1,
-			"validators count", len(pc.snapshot.Validators))
+		pc.logger.Debug("Proposer snapshot has been updated",
+			"block", height, "validators", pc.snapshot.Validators)
 	}
 
 	if err := pc.state.ProposerSnapshotStore.writeProposerSnapshot(pc.snapshot, dbTx); err != nil {
 		return fmt.Errorf("cannot save proposers snapshot for block %d: %w", blockNumber, err)
 	}
 
-	pc.logger.Debug("Update proposers snapshot finished", "target block", blockNumber)
+	pc.logger.Info("Proposer snapshot update has been finished",
+		"target block", blockNumber+1, "validators", len(pc.snapshot.Validators))
 
 	return nil
 }
79 changes: 74 additions & 5 deletions e2e-polybft/property/property_test.go
@@ -2,9 +2,9 @@ package property
 
 import (
 	"fmt"
-	"math"
 	"math/big"
 	"path/filepath"
+	"sync"
 	"testing"
 	"time"
 
@@ -16,11 +16,9 @@ import (
 )
 
 func TestProperty_DifferentVotingPower(t *testing.T) {
-	t.Parallel()
-
 	const (
 		blockTime = time.Second * 6
-		maxStake  = math.MaxUint64
+		maxStake  = 20
 	)
 
 	rapid.Check(t, func(tt *rapid.T) {
@@ -41,6 +39,7 @@
 
 		cluster := framework.NewPropertyTestCluster(t, int(numNodes),
 			framework.WithEpochSize(epochSize),
+			framework.WithBlockTime(blockTime),
 			framework.WithSecretsCallback(func(adresses []types.Address, config *framework.TestClusterConfig) {
 				for i := range adresses {
 					config.StakeAmounts = append(config.StakeAmounts, stakes[i])
@@ -52,6 +51,76 @@
 			filepath.Base(cluster.Config.LogsDir), numNodes, epochSize, numBlocks)
 
 		// wait for single epoch to process withdrawal
-		require.NoError(t, cluster.WaitForBlock(numBlocks, blockTime*time.Duration(numBlocks)))
+		require.NoError(t, cluster.WaitForBlock(numBlocks, 2*blockTime*time.Duration(numBlocks)))
 	})
 }
+
+func TestProperty_DropValidators(t *testing.T) {
+	const (
+		blockTime = time.Second * 4
+	)
+
+	rapid.Check(t, func(tt *rapid.T) {
+		var (
+			numNodes  = rapid.Uint64Range(5, 8).Draw(tt, "number of cluster nodes")
+			epochSize = rapid.OneOf(rapid.Just(4), rapid.Just(10)).Draw(tt, "epoch size")
+		)
+
+		cluster := framework.NewPropertyTestCluster(t, int(numNodes),
+			framework.WithEpochSize(epochSize),
+			framework.WithBlockTime(blockTime),
+			framework.WithSecretsCallback(func(adresses []types.Address, config *framework.TestClusterConfig) {
+				for range adresses {
+					config.StakeAmounts = append(config.StakeAmounts, big.NewInt(20))
+				}
+			}))
+		defer cluster.Stop()
+
+		t.Logf("Test %v, run with %d nodes, epoch size: %d",
+			filepath.Base(cluster.Config.LogsDir), numNodes, epochSize)
+
+		cluster.WaitForReady(t)
+
+		// stop first validator, block production should continue
+		cluster.Servers[0].Stop()
+		activeValidator := cluster.Servers[numNodes-1]
+		currentBlock, err := activeValidator.JSONRPC().Eth().BlockNumber()
+		require.NoError(t, err)
+		require.NoError(t, cluster.WaitForBlock(currentBlock+1, 2*blockTime))
+
+		// drop all validator nodes, leaving one node alive
+		numNodesToDrop := int(numNodes - 1)
+
+		var wg sync.WaitGroup
+		// drop bulk of nodes from cluster
+		for i := 1; i < numNodesToDrop; i++ {
+			node := cluster.Servers[i]
+
+			wg.Add(1)
+
+			go func(node *framework.TestServer) {
+				defer wg.Done()
+				node.Stop()
+			}(node)
+		}
+
+		wg.Wait()
+
+		// check that block production is stopped
+		currentBlock, err = activeValidator.JSONRPC().Eth().BlockNumber()
+		require.NoError(t, err)
+		oldBlockNumber := currentBlock
+		time.Sleep(2 * blockTime)
+		currentBlock, err = activeValidator.JSONRPC().Eth().BlockNumber()
+		require.NoError(t, err)
+		require.Equal(t, oldBlockNumber, currentBlock)
+
+		// start dropped nodes again
+		for i := 0; i < numNodesToDrop; i++ {
+			node := cluster.Servers[i]
+			node.Start()
+		}
+
+		require.NoError(t, cluster.WaitForBlock(oldBlockNumber+1, 3*blockTime))
+	})
+}
