From 795e781b4afbda4af6e59ab623122030e4920af5 Mon Sep 17 00:00:00 2001
From: Mike Palmiotto
Date: Fri, 10 Mar 2023 11:04:48 -0500
Subject: [PATCH] Add monotonic user restore test

---
 raft_test.go | 46 +++++++++++++++++++++++++++++++++++++++++-----
 testing.go   |  8 +++++++-
 2 files changed, 48 insertions(+), 6 deletions(-)

diff --git a/raft_test.go b/raft_test.go
index 451bc240..7fd69fb8 100644
--- a/raft_test.go
+++ b/raft_test.go
@@ -1024,11 +1024,16 @@ func TestRaft_SnapshotRestore_Monotonic(t *testing.T) {
 	// Make the cluster
 	conf := inmemConfig(t)
 	conf.TrailingLogs = 10
-	c := MakeCluster(1, t, conf)
+	opts := &MakeClusterOpts{
+		Peers:         1,
+		Bootstrap:     true,
+		Conf:          conf,
+		MonotonicLogs: true,
+	}
+	c := MakeClusterCustom(t, opts)
 	defer c.Close()
 
 	leader := c.Leader()
-	leader.logs = &MockMonotonicLogStore{s: leader.logs}
 
 	// Commit a lot of things
 	var future Future
@@ -1411,7 +1416,7 @@ func TestRaft_UserSnapshot(t *testing.T) {
 
 // snapshotAndRestore does a snapshot and restore sequence and applies the given
 // offset to the snapshot index, so we can try out different situations.
-func snapshotAndRestore(t *testing.T, offset uint64) {
+func snapshotAndRestore(t *testing.T, offset uint64, monotonicLogStore bool) {
 	// Make the cluster.
 	conf := inmemConfig(t)
 
@@ -1421,7 +1426,18 @@
 	conf.ElectionTimeout = 500 * time.Millisecond
 	conf.LeaderLeaseTimeout = 500 * time.Millisecond
 
-	c := MakeCluster(3, t, conf)
+	var c *cluster
+	if monotonicLogStore {
+		opts := &MakeClusterOpts{
+			Peers:         3,
+			Bootstrap:     true,
+			Conf:          conf,
+			MonotonicLogs: true,
+		}
+		c = MakeClusterCustom(t, opts)
+	} else {
+		c = MakeCluster(3, t, conf)
+	}
 	defer c.Close()
 
 	// Wait for things to get stable and commit some things.
@@ -1517,7 +1533,26 @@ func TestRaft_UserRestore(t *testing.T) {
 
 	for _, c := range cases {
 		t.Run(fmt.Sprintf("case %v", c), func(t *testing.T) {
-			snapshotAndRestore(t, c)
+			snapshotAndRestore(t, c, false)
 		})
 	}
 }
+
+func TestRaft_UserRestore_Monotonic(t *testing.T) {
+	cases := []uint64{
+		0,
+		1,
+		2,
+
+		// Snapshots from the future
+		100,
+		1000,
+		10000,
+	}
+
+	for _, c := range cases {
+		t.Run(fmt.Sprintf("case %v", c), func(t *testing.T) {
+			snapshotAndRestore(t, c, true)
+		})
+	}
+}
@@ -2449,6 +2484,7 @@ func TestRaft_LeadershipTransferStopRightAway(t *testing.T) {
 		t.Errorf("leadership shouldn't have started, but instead it error with: %v", err)
 	}
 }
+
 func TestRaft_GetConfigurationNoBootstrap(t *testing.T) {
 	c := MakeCluster(2, t, nil)
 	defer c.Close()
diff --git a/testing.go b/testing.go
index 89d2500f..f2f2d64a 100644
--- a/testing.go
+++ b/testing.go
@@ -714,6 +714,7 @@ type MakeClusterOpts struct {
 	ConfigStoreFSM  bool
 	MakeFSMFunc     func() FSM
 	LongstopTimeout time.Duration
+	MonotonicLogs   bool
 }
 
 // makeCluster will return a cluster with the given config and number of peers.
@@ -789,11 +790,16 @@ func makeCluster(t *testing.T, opts *MakeClusterOpts) *cluster {
 	// Create all the rafts
 	c.startTime = time.Now()
 	for i := 0; i < opts.Peers; i++ {
-		logs := c.stores[i]
+		var logs LogStore
+		logs = c.stores[i]
 		store := c.stores[i]
 		snap := c.snaps[i]
 		trans := c.trans[i]
 
+		if opts.MonotonicLogs {
+			logs = &MockMonotonicLogStore{s: logs}
+		}
+
 		peerConf := opts.Conf
 		peerConf.LocalID = configuration.Servers[i].ID
 		peerConf.Logger = newTestLoggerWithPrefix(t, string(configuration.Servers[i].ID))
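
---

The wiring above hinges on MockMonotonicLogStore, which wraps an arbitrary LogStore and reports it as monotonic so raft exercises its monotonic-index code paths. For readers without the repo handy, here is a minimal sketch of such a wrapper, assuming raft's MonotonicLogStore interface is just IsMonotonic() bool; the actual definition lives elsewhere in this repository and its details may differ:

	// Sketch only: a pass-through LogStore that reports itself as monotonic.
	// Mirrors how the type is used in the hunks above
	// (&MockMonotonicLogStore{s: logs}).
	type MockMonotonicLogStore struct {
		s LogStore
	}

	// IsMonotonic marks the wrapped store as having monotonic indexes.
	func (m *MockMonotonicLogStore) IsMonotonic() bool { return true }

	// The LogStore methods simply delegate to the wrapped store.
	func (m *MockMonotonicLogStore) FirstIndex() (uint64, error) { return m.s.FirstIndex() }
	func (m *MockMonotonicLogStore) LastIndex() (uint64, error)  { return m.s.LastIndex() }

	func (m *MockMonotonicLogStore) GetLog(index uint64, log *Log) error {
		return m.s.GetLog(index, log)
	}

	func (m *MockMonotonicLogStore) StoreLog(log *Log) error     { return m.s.StoreLog(log) }
	func (m *MockMonotonicLogStore) StoreLogs(logs []*Log) error { return m.s.StoreLogs(logs) }

	func (m *MockMonotonicLogStore) DeleteRange(min, max uint64) error {
		return m.s.DeleteRange(min, max)
	}

Doing the wrapping once at cluster construction time (opts.MonotonicLogs in makeCluster) also means individual tests no longer have to reach into raft internals the way the removed leader.logs assignment did.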