From 1382857bfe4f8cf102bf3370f890d6356e7b5a66 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Wed, 8 Jun 2022 13:50:30 +0800 Subject: [PATCH 01/35] pkg, scripts: refine the use of require.Error/NoError (#5124) ref tikv/pd#4813 Refine the use of require.Error/NoError. Signed-off-by: JmPotato --- pkg/encryption/crypter_test.go | 6 +++--- scripts/check-test.sh | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pkg/encryption/crypter_test.go b/pkg/encryption/crypter_test.go index e8b7e06bcdf..2f952d5b729 100644 --- a/pkg/encryption/crypter_test.go +++ b/pkg/encryption/crypter_test.go @@ -37,9 +37,9 @@ func TestKeyLength(t *testing.T) { t.Parallel() re := require.New(t) _, err := KeyLength(encryptionpb.EncryptionMethod_PLAINTEXT) - re.NotNil(err) + re.Error(err) _, err = KeyLength(encryptionpb.EncryptionMethod_UNKNOWN) - re.NotNil(err) + re.Error(err) length, err := KeyLength(encryptionpb.EncryptionMethod_AES128_CTR) re.NoError(err) re.Equal(16, length) @@ -111,5 +111,5 @@ func TestAesGcmCrypter(t *testing.T) { // ignore overflow fakeCiphertext[0] = ciphertext[0] + 1 _, err = AesGcmDecrypt(key, fakeCiphertext, iv) - re.NotNil(err) + re.Error(err) } diff --git a/scripts/check-test.sh b/scripts/check-test.sh index c8c5b72c0fe..f65d506565f 100755 --- a/scripts/check-test.sh +++ b/scripts/check-test.sh @@ -41,4 +41,12 @@ if [ "$res" ]; then exit 1 fi +res=$(grep -rn --include=\*_test.go -E "(re|suite|require)\.(Nil|NotNil)\((t, )?(err|error)" . | sort -u) + +if [ "$res" ]; then + echo "following packages use the inefficient assert function: please replace require.Nil/NotNil with require.NoError/Error" + echo "$res" + exit 1 +fi + exit 0 From 79b0290f55beef3065eab7fb22cb84b9e62f4915 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 8 Jun 2022 16:30:31 +0800 Subject: [PATCH 02/35] election: migrate test framework to testify (#5132) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/election/leadership_test.go | 65 +++++++++++++----------------- server/election/lease_test.go | 64 ++++++++++++++--------------- 2 files changed, 60 insertions(+), 69 deletions(-) diff --git a/server/election/leadership_test.go b/server/election/leadership_test.go index a52f867288c..9a4b52f782e 100644 --- a/server/election/leadership_test.go +++ b/server/election/leadership_test.go @@ -19,36 +19,29 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "go.etcd.io/etcd/clientv3" "go.etcd.io/etcd/embed" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testLeadershipSuite{}) - -type testLeadershipSuite struct{} - const defaultLeaseTimeout = 1 -func (s *testLeadershipSuite) TestLeadership(c *C) { +func TestLeadership(t *testing.T) { + re := require.New(t) cfg := etcdutil.NewTestSingleConfig() etcd, err := embed.StartEtcd(cfg) defer func() { etcd.Close() etcdutil.CleanConfig(cfg) }() - c.Assert(err, IsNil) + re.NoError(err) ep := cfg.LCUrls[0].String() client, err := clientv3.New(clientv3.Config{ Endpoints: []string{ep}, }) - c.Assert(err, IsNil) + re.NoError(err) <-etcd.Server.ReadyNotify() @@ -58,27 +51,27 @@ func (s *testLeadershipSuite) TestLeadership(c *C) { // leadership1 starts first and get the leadership err = leadership1.Campaign(defaultLeaseTimeout, "test_leader_1") - c.Assert(err, IsNil) + re.NoError(err) // leadership2 starts then and can not get the leadership err = leadership2.Campaign(defaultLeaseTimeout, "test_leader_2") - c.Assert(err, NotNil) + re.Error(err) - c.Assert(leadership1.Check(), IsTrue) + re.True(leadership1.Check()) // leadership2 failed, so the check should return false - c.Assert(leadership2.Check(), IsFalse) + re.False(leadership2.Check()) // Sleep longer than the defaultLeaseTimeout to wait for the lease expires time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Assert(leadership1.Check(), IsFalse) - c.Assert(leadership2.Check(), IsFalse) + re.False(leadership1.Check()) + re.False(leadership2.Check()) // Delete the leader key and campaign for leadership1 err = leadership1.DeleteLeaderKey() - c.Assert(err, IsNil) + re.NoError(err) err = leadership1.Campaign(defaultLeaseTimeout, "test_leader_1") - c.Assert(err, IsNil) - c.Assert(leadership1.Check(), IsTrue) + re.NoError(err) + re.True(leadership1.Check()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() go leadership1.Keep(ctx) @@ -86,15 +79,15 @@ func (s *testLeadershipSuite) TestLeadership(c *C) { // Sleep longer than the defaultLeaseTimeout time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Assert(leadership1.Check(), IsTrue) - c.Assert(leadership2.Check(), IsFalse) + re.True(leadership1.Check()) + re.False(leadership2.Check()) // Delete the leader key and re-campaign for leadership2 err = leadership1.DeleteLeaderKey() - c.Assert(err, IsNil) + re.NoError(err) err = leadership2.Campaign(defaultLeaseTimeout, "test_leader_2") - c.Assert(err, IsNil) - c.Assert(leadership2.Check(), IsTrue) + re.NoError(err) + re.True(leadership2.Check()) ctx, cancel = context.WithCancel(context.Background()) defer cancel() go leadership2.Keep(ctx) @@ -102,14 +95,14 @@ func (s *testLeadershipSuite) TestLeadership(c *C) { // Sleep longer than the defaultLeaseTimeout time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Assert(leadership1.Check(), IsFalse) - c.Assert(leadership2.Check(), IsTrue) + re.False(leadership1.Check()) + re.True(leadership2.Check()) // Test resetting the leadership. leadership1.Reset() leadership2.Reset() - c.Assert(leadership1.Check(), IsFalse) - c.Assert(leadership2.Check(), IsFalse) + re.False(leadership1.Check()) + re.False(leadership2.Check()) // Try to keep the reset leadership. leadership1.Keep(ctx) @@ -117,12 +110,12 @@ func (s *testLeadershipSuite) TestLeadership(c *C) { // Check the lease. 
lease1 := leadership1.getLease() - c.Assert(lease1, NotNil) + re.NotNil(lease1) lease2 := leadership1.getLease() - c.Assert(lease2, NotNil) + re.NotNil(lease2) - c.Assert(lease1.IsExpired(), IsTrue) - c.Assert(lease2.IsExpired(), IsTrue) - c.Assert(lease1.Close(), IsNil) - c.Assert(lease2.Close(), IsNil) + re.True(lease1.IsExpired()) + re.True(lease2.IsExpired()) + re.NoError(lease1.Close()) + re.NoError(lease2.Close()) } diff --git a/server/election/lease_test.go b/server/election/lease_test.go index 0c0aa3c1687..ef8c12be2e9 100644 --- a/server/election/lease_test.go +++ b/server/election/lease_test.go @@ -16,32 +16,30 @@ package election import ( "context" + "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "go.etcd.io/etcd/clientv3" "go.etcd.io/etcd/embed" ) -var _ = Suite(&testLeaseSuite{}) - -type testLeaseSuite struct{} - -func (s *testLeaseSuite) TestLease(c *C) { +func TestLease(t *testing.T) { + re := require.New(t) cfg := etcdutil.NewTestSingleConfig() etcd, err := embed.StartEtcd(cfg) defer func() { etcd.Close() etcdutil.CleanConfig(cfg) }() - c.Assert(err, IsNil) + re.NoError(err) ep := cfg.LCUrls[0].String() client, err := clientv3.New(clientv3.Config{ Endpoints: []string{ep}, }) - c.Assert(err, IsNil) + re.NoError(err) <-etcd.Server.ReadyNotify() @@ -56,51 +54,51 @@ func (s *testLeaseSuite) TestLease(c *C) { client: client, lease: clientv3.NewLease(client), } - c.Check(lease1.IsExpired(), IsTrue) - c.Check(lease2.IsExpired(), IsTrue) - c.Check(lease1.Close(), IsNil) - c.Check(lease2.Close(), IsNil) + re.True(lease1.IsExpired()) + re.True(lease2.IsExpired()) + re.NoError(lease1.Close()) + re.NoError(lease2.Close()) // Grant the two leases with the same timeout. - c.Check(lease1.Grant(defaultLeaseTimeout), IsNil) - c.Check(lease2.Grant(defaultLeaseTimeout), IsNil) - c.Check(lease1.IsExpired(), IsFalse) - c.Check(lease2.IsExpired(), IsFalse) + re.NoError(lease1.Grant(defaultLeaseTimeout)) + re.NoError(lease2.Grant(defaultLeaseTimeout)) + re.False(lease1.IsExpired()) + re.False(lease2.IsExpired()) // Wait for a while to make both two leases timeout. time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Check(lease1.IsExpired(), IsTrue) - c.Check(lease2.IsExpired(), IsTrue) + re.True(lease1.IsExpired()) + re.True(lease2.IsExpired()) // Grant the two leases with different timeouts. - c.Check(lease1.Grant(defaultLeaseTimeout), IsNil) - c.Check(lease2.Grant(defaultLeaseTimeout*4), IsNil) - c.Check(lease1.IsExpired(), IsFalse) - c.Check(lease2.IsExpired(), IsFalse) + re.NoError(lease1.Grant(defaultLeaseTimeout)) + re.NoError(lease2.Grant(defaultLeaseTimeout * 4)) + re.False(lease1.IsExpired()) + re.False(lease2.IsExpired()) // Wait for a while to make one of the lease timeout. time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Check(lease1.IsExpired(), IsTrue) - c.Check(lease2.IsExpired(), IsFalse) + re.True(lease1.IsExpired()) + re.False(lease2.IsExpired()) // Close both of the two leases. - c.Check(lease1.Close(), IsNil) - c.Check(lease2.Close(), IsNil) - c.Check(lease1.IsExpired(), IsTrue) - c.Check(lease2.IsExpired(), IsTrue) + re.NoError(lease1.Close()) + re.NoError(lease2.Close()) + re.True(lease1.IsExpired()) + re.True(lease2.IsExpired()) // Grant the lease1 and keep it alive. 
- c.Check(lease1.Grant(defaultLeaseTimeout), IsNil) - c.Check(lease1.IsExpired(), IsFalse) + re.NoError(lease1.Grant(defaultLeaseTimeout)) + re.False(lease1.IsExpired()) ctx, cancel := context.WithCancel(context.Background()) go lease1.KeepAlive(ctx) defer cancel() // Wait for a timeout. time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Check(lease1.IsExpired(), IsFalse) + re.False(lease1.IsExpired()) // Close and wait for a timeout. - c.Check(lease1.Close(), IsNil) + re.NoError(lease1.Close()) time.Sleep((defaultLeaseTimeout + 1) * time.Second) - c.Check(lease1.IsExpired(), IsTrue) + re.True(lease1.IsExpired()) } From 6c0985d91647c9711c8456ae92bce3448f100e98 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 8 Jun 2022 17:02:30 +0800 Subject: [PATCH 03/35] core: migrate test framework to testify (#5123) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/core/region_test.go | 262 ++++++++++++++--------------- server/core/region_tree_test.go | 286 ++++++++++++++++---------------- server/core/store_stats_test.go | 31 ++-- server/core/store_test.go | 43 ++--- 4 files changed, 301 insertions(+), 321 deletions(-) diff --git a/server/core/region_test.go b/server/core/region_test.go index c1ed83b7f46..edf55c8ac7b 100644 --- a/server/core/region_test.go +++ b/server/core/region_test.go @@ -19,25 +19,17 @@ import ( "math" "math/rand" "strconv" - "strings" "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/server/id" ) -func TestCore(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testRegionInfoSuite{}) - -type testRegionInfoSuite struct{} - -func (s *testRegionInfoSuite) TestNeedMerge(c *C) { +func TestNeedMerge(t *testing.T) { + re := require.New(t) mererSize, mergeKeys := int64(20), int64(200000) testdata := []struct { size int64 @@ -69,11 +61,12 @@ func (s *testRegionInfoSuite) TestNeedMerge(c *C) { approximateSize: v.size, approximateKeys: v.keys, } - c.Assert(r.NeedMerge(mererSize, mergeKeys), Equals, v.expect) + re.Equal(v.expect, r.NeedMerge(mererSize, mergeKeys)) } } -func (s *testRegionInfoSuite) TestSortedEqual(c *C) { +func TestSortedEqual(t *testing.T) { + re := require.New(t) testcases := []struct { idsA []int idsB []int @@ -153,47 +146,48 @@ func (s *testRegionInfoSuite) TestSortedEqual(c *C) { return peers } // test NewRegionInfo - for _, t := range testcases { - regionA := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(t.idsA)}, nil) - regionB := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(t.idsB)}, nil) - c.Assert(SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters()), Equals, t.isEqual) - c.Assert(SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters()), Equals, t.isEqual) + for _, test := range testcases { + regionA := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(test.idsA)}, nil) + regionB := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(test.idsB)}, nil) + re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) } // test RegionFromHeartbeat - for _, t := range testcases { + for _, test := range testcases { regionA := RegionFromHeartbeat(&pdpb.RegionHeartbeatRequest{ - Region: &metapb.Region{Id: 100, Peers: pickPeers(t.idsA)}, - DownPeers: pickPeerStats(t.idsA), - PendingPeers: pickPeers(t.idsA), + Region: 
&metapb.Region{Id: 100, Peers: pickPeers(test.idsA)}, + DownPeers: pickPeerStats(test.idsA), + PendingPeers: pickPeers(test.idsA), }) regionB := RegionFromHeartbeat(&pdpb.RegionHeartbeatRequest{ - Region: &metapb.Region{Id: 100, Peers: pickPeers(t.idsB)}, - DownPeers: pickPeerStats(t.idsB), - PendingPeers: pickPeers(t.idsB), + Region: &metapb.Region{Id: 100, Peers: pickPeers(test.idsB)}, + DownPeers: pickPeerStats(test.idsB), + PendingPeers: pickPeers(test.idsB), }) - c.Assert(SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters()), Equals, t.isEqual) - c.Assert(SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters()), Equals, t.isEqual) - c.Assert(SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers()), Equals, t.isEqual) - c.Assert(SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers()), Equals, t.isEqual) + re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + re.Equal(test.isEqual, SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers())) + re.Equal(test.isEqual, SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers())) } // test Clone region := NewRegionInfo(meta, meta.Peers[0]) - for _, t := range testcases { - downPeersA := pickPeerStats(t.idsA) - downPeersB := pickPeerStats(t.idsB) - pendingPeersA := pickPeers(t.idsA) - pendingPeersB := pickPeers(t.idsB) + for _, test := range testcases { + downPeersA := pickPeerStats(test.idsA) + downPeersB := pickPeerStats(test.idsB) + pendingPeersA := pickPeers(test.idsA) + pendingPeersB := pickPeers(test.idsB) regionA := region.Clone(WithDownPeers(downPeersA), WithPendingPeers(pendingPeersA)) regionB := region.Clone(WithDownPeers(downPeersB), WithPendingPeers(pendingPeersB)) - c.Assert(SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers()), Equals, t.isEqual) - c.Assert(SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers()), Equals, t.isEqual) + re.Equal(test.isEqual, SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers())) + re.Equal(test.isEqual, SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers())) } } -func (s *testRegionInfoSuite) TestInherit(c *C) { +func TestInherit(t *testing.T) { + re := require.New(t) // size in MB // case for approximateSize testcases := []struct { @@ -208,16 +202,16 @@ func (s *testRegionInfoSuite) TestInherit(c *C) { {true, 1, 2, 2}, {true, 2, 0, 2}, } - for _, t := range testcases { + for _, test := range testcases { var origin *RegionInfo - if t.originExists { + if test.originExists { origin = NewRegionInfo(&metapb.Region{Id: 100}, nil) - origin.approximateSize = int64(t.originSize) + origin.approximateSize = int64(test.originSize) } r := NewRegionInfo(&metapb.Region{Id: 100}, nil) - r.approximateSize = int64(t.size) + r.approximateSize = int64(test.size) r.Inherit(origin, false) - c.Assert(r.approximateSize, Equals, int64(t.expect)) + re.Equal(int64(test.expect), r.approximateSize) } // bucket @@ -234,17 +228,18 @@ func (s *testRegionInfoSuite) TestInherit(c *C) { origin := NewRegionInfo(&metapb.Region{Id: 100}, nil, SetBuckets(d.originBuckets)) r := NewRegionInfo(&metapb.Region{Id: 100}, nil) r.Inherit(origin, true) - c.Assert(r.GetBuckets(), DeepEquals, d.originBuckets) + re.Equal(d.originBuckets, r.GetBuckets()) // region will not inherit bucket keys. 
if origin.GetBuckets() != nil { newRegion := NewRegionInfo(&metapb.Region{Id: 100}, nil) newRegion.Inherit(origin, false) - c.Assert(newRegion.GetBuckets(), Not(DeepEquals), d.originBuckets) + re.NotEqual(d.originBuckets, newRegion.GetBuckets()) } } } -func (s *testRegionInfoSuite) TestRegionRoundingFlow(c *C) { +func TestRegionRoundingFlow(t *testing.T) { + re := require.New(t) testcases := []struct { flow uint64 digit int @@ -259,15 +254,16 @@ func (s *testRegionInfoSuite) TestRegionRoundingFlow(c *C) { {252623, math.MaxInt64, 0}, {252623, math.MinInt64, 252623}, } - for _, t := range testcases { - r := NewRegionInfo(&metapb.Region{Id: 100}, nil, WithFlowRoundByDigit(t.digit)) - r.readBytes = t.flow - r.writtenBytes = t.flow - c.Assert(r.GetRoundBytesRead(), Equals, t.expect) + for _, test := range testcases { + r := NewRegionInfo(&metapb.Region{Id: 100}, nil, WithFlowRoundByDigit(test.digit)) + r.readBytes = test.flow + r.writtenBytes = test.flow + re.Equal(test.expect, r.GetRoundBytesRead()) } } -func (s *testRegionInfoSuite) TestRegionWriteRate(c *C) { +func TestRegionWriteRate(t *testing.T) { + re := require.New(t) testcases := []struct { bytes uint64 keys uint64 @@ -284,25 +280,17 @@ func (s *testRegionInfoSuite) TestRegionWriteRate(c *C) { {0, 0, 500, 0, 0}, {10, 3, 500, 0, 0}, } - for _, t := range testcases { - r := NewRegionInfo(&metapb.Region{Id: 100}, nil, SetWrittenBytes(t.bytes), SetWrittenKeys(t.keys), SetReportInterval(t.interval)) + for _, test := range testcases { + r := NewRegionInfo(&metapb.Region{Id: 100}, nil, SetWrittenBytes(test.bytes), SetWrittenKeys(test.keys), SetReportInterval(test.interval)) bytesRate, keysRate := r.GetWriteRate() - c.Assert(bytesRate, Equals, t.expectBytesRate) - c.Assert(keysRate, Equals, t.expectKeysRate) + re.Equal(test.expectBytesRate, bytesRate) + re.Equal(test.expectKeysRate, keysRate) } } -var _ = Suite(&testRegionGuideSuite{}) - -type testRegionGuideSuite struct { - RegionGuide RegionGuideFunc -} - -func (s *testRegionGuideSuite) SetUpSuite(c *C) { - s.RegionGuide = GenerateRegionGuideFunc(false) -} - -func (s *testRegionGuideSuite) TestNeedSync(c *C) { +func TestNeedSync(t *testing.T) { + re := require.New(t) + RegionGuide := GenerateRegionGuideFunc(false) meta := &metapb.Region{ Id: 1000, StartKey: []byte("a"), @@ -369,41 +357,38 @@ func (s *testRegionGuideSuite) TestNeedSync(c *C) { }, } - for _, t := range testcases { - regionA := region.Clone(t.optionsA...) - regionB := region.Clone(t.optionsB...) - _, _, _, needSync := s.RegionGuide(regionA, regionB) - c.Assert(needSync, Equals, t.needSync) + for _, test := range testcases { + regionA := region.Clone(test.optionsA...) + regionB := region.Clone(test.optionsB...) 
+ _, _, _, needSync := RegionGuide(regionA, regionB) + re.Equal(test.needSync, needSync) } } -var _ = Suite(&testRegionMapSuite{}) - -type testRegionMapSuite struct{} - -func (s *testRegionMapSuite) TestRegionMap(c *C) { +func TestRegionMap(t *testing.T) { + re := require.New(t) rm := newRegionMap() - s.check(c, rm) - rm.AddNew(s.regionInfo(1)) - s.check(c, rm, 1) + check(re, rm) + rm.AddNew(regionInfo(1)) + check(re, rm, 1) - rm.AddNew(s.regionInfo(2)) - rm.AddNew(s.regionInfo(3)) - s.check(c, rm, 1, 2, 3) + rm.AddNew(regionInfo(2)) + rm.AddNew(regionInfo(3)) + check(re, rm, 1, 2, 3) - rm.AddNew(s.regionInfo(3)) + rm.AddNew(regionInfo(3)) rm.Delete(4) - s.check(c, rm, 1, 2, 3) + check(re, rm, 1, 2, 3) rm.Delete(3) rm.Delete(1) - s.check(c, rm, 2) + check(re, rm, 2) - rm.AddNew(s.regionInfo(3)) - s.check(c, rm, 2, 3) + rm.AddNew(regionInfo(3)) + check(re, rm, 2, 3) } -func (s *testRegionMapSuite) regionInfo(id uint64) *RegionInfo { +func regionInfo(id uint64) *RegionInfo { return &RegionInfo{ meta: &metapb.Region{ Id: id, @@ -413,13 +398,13 @@ func (s *testRegionMapSuite) regionInfo(id uint64) *RegionInfo { } } -func (s *testRegionMapSuite) check(c *C, rm regionMap, ids ...uint64) { +func check(re *require.Assertions, rm regionMap, ids ...uint64) { // Check Get. for _, id := range ids { - c.Assert(rm.Get(id).region.GetID(), Equals, id) + re.Equal(id, rm.Get(id).region.GetID()) } // Check Len. - c.Assert(rm.Len(), Equals, len(ids)) + re.Equal(len(ids), rm.Len()) // Check id set. expect := make(map[uint64]struct{}) for _, id := range ids { @@ -429,14 +414,11 @@ func (s *testRegionMapSuite) check(c *C, rm regionMap, ids ...uint64) { for _, r := range rm { set1[r.region.GetID()] = struct{}{} } - c.Assert(set1, DeepEquals, expect) + re.Equal(expect, set1) } -var _ = Suite(&testRegionKey{}) - -type testRegionKey struct{} - -func (*testRegionKey) TestRegionKey(c *C) { +func TestRegionKey(t *testing.T) { + re := require.New(t) testCase := []struct { key string expect string @@ -446,29 +428,30 @@ func (*testRegionKey) TestRegionKey(c *C) { {"\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf8\"", `80000000000000FF0500000000000000F8`}, } - for _, t := range testCase { - got, err := strconv.Unquote(t.key) - c.Assert(err, IsNil) + for _, test := range testCase { + got, err := strconv.Unquote(test.key) + re.NoError(err) s := fmt.Sprintln(RegionToHexMeta(&metapb.Region{StartKey: []byte(got)})) - c.Assert(strings.Contains(s, t.expect), IsTrue) + re.Contains(s, test.expect) // start key changed origin := NewRegionInfo(&metapb.Region{EndKey: []byte(got)}, nil) region := NewRegionInfo(&metapb.Region{StartKey: []byte(got), EndKey: []byte(got)}, nil) s = DiffRegionKeyInfo(origin, region) - c.Assert(s, Matches, ".*StartKey Changed.*") - c.Assert(strings.Contains(s, t.expect), IsTrue) + re.Regexp(".*StartKey Changed.*", s) + re.Contains(s, test.expect) // end key changed origin = NewRegionInfo(&metapb.Region{StartKey: []byte(got)}, nil) region = NewRegionInfo(&metapb.Region{StartKey: []byte(got), EndKey: []byte(got)}, nil) s = DiffRegionKeyInfo(origin, region) - c.Assert(s, Matches, ".*EndKey Changed.*") - c.Assert(strings.Contains(s, t.expect), IsTrue) + re.Regexp(".*EndKey Changed.*", s) + re.Contains(s, test.expect) } } -func (*testRegionKey) TestSetRegion(c *C) { +func TestSetRegion(t *testing.T) { + re := require.New(t) regions := NewRegionsInfo() for i := 0; i < 100; i++ { peer1 := &metapb.Peer{StoreId: uint64(i%5 + 1), Id: uint64(i*5 + 1)} @@ -495,9 +478,9 @@ func 
(*testRegionKey) TestSetRegion(c *C) { region.learners = append(region.learners, peer2) region.pendingPeers = append(region.pendingPeers, peer3) regions.SetRegion(region) - checkRegions(c, regions) - c.Assert(regions.tree.length(), Equals, 97) - c.Assert(regions.GetRegions(), HasLen, 97) + checkRegions(re, regions) + re.Equal(97, regions.tree.length()) + re.Len(regions.GetRegions(), 97) regions.SetRegion(region) peer1 = &metapb.Peer{StoreId: uint64(2), Id: uint64(101)} @@ -512,21 +495,21 @@ func (*testRegionKey) TestSetRegion(c *C) { region.learners = append(region.learners, peer2) region.pendingPeers = append(region.pendingPeers, peer3) regions.SetRegion(region) - checkRegions(c, regions) - c.Assert(regions.tree.length(), Equals, 97) - c.Assert(regions.GetRegions(), HasLen, 97) + checkRegions(re, regions) + re.Equal(97, regions.tree.length()) + re.Len(regions.GetRegions(), 97) // Test remove overlaps. region = region.Clone(WithStartKey([]byte(fmt.Sprintf("%20d", 175))), WithNewRegionID(201)) - c.Assert(regions.GetRegion(21), NotNil) - c.Assert(regions.GetRegion(18), NotNil) + re.NotNil(regions.GetRegion(21)) + re.NotNil(regions.GetRegion(18)) regions.SetRegion(region) - checkRegions(c, regions) - c.Assert(regions.tree.length(), Equals, 96) - c.Assert(regions.GetRegions(), HasLen, 96) - c.Assert(regions.GetRegion(201), NotNil) - c.Assert(regions.GetRegion(21), IsNil) - c.Assert(regions.GetRegion(18), IsNil) + checkRegions(re, regions) + re.Equal(96, regions.tree.length()) + re.Len(regions.GetRegions(), 96) + re.NotNil(regions.GetRegion(201)) + re.Nil(regions.GetRegion(21)) + re.Nil(regions.GetRegion(18)) // Test update keys and size of region. region = region.Clone( @@ -536,17 +519,18 @@ func (*testRegionKey) TestSetRegion(c *C) { SetWrittenKeys(10), SetReportInterval(5)) regions.SetRegion(region) - checkRegions(c, regions) - c.Assert(regions.tree.length(), Equals, 96) - c.Assert(regions.GetRegions(), HasLen, 96) - c.Assert(regions.GetRegion(201), NotNil) - c.Assert(regions.tree.TotalSize(), Equals, int64(30)) + checkRegions(re, regions) + re.Equal(96, regions.tree.length()) + re.Len(regions.GetRegions(), 96) + re.NotNil(regions.GetRegion(201)) + re.Equal(int64(30), regions.tree.TotalSize()) bytesRate, keysRate := regions.tree.TotalWriteRate() - c.Assert(bytesRate, Equals, float64(8)) - c.Assert(keysRate, Equals, float64(2)) + re.Equal(float64(8), bytesRate) + re.Equal(float64(2), keysRate) } -func (*testRegionKey) TestShouldRemoveFromSubTree(c *C) { +func TestShouldRemoveFromSubTree(t *testing.T) { + re := require.New(t) peer1 := &metapb.Peer{StoreId: uint64(1), Id: uint64(1)} peer2 := &metapb.Peer{StoreId: uint64(2), Id: uint64(2)} peer3 := &metapb.Peer{StoreId: uint64(3), Id: uint64(3)} @@ -564,28 +548,28 @@ func (*testRegionKey) TestShouldRemoveFromSubTree(c *C) { StartKey: []byte(fmt.Sprintf("%20d", 10)), EndKey: []byte(fmt.Sprintf("%20d", 20)), }, peer1) - c.Assert(region.peersEqualTo(origin), IsTrue) + re.True(region.peersEqualTo(origin)) region.leader = peer2 - c.Assert(region.peersEqualTo(origin), IsFalse) + re.False(region.peersEqualTo(origin)) region.leader = peer1 region.pendingPeers = append(region.pendingPeers, peer4) - c.Assert(region.peersEqualTo(origin), IsFalse) + re.False(region.peersEqualTo(origin)) region.pendingPeers = nil region.learners = append(region.learners, peer2) - c.Assert(region.peersEqualTo(origin), IsFalse) + re.False(region.peersEqualTo(origin)) origin.learners = append(origin.learners, peer2, peer3) region.learners = append(region.learners, peer4) - 
c.Assert(region.peersEqualTo(origin), IsTrue) + re.True(region.peersEqualTo(origin)) region.voters[2].StoreId = 4 - c.Assert(region.peersEqualTo(origin), IsFalse) + re.False(region.peersEqualTo(origin)) } -func checkRegions(c *C, regions *RegionsInfo) { +func checkRegions(re *require.Assertions, regions *RegionsInfo) { leaderMap := make(map[uint64]uint64) followerMap := make(map[uint64]uint64) learnerMap := make(map[uint64]uint64) @@ -619,16 +603,16 @@ func checkRegions(c *C, regions *RegionsInfo) { } } for key, value := range regions.leaders { - c.Assert(value.length(), Equals, int(leaderMap[key])) + re.Equal(int(leaderMap[key]), value.length()) } for key, value := range regions.followers { - c.Assert(value.length(), Equals, int(followerMap[key])) + re.Equal(int(followerMap[key]), value.length()) } for key, value := range regions.learners { - c.Assert(value.length(), Equals, int(learnerMap[key])) + re.Equal(int(learnerMap[key]), value.length()) } for key, value := range regions.pendingPeers { - c.Assert(value.length(), Equals, int(pendingPeerMap[key])) + re.Equal(int(pendingPeerMap[key]), value.length()) } } diff --git a/server/core/region_tree_test.go b/server/core/region_tree_test.go index 92c26744abf..0f813717fcb 100644 --- a/server/core/region_tree_test.go +++ b/server/core/region_tree_test.go @@ -19,16 +19,13 @@ import ( "math/rand" "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testRegionSuite{}) - -type testRegionSuite struct{} - -func (s *testRegionSuite) TestRegionInfo(c *C) { +func TestRegionInfo(t *testing.T) { + re := require.New(t) n := uint64(3) peers := make([]*metapb.Peer, 0, n) @@ -51,104 +48,108 @@ func (s *testRegionSuite) TestRegionInfo(c *C) { WithPendingPeers([]*metapb.Peer{pendingPeer})) r := info.Clone() - c.Assert(r, DeepEquals, info) + re.Equal(info, r) for i := uint64(0); i < n; i++ { - c.Assert(r.GetPeer(i), Equals, r.meta.Peers[i]) + re.Equal(r.meta.Peers[i], r.GetPeer(i)) } - c.Assert(r.GetPeer(n), IsNil) - c.Assert(r.GetDownPeer(n), IsNil) - c.Assert(r.GetDownPeer(downPeer.GetId()), DeepEquals, downPeer) - c.Assert(r.GetPendingPeer(n), IsNil) - c.Assert(r.GetPendingPeer(pendingPeer.GetId()), DeepEquals, pendingPeer) + re.Nil(r.GetPeer(n)) + re.Nil(r.GetDownPeer(n)) + re.Equal(downPeer, r.GetDownPeer(downPeer.GetId())) + re.Nil(r.GetPendingPeer(n)) + re.Equal(pendingPeer, r.GetPendingPeer(pendingPeer.GetId())) for i := uint64(0); i < n; i++ { - c.Assert(r.GetStorePeer(i).GetStoreId(), Equals, i) + re.Equal(i, r.GetStorePeer(i).GetStoreId()) } - c.Assert(r.GetStorePeer(n), IsNil) + re.Nil(r.GetStorePeer(n)) removePeer := &metapb.Peer{ Id: n, StoreId: n, } r = r.Clone(SetPeers(append(r.meta.Peers, removePeer))) - c.Assert(DiffRegionPeersInfo(info, r), Matches, "Add peer.*") - c.Assert(DiffRegionPeersInfo(r, info), Matches, "Remove peer.*") - c.Assert(r.GetStorePeer(n), DeepEquals, removePeer) + re.Regexp("Add peer.*", DiffRegionPeersInfo(info, r)) + re.Regexp("Remove peer.*", DiffRegionPeersInfo(r, info)) + re.Equal(removePeer, r.GetStorePeer(n)) r = r.Clone(WithRemoveStorePeer(n)) - c.Assert(DiffRegionPeersInfo(r, info), Equals, "") - c.Assert(r.GetStorePeer(n), IsNil) + re.Equal("", DiffRegionPeersInfo(r, info)) + re.Nil(r.GetStorePeer(n)) r = r.Clone(WithStartKey([]byte{0})) - c.Assert(DiffRegionKeyInfo(r, info), Matches, "StartKey Changed.*") + re.Regexp("StartKey Changed.*", DiffRegionKeyInfo(r, info)) r = 
r.Clone(WithEndKey([]byte{1})) - c.Assert(DiffRegionKeyInfo(r, info), Matches, ".*EndKey Changed.*") + re.Regexp(".*EndKey Changed.*", DiffRegionKeyInfo(r, info)) stores := r.GetStoreIds() - c.Assert(stores, HasLen, int(n)) + re.Len(stores, int(n)) for i := uint64(0); i < n; i++ { _, ok := stores[i] - c.Assert(ok, IsTrue) + re.True(ok) } followers := r.GetFollowers() - c.Assert(followers, HasLen, int(n-1)) + re.Len(followers, int(n-1)) for i := uint64(1); i < n; i++ { - c.Assert(followers[peers[i].GetStoreId()], DeepEquals, peers[i]) + re.Equal(peers[i], followers[peers[i].GetStoreId()]) } } -func (s *testRegionSuite) TestRegionItem(c *C) { +func TestRegionItem(t *testing.T) { + re := require.New(t) item := newRegionItem([]byte("b"), []byte{}) - c.Assert(item.Less(newRegionItem([]byte("a"), []byte{})), IsFalse) - c.Assert(item.Less(newRegionItem([]byte("b"), []byte{})), IsFalse) - c.Assert(item.Less(newRegionItem([]byte("c"), []byte{})), IsTrue) + re.False(item.Less(newRegionItem([]byte("a"), []byte{}))) + re.False(item.Less(newRegionItem([]byte("b"), []byte{}))) + re.True(item.Less(newRegionItem([]byte("c"), []byte{}))) - c.Assert(item.Contains([]byte("a")), IsFalse) - c.Assert(item.Contains([]byte("b")), IsTrue) - c.Assert(item.Contains([]byte("c")), IsTrue) + re.False(item.Contains([]byte("a"))) + re.True(item.Contains([]byte("b"))) + re.True(item.Contains([]byte("c"))) item = newRegionItem([]byte("b"), []byte("d")) - c.Assert(item.Contains([]byte("a")), IsFalse) - c.Assert(item.Contains([]byte("b")), IsTrue) - c.Assert(item.Contains([]byte("c")), IsTrue) - c.Assert(item.Contains([]byte("d")), IsFalse) + re.False(item.Contains([]byte("a"))) + re.True(item.Contains([]byte("b"))) + re.True(item.Contains([]byte("c"))) + re.False(item.Contains([]byte("d"))) } -func (s *testRegionSuite) newRegionWithStat(start, end string, size, keys int64) *RegionInfo { +func newRegionWithStat(start, end string, size, keys int64) *RegionInfo { region := NewTestRegionInfo([]byte(start), []byte(end)) region.approximateSize, region.approximateKeys = size, keys return region } -func (s *testRegionSuite) TestRegionTreeStat(c *C) { +func TestRegionTreeStat(t *testing.T) { + re := require.New(t) tree := newRegionTree() - c.Assert(tree.totalSize, Equals, int64(0)) - updateNewItem(tree, s.newRegionWithStat("a", "b", 1, 2)) - c.Assert(tree.totalSize, Equals, int64(1)) - updateNewItem(tree, s.newRegionWithStat("b", "c", 3, 4)) - c.Assert(tree.totalSize, Equals, int64(4)) - updateNewItem(tree, s.newRegionWithStat("b", "e", 5, 6)) - c.Assert(tree.totalSize, Equals, int64(6)) - tree.remove(s.newRegionWithStat("a", "b", 1, 2)) - c.Assert(tree.totalSize, Equals, int64(5)) - tree.remove(s.newRegionWithStat("f", "g", 1, 2)) - c.Assert(tree.totalSize, Equals, int64(5)) + re.Equal(int64(0), tree.totalSize) + updateNewItem(tree, newRegionWithStat("a", "b", 1, 2)) + re.Equal(int64(1), tree.totalSize) + updateNewItem(tree, newRegionWithStat("b", "c", 3, 4)) + re.Equal(int64(4), tree.totalSize) + updateNewItem(tree, newRegionWithStat("b", "e", 5, 6)) + re.Equal(int64(6), tree.totalSize) + tree.remove(newRegionWithStat("a", "b", 1, 2)) + re.Equal(int64(5), tree.totalSize) + tree.remove(newRegionWithStat("f", "g", 1, 2)) + re.Equal(int64(5), tree.totalSize) } -func (s *testRegionSuite) TestRegionTreeMerge(c *C) { +func TestRegionTreeMerge(t *testing.T) { + re := require.New(t) tree := newRegionTree() - updateNewItem(tree, s.newRegionWithStat("a", "b", 1, 2)) - updateNewItem(tree, s.newRegionWithStat("b", "c", 3, 4)) - 
c.Assert(tree.totalSize, Equals, int64(4)) - updateNewItem(tree, s.newRegionWithStat("a", "c", 5, 5)) - c.Assert(tree.totalSize, Equals, int64(5)) + updateNewItem(tree, newRegionWithStat("a", "b", 1, 2)) + updateNewItem(tree, newRegionWithStat("b", "c", 3, 4)) + re.Equal(int64(4), tree.totalSize) + updateNewItem(tree, newRegionWithStat("a", "c", 5, 5)) + re.Equal(int64(5), tree.totalSize) } -func (s *testRegionSuite) TestRegionTree(c *C) { +func TestRegionTree(t *testing.T) { + re := require.New(t) tree := newRegionTree() - c.Assert(tree.search([]byte("a")), IsNil) + re.Nil(tree.search([]byte("a"))) regionA := NewTestRegionInfo([]byte("a"), []byte("b")) regionB := NewTestRegionInfo([]byte("b"), []byte("c")) @@ -157,86 +158,87 @@ func (s *testRegionSuite) TestRegionTree(c *C) { updateNewItem(tree, regionA) updateNewItem(tree, regionC) - c.Assert(tree.search([]byte{}), IsNil) - c.Assert(tree.search([]byte("a")), Equals, regionA) - c.Assert(tree.search([]byte("b")), IsNil) - c.Assert(tree.search([]byte("c")), Equals, regionC) - c.Assert(tree.search([]byte("d")), IsNil) + re.Nil(tree.search([]byte{})) + re.Equal(regionA, tree.search([]byte("a"))) + re.Nil(tree.search([]byte("b"))) + re.Equal(regionC, tree.search([]byte("c"))) + re.Nil(tree.search([]byte("d"))) // search previous region - c.Assert(tree.searchPrev([]byte("a")), IsNil) - c.Assert(tree.searchPrev([]byte("b")), IsNil) - c.Assert(tree.searchPrev([]byte("c")), IsNil) + re.Nil(tree.searchPrev([]byte("a"))) + re.Nil(tree.searchPrev([]byte("b"))) + re.Nil(tree.searchPrev([]byte("c"))) updateNewItem(tree, regionB) // search previous region - c.Assert(tree.searchPrev([]byte("c")), Equals, regionB) - c.Assert(tree.searchPrev([]byte("b")), Equals, regionA) + re.Equal(regionB, tree.searchPrev([]byte("c"))) + re.Equal(regionA, tree.searchPrev([]byte("b"))) tree.remove(regionC) updateNewItem(tree, regionD) - c.Assert(tree.search([]byte{}), IsNil) - c.Assert(tree.search([]byte("a")), Equals, regionA) - c.Assert(tree.search([]byte("b")), Equals, regionB) - c.Assert(tree.search([]byte("c")), IsNil) - c.Assert(tree.search([]byte("d")), Equals, regionD) + re.Nil(tree.search([]byte{})) + re.Equal(regionA, tree.search([]byte("a"))) + re.Equal(regionB, tree.search([]byte("b"))) + re.Nil(tree.search([]byte("c"))) + re.Equal(regionD, tree.search([]byte("d"))) // check get adjacent regions prev, next := tree.getAdjacentRegions(regionA) - c.Assert(prev, IsNil) - c.Assert(next.region, Equals, regionB) + re.Nil(prev) + re.Equal(regionB, next.region) prev, next = tree.getAdjacentRegions(regionB) - c.Assert(prev.region, Equals, regionA) - c.Assert(next.region, Equals, regionD) + re.Equal(regionA, prev.region) + re.Equal(regionD, next.region) prev, next = tree.getAdjacentRegions(regionC) - c.Assert(prev.region, Equals, regionB) - c.Assert(next.region, Equals, regionD) + re.Equal(regionB, prev.region) + re.Equal(regionD, next.region) prev, next = tree.getAdjacentRegions(regionD) - c.Assert(prev.region, Equals, regionB) - c.Assert(next, IsNil) + re.Equal(regionB, prev.region) + re.Nil(next) // region with the same range and different region id will not be delete. 
region0 := newRegionItem([]byte{}, []byte("a")).region updateNewItem(tree, region0) - c.Assert(tree.search([]byte{}), Equals, region0) + re.Equal(region0, tree.search([]byte{})) anotherRegion0 := newRegionItem([]byte{}, []byte("a")).region anotherRegion0.meta.Id = 123 tree.remove(anotherRegion0) - c.Assert(tree.search([]byte{}), Equals, region0) + re.Equal(region0, tree.search([]byte{})) // overlaps with 0, A, B, C. region0D := newRegionItem([]byte(""), []byte("d")).region updateNewItem(tree, region0D) - c.Assert(tree.search([]byte{}), Equals, region0D) - c.Assert(tree.search([]byte("a")), Equals, region0D) - c.Assert(tree.search([]byte("b")), Equals, region0D) - c.Assert(tree.search([]byte("c")), Equals, region0D) - c.Assert(tree.search([]byte("d")), Equals, regionD) + re.Equal(region0D, tree.search([]byte{})) + re.Equal(region0D, tree.search([]byte("a"))) + re.Equal(region0D, tree.search([]byte("b"))) + re.Equal(region0D, tree.search([]byte("c"))) + re.Equal(regionD, tree.search([]byte("d"))) // overlaps with D. regionE := newRegionItem([]byte("e"), []byte{}).region updateNewItem(tree, regionE) - c.Assert(tree.search([]byte{}), Equals, region0D) - c.Assert(tree.search([]byte("a")), Equals, region0D) - c.Assert(tree.search([]byte("b")), Equals, region0D) - c.Assert(tree.search([]byte("c")), Equals, region0D) - c.Assert(tree.search([]byte("d")), IsNil) - c.Assert(tree.search([]byte("e")), Equals, regionE) + re.Equal(region0D, tree.search([]byte{})) + re.Equal(region0D, tree.search([]byte("a"))) + re.Equal(region0D, tree.search([]byte("b"))) + re.Equal(region0D, tree.search([]byte("c"))) + re.Nil(tree.search([]byte("d"))) + re.Equal(regionE, tree.search([]byte("e"))) } -func updateRegions(c *C, tree *regionTree, regions []*RegionInfo) { +func updateRegions(re *require.Assertions, tree *regionTree, regions []*RegionInfo) { for _, region := range regions { updateNewItem(tree, region) - c.Assert(tree.search(region.GetStartKey()), Equals, region) + re.Equal(region, tree.search(region.GetStartKey())) if len(region.GetEndKey()) > 0 { end := region.GetEndKey()[0] - c.Assert(tree.search([]byte{end - 1}), Equals, region) - c.Assert(tree.search([]byte{end + 1}), Not(Equals), region) + re.Equal(region, tree.search([]byte{end - 1})) + re.NotEqual(region, tree.search([]byte{end + 1})) } } } -func (s *testRegionSuite) TestRegionTreeSplitAndMerge(c *C) { +func TestRegionTreeSplitAndMerge(t *testing.T) { + re := require.New(t) tree := newRegionTree() regions := []*RegionInfo{newRegionItem([]byte{}, []byte{}).region} @@ -246,13 +248,13 @@ func (s *testRegionSuite) TestRegionTreeSplitAndMerge(c *C) { // Split. for i := 0; i < n; i++ { regions = SplitRegions(regions) - updateRegions(c, tree, regions) + updateRegions(re, tree, regions) } // Merge. for i := 0; i < n; i++ { regions = MergeRegions(regions) - updateRegions(c, tree, regions) + updateRegions(re, tree, regions) } // Split twice and merge once. 
@@ -262,19 +264,20 @@ func (s *testRegionSuite) TestRegionTreeSplitAndMerge(c *C) { } else { regions = SplitRegions(regions) } - updateRegions(c, tree, regions) + updateRegions(re, tree, regions) } } -func (s *testRegionSuite) TestRandomRegion(c *C) { +func TestRandomRegion(t *testing.T) { + re := require.New(t) tree := newRegionTree() r := tree.RandomRegion(nil) - c.Assert(r, IsNil) + re.Nil(r) regionA := NewTestRegionInfo([]byte(""), []byte("g")) updateNewItem(tree, regionA) ra := tree.RandomRegion([]KeyRange{NewKeyRange("", "")}) - c.Assert(ra, DeepEquals, regionA) + re.Equal(regionA, ra) regionB := NewTestRegionInfo([]byte("g"), []byte("n")) regionC := NewTestRegionInfo([]byte("n"), []byte("t")) @@ -284,70 +287,71 @@ func (s *testRegionSuite) TestRandomRegion(c *C) { updateNewItem(tree, regionD) rb := tree.RandomRegion([]KeyRange{NewKeyRange("g", "n")}) - c.Assert(rb, DeepEquals, regionB) + re.Equal(regionB, rb) rc := tree.RandomRegion([]KeyRange{NewKeyRange("n", "t")}) - c.Assert(rc, DeepEquals, regionC) + re.Equal(regionC, rc) rd := tree.RandomRegion([]KeyRange{NewKeyRange("t", "")}) - c.Assert(rd, DeepEquals, regionD) - - re := tree.RandomRegion([]KeyRange{NewKeyRange("", "a")}) - c.Assert(re, IsNil) - re = tree.RandomRegion([]KeyRange{NewKeyRange("o", "s")}) - c.Assert(re, IsNil) - re = tree.RandomRegion([]KeyRange{NewKeyRange("", "a")}) - c.Assert(re, IsNil) - re = tree.RandomRegion([]KeyRange{NewKeyRange("z", "")}) - c.Assert(re, IsNil) - - checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []KeyRange{NewKeyRange("", "")}) - checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB}, []KeyRange{NewKeyRange("", "n")}) - checkRandomRegion(c, tree, []*RegionInfo{regionC, regionD}, []KeyRange{NewKeyRange("n", "")}) - checkRandomRegion(c, tree, []*RegionInfo{}, []KeyRange{NewKeyRange("h", "s")}) - checkRandomRegion(c, tree, []*RegionInfo{regionB, regionC}, []KeyRange{NewKeyRange("a", "z")}) + re.Equal(regionD, rd) + + rf := tree.RandomRegion([]KeyRange{NewKeyRange("", "a")}) + re.Nil(rf) + rf = tree.RandomRegion([]KeyRange{NewKeyRange("o", "s")}) + re.Nil(rf) + rf = tree.RandomRegion([]KeyRange{NewKeyRange("", "a")}) + re.Nil(rf) + rf = tree.RandomRegion([]KeyRange{NewKeyRange("z", "")}) + re.Nil(rf) + + checkRandomRegion(re, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []KeyRange{NewKeyRange("", "")}) + checkRandomRegion(re, tree, []*RegionInfo{regionA, regionB}, []KeyRange{NewKeyRange("", "n")}) + checkRandomRegion(re, tree, []*RegionInfo{regionC, regionD}, []KeyRange{NewKeyRange("n", "")}) + checkRandomRegion(re, tree, []*RegionInfo{}, []KeyRange{NewKeyRange("h", "s")}) + checkRandomRegion(re, tree, []*RegionInfo{regionB, regionC}, []KeyRange{NewKeyRange("a", "z")}) } -func (s *testRegionSuite) TestRandomRegionDiscontinuous(c *C) { +func TestRandomRegionDiscontinuous(t *testing.T) { + re := require.New(t) tree := newRegionTree() r := tree.RandomRegion([]KeyRange{NewKeyRange("c", "f")}) - c.Assert(r, IsNil) + re.Nil(r) // test for single region regionA := NewTestRegionInfo([]byte("c"), []byte("f")) updateNewItem(tree, regionA) ra := tree.RandomRegion([]KeyRange{NewKeyRange("c", "e")}) - c.Assert(ra, IsNil) + re.Nil(ra) ra = tree.RandomRegion([]KeyRange{NewKeyRange("c", "f")}) - c.Assert(ra, DeepEquals, regionA) + re.Equal(regionA, ra) ra = tree.RandomRegion([]KeyRange{NewKeyRange("c", "g")}) - c.Assert(ra, DeepEquals, regionA) + re.Equal(regionA, ra) ra = tree.RandomRegion([]KeyRange{NewKeyRange("a", "e")}) - c.Assert(ra, IsNil) + re.Nil(ra) ra 
= tree.RandomRegion([]KeyRange{NewKeyRange("a", "f")}) - c.Assert(ra, DeepEquals, regionA) + re.Equal(regionA, ra) ra = tree.RandomRegion([]KeyRange{NewKeyRange("a", "g")}) - c.Assert(ra, DeepEquals, regionA) + re.Equal(regionA, ra) regionB := NewTestRegionInfo([]byte("n"), []byte("x")) updateNewItem(tree, regionB) rb := tree.RandomRegion([]KeyRange{NewKeyRange("g", "x")}) - c.Assert(rb, DeepEquals, regionB) + re.Equal(regionB, rb) rb = tree.RandomRegion([]KeyRange{NewKeyRange("g", "y")}) - c.Assert(rb, DeepEquals, regionB) + re.Equal(regionB, rb) rb = tree.RandomRegion([]KeyRange{NewKeyRange("n", "y")}) - c.Assert(rb, DeepEquals, regionB) + re.Equal(regionB, rb) rb = tree.RandomRegion([]KeyRange{NewKeyRange("o", "y")}) - c.Assert(rb, IsNil) + re.Nil(rb) regionC := NewTestRegionInfo([]byte("z"), []byte("")) updateNewItem(tree, regionC) rc := tree.RandomRegion([]KeyRange{NewKeyRange("y", "")}) - c.Assert(rc, DeepEquals, regionC) + re.Equal(regionC, rc) regionD := NewTestRegionInfo([]byte(""), []byte("a")) updateNewItem(tree, regionD) rd := tree.RandomRegion([]KeyRange{NewKeyRange("", "b")}) - c.Assert(rd, DeepEquals, regionD) + re.Equal(regionD, rd) - checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []KeyRange{NewKeyRange("", "")}) + checkRandomRegion(re, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []KeyRange{NewKeyRange("", "")}) } func updateNewItem(tree *regionTree, region *RegionInfo) { @@ -355,7 +359,7 @@ func updateNewItem(tree *regionTree, region *RegionInfo) { tree.update(item) } -func checkRandomRegion(c *C, tree *regionTree, regions []*RegionInfo, ranges []KeyRange) { +func checkRandomRegion(re *require.Assertions, tree *regionTree, regions []*RegionInfo, ranges []KeyRange) { keys := make(map[string]struct{}) for i := 0; i < 10000 && len(keys) < len(regions); i++ { re := tree.RandomRegion(ranges) @@ -369,9 +373,9 @@ func checkRandomRegion(c *C, tree *regionTree, regions []*RegionInfo, ranges []K } for _, region := range regions { _, ok := keys[string(region.GetStartKey())] - c.Assert(ok, IsTrue) + re.True(ok) } - c.Assert(keys, HasLen, len(regions)) + re.Len(keys, len(regions)) } func newRegionItem(start, end []byte) *regionItem { diff --git a/server/core/store_stats_test.go b/server/core/store_stats_test.go index 7b046e3d0c6..82598fd9347 100644 --- a/server/core/store_stats_test.go +++ b/server/core/store_stats_test.go @@ -15,16 +15,15 @@ package core import ( - . 
"github.com/pingcap/check" + "testing" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testStoreStatsSuite{}) - -type testStoreStatsSuite struct{} - -func (s *testStoreStatsSuite) TestStoreStats(c *C) { +func TestStoreStats(t *testing.T) { + re := require.New(t) G := uint64(1024 * 1024 * 1024) meta := &metapb.Store{Id: 1, State: metapb.StoreState_Up} store := NewStoreInfo(meta, SetStoreStats(&pdpb.StoreStats{ @@ -33,11 +32,11 @@ func (s *testStoreStatsSuite) TestStoreStats(c *C) { Available: 150 * G, })) - c.Assert(store.GetCapacity(), Equals, 200*G) - c.Assert(store.GetUsedSize(), Equals, 50*G) - c.Assert(store.GetAvailable(), Equals, 150*G) - c.Assert(store.GetAvgAvailable(), Equals, 150*G) - c.Assert(store.GetAvailableDeviation(), Equals, uint64(0)) + re.Equal(200*G, store.GetCapacity()) + re.Equal(50*G, store.GetUsedSize()) + re.Equal(150*G, store.GetAvailable()) + re.Equal(150*G, store.GetAvgAvailable()) + re.Equal(uint64(0), store.GetAvailableDeviation()) store = store.Clone(SetStoreStats(&pdpb.StoreStats{ Capacity: 200 * G, @@ -45,9 +44,9 @@ func (s *testStoreStatsSuite) TestStoreStats(c *C) { Available: 160 * G, })) - c.Assert(store.GetAvailable(), Equals, 160*G) - c.Assert(store.GetAvgAvailable(), Greater, 150*G) - c.Assert(store.GetAvgAvailable(), Less, 160*G) - c.Assert(store.GetAvailableDeviation(), Greater, uint64(0)) - c.Assert(store.GetAvailableDeviation(), Less, 10*G) + re.Equal(160*G, store.GetAvailable()) + re.Greater(store.GetAvgAvailable(), 150*G) + re.Less(store.GetAvgAvailable(), 160*G) + re.Greater(store.GetAvailableDeviation(), uint64(0)) + re.Less(store.GetAvailableDeviation(), 10*G) } diff --git a/server/core/store_test.go b/server/core/store_test.go index 2bed3783f9e..b311315a29e 100644 --- a/server/core/store_test.go +++ b/server/core/store_test.go @@ -17,18 +17,16 @@ package core import ( "math" "sync" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testDistinctScoreSuite{}) - -type testDistinctScoreSuite struct{} - -func (s *testDistinctScoreSuite) TestDistinctScore(c *C) { +func TestDistinctScore(t *testing.T) { + re := require.New(t) labels := []string{"zone", "rack", "host"} zones := []string{"z1", "z2", "z3"} racks := []string{"r1", "r2", "r3"} @@ -54,19 +52,15 @@ func (s *testDistinctScoreSuite) TestDistinctScore(c *C) { // Number of stores in the same rack but in different hosts. 
numHosts := k score := (numZones*replicaBaseScore+numRacks)*replicaBaseScore + numHosts - c.Assert(DistinctScore(labels, stores, store), Equals, float64(score)) + re.Equal(float64(score), DistinctScore(labels, stores, store)) } } } store := NewStoreInfoWithLabel(100, 1, nil) - c.Assert(DistinctScore(labels, stores, store), Equals, float64(0)) + re.Equal(float64(0), DistinctScore(labels, stores, store)) } -var _ = Suite(&testConcurrencySuite{}) - -type testConcurrencySuite struct{} - -func (s *testConcurrencySuite) TestCloneStore(c *C) { +func TestCloneStore(t *testing.T) { meta := &metapb.Store{Id: 1, Address: "mock://tikv-1", Labels: []*metapb.StoreLabel{{Key: "zone", Value: "z1"}, {Key: "host", Value: "h1"}}} store := NewStoreInfo(meta) start := time.Now() @@ -96,11 +90,8 @@ func (s *testConcurrencySuite) TestCloneStore(c *C) { wg.Wait() } -var _ = Suite(&testStoreSuite{}) - -type testStoreSuite struct{} - -func (s *testStoreSuite) TestRegionScore(c *C) { +func TestRegionScore(t *testing.T) { + re := require.New(t) stats := &pdpb.StoreStats{} stats.Capacity = 512 * (1 << 20) // 512 MB stats.Available = 100 * (1 << 20) // 100 MB @@ -113,22 +104,24 @@ func (s *testStoreSuite) TestRegionScore(c *C) { ) score := store.RegionScore("v1", 0.7, 0.9, 0) // Region score should never be NaN, or /store API would fail. - c.Assert(math.IsNaN(score), IsFalse) + re.False(math.IsNaN(score)) } -func (s *testStoreSuite) TestLowSpaceRatio(c *C) { +func TestLowSpaceRatio(t *testing.T) { + re := require.New(t) store := NewStoreInfoWithLabel(1, 20, nil) store.rawStats.Capacity = initialMinSpace << 4 store.rawStats.Available = store.rawStats.Capacity >> 3 - c.Assert(store.IsLowSpace(0.8), IsFalse) + re.False(store.IsLowSpace(0.8)) store.regionCount = 31 - c.Assert(store.IsLowSpace(0.8), IsTrue) + re.True(store.IsLowSpace(0.8)) store.rawStats.Available = store.rawStats.Capacity >> 2 - c.Assert(store.IsLowSpace(0.8), IsFalse) + re.False(store.IsLowSpace(0.8)) } -func (s *testStoreSuite) TestLowSpaceScoreV2(c *C) { +func TestLowSpaceScoreV2(t *testing.T) { + re := require.New(t) testdata := []struct { bigger *StoreInfo small *StoreInfo @@ -172,6 +165,6 @@ func (s *testStoreSuite) TestLowSpaceScoreV2(c *C) { for _, v := range testdata { score1 := v.bigger.regionScoreV2(0, 0.8) score2 := v.small.regionScoreV2(0, 0.8) - c.Assert(score1, Greater, score2) + re.Greater(score1, score2) } } From ec1fbdaafd6e23d6041ba126765e0f65321d8f85 Mon Sep 17 00:00:00 2001 From: LLThomas Date: Thu, 9 Jun 2022 12:20:30 +0800 Subject: [PATCH 04/35] filter: migrate test framework to testify (#5133) ref tikv/pd#4813 Signed-off-by: LLThomas --- server/schedule/filter/candidates_test.go | 50 +++++++------ server/schedule/filter/filters_test.go | 87 +++++++++++------------ 2 files changed, 66 insertions(+), 71 deletions(-) diff --git a/server/schedule/filter/candidates_test.go b/server/schedule/filter/candidates_test.go index 86fd35e739e..5150bed9b66 100644 --- a/server/schedule/filter/candidates_test.go +++ b/server/schedule/filter/candidates_test.go @@ -15,8 +15,9 @@ package filter import ( - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" + "testing" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" @@ -55,45 +56,42 @@ func (f idFilter) Target(opt *config.PersistOptions, store *core.StoreInfo) bool return f(store.GetID()) } -type testCandidatesSuite struct{} - -var _ = Suite(&testCandidatesSuite{}) - -func (s *testCandidatesSuite) TestCandidates(c *C) { - cs := s.newCandidates(1, 2, 3, 4, 5) +func TestCandidates(t *testing.T) { + re := require.New(t) + cs := newTestCandidates(1, 2, 3, 4, 5) cs.FilterSource(nil, idFilter(func(id uint64) bool { return id > 2 })) - s.check(c, cs, 3, 4, 5) + check(re, cs, 3, 4, 5) cs.FilterTarget(nil, idFilter(func(id uint64) bool { return id%2 == 1 })) - s.check(c, cs, 3, 5) + check(re, cs, 3, 5) cs.FilterTarget(nil, idFilter(func(id uint64) bool { return id > 100 })) - s.check(c, cs) + check(re, cs) store := cs.PickFirst() - c.Assert(store, IsNil) + re.Nil(store) store = cs.RandomPick() - c.Assert(store, IsNil) + re.Nil(store) - cs = s.newCandidates(1, 3, 5, 7, 6, 2, 4) + cs = newTestCandidates(1, 3, 5, 7, 6, 2, 4) cs.Sort(idComparer) - s.check(c, cs, 1, 2, 3, 4, 5, 6, 7) + check(re, cs, 1, 2, 3, 4, 5, 6, 7) store = cs.PickFirst() - c.Assert(store.GetID(), Equals, uint64(1)) + re.Equal(uint64(1), store.GetID()) cs.Reverse() - s.check(c, cs, 7, 6, 5, 4, 3, 2, 1) + check(re, cs, 7, 6, 5, 4, 3, 2, 1) store = cs.PickFirst() - c.Assert(store.GetID(), Equals, uint64(7)) + re.Equal(uint64(7), store.GetID()) cs.Shuffle() cs.Sort(idComparer) - s.check(c, cs, 1, 2, 3, 4, 5, 6, 7) + check(re, cs, 1, 2, 3, 4, 5, 6, 7) store = cs.RandomPick() - c.Assert(store.GetID(), Greater, uint64(0)) - c.Assert(store.GetID(), Less, uint64(8)) + re.Greater(store.GetID(), uint64(0)) + re.Less(store.GetID(), uint64(8)) - cs = s.newCandidates(10, 15, 23, 20, 33, 32, 31) + cs = newTestCandidates(10, 15, 23, 20, 33, 32, 31) cs.Sort(idComparer).Reverse().Top(idComparer2) - s.check(c, cs, 33, 32, 31) + check(re, cs, 33, 32, 31) } -func (s *testCandidatesSuite) newCandidates(ids ...uint64) *StoreCandidates { +func newTestCandidates(ids ...uint64) *StoreCandidates { stores := make([]*core.StoreInfo, 0, len(ids)) for _, id := range ids { stores = append(stores, core.NewStoreInfo(&metapb.Store{Id: id})) @@ -101,9 +99,9 @@ func (s *testCandidatesSuite) newCandidates(ids ...uint64) *StoreCandidates { return NewCandidates(stores) } -func (s *testCandidatesSuite) check(c *C, candidates *StoreCandidates, ids ...uint64) { - c.Assert(candidates.Stores, HasLen, len(ids)) +func check(re *require.Assertions, candidates *StoreCandidates, ids ...uint64) { + re.Len(candidates.Stores, len(ids)) for i, s := range candidates.Stores { - c.Assert(s.GetID(), Equals, ids[i]) + re.Equal(ids[i], s.GetID()) } } diff --git a/server/schedule/filter/filters_test.go b/server/schedule/filter/filters_test.go index 0b44b8cb258..31da16f6ff6 100644 --- a/server/schedule/filter/filters_test.go +++ b/server/schedule/filter/filters_test.go @@ -18,35 +18,17 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/schedule/placement" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testFiltersSuite{}) - -type testFiltersSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testFiltersSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testFiltersSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testFiltersSuite) TestDistinctScoreFilter(c *C) { +func TestDistinctScoreFilter(t *testing.T) { + re := require.New(t) labels := []string{"zone", "rack", "host"} allStores := []*core.StoreInfo{ core.NewStoreInfoWithLabel(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}), @@ -75,14 +57,18 @@ func (s *testFiltersSuite) TestDistinctScoreFilter(c *C) { } ls := NewLocationSafeguard("", labels, stores, allStores[tc.source-1]) li := NewLocationImprover("", labels, stores, allStores[tc.source-1]) - c.Assert(ls.Target(config.NewTestOptions(), allStores[tc.target-1]), Equals, tc.safeGuardRes) - c.Assert(li.Target(config.NewTestOptions(), allStores[tc.target-1]), Equals, tc.improverRes) + re.Equal(tc.safeGuardRes, ls.Target(config.NewTestOptions(), allStores[tc.target-1])) + re.Equal(tc.improverRes, li.Target(config.NewTestOptions(), allStores[tc.target-1])) } } -func (s *testFiltersSuite) TestLabelConstraintsFilter(c *C) { +func TestLabelConstraintsFilter(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + opt := config.NewTestOptions() - testCluster := mockcluster.NewCluster(s.ctx, opt) + testCluster := mockcluster.NewCluster(ctx, opt) store := core.NewStoreInfoWithLabel(1, 1, map[string]string{"id": "1"}) testCases := []struct { @@ -103,14 +89,18 @@ func (s *testFiltersSuite) TestLabelConstraintsFilter(c *C) { } for _, tc := range testCases { filter := NewLabelConstaintFilter("", []placement.LabelConstraint{{Key: tc.key, Op: placement.LabelConstraintOp(tc.op), Values: tc.values}}) - c.Assert(filter.Source(testCluster.GetOpts(), store), Equals, tc.res) + re.Equal(tc.res, filter.Source(testCluster.GetOpts(), store)) } } -func (s *testFiltersSuite) TestRuleFitFilter(c *C) { +func TestRuleFitFilter(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + opt := config.NewTestOptions() opt.SetPlacementRuleEnabled(false) - testCluster := mockcluster.NewCluster(s.ctx, opt) + testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetLocationLabels([]string{"zone"}) testCluster.SetEnablePlacementRules(true) region := core.NewRegionInfo(&metapb.Region{Peers: []*metapb.Peer{ @@ -139,12 +129,13 @@ func (s *testFiltersSuite) TestRuleFitFilter(c *C) { } for _, tc := range testCases { filter := newRuleFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, 1) - c.Assert(filter.Source(testCluster.GetOpts(), testCluster.GetStore(tc.storeID)), Equals, tc.sourceRes) - c.Assert(filter.Target(testCluster.GetOpts(), testCluster.GetStore(tc.storeID)), Equals, tc.targetRes) + re.Equal(tc.sourceRes, filter.Source(testCluster.GetOpts(), testCluster.GetStore(tc.storeID))) + re.Equal(tc.targetRes, filter.Target(testCluster.GetOpts(), testCluster.GetStore(tc.storeID))) } } -func (s *testFiltersSuite) 
TestStoreStateFilter(c *C) { +func TestStoreStateFilter(t *testing.T) { + re := require.New(t) filters := []Filter{ &StoreStateFilter{TransferLeader: true}, &StoreStateFilter{MoveRegion: true}, @@ -162,8 +153,8 @@ func (s *testFiltersSuite) TestStoreStateFilter(c *C) { check := func(store *core.StoreInfo, testCases []testCase) { for _, tc := range testCases { - c.Assert(filters[tc.filterIdx].Source(opt, store), Equals, tc.sourceRes) - c.Assert(filters[tc.filterIdx].Target(opt, store), Equals, tc.targetRes) + re.Equal(tc.sourceRes, filters[tc.filterIdx].Source(opt, store)) + re.Equal(tc.targetRes, filters[tc.filterIdx].Target(opt, store)) } } @@ -195,9 +186,13 @@ func (s *testFiltersSuite) TestStoreStateFilter(c *C) { check(store, testCases) } -func (s *testFiltersSuite) TestIsolationFilter(c *C) { +func TestIsolationFilter(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + opt := config.NewTestOptions() - testCluster := mockcluster.NewCluster(s.ctx, opt) + testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetLocationLabels([]string{"zone", "rack", "host"}) allStores := []struct { storeID uint64 @@ -256,16 +251,20 @@ func (s *testFiltersSuite) TestIsolationFilter(c *C) { for _, tc := range testCases { filter := NewIsolationFilter("", tc.isolationLevel, testCluster.GetLocationLabels(), testCluster.GetRegionStores(tc.region)) for idx, store := range allStores { - c.Assert(filter.Source(testCluster.GetOpts(), testCluster.GetStore(store.storeID)), Equals, tc.sourceRes[idx]) - c.Assert(filter.Target(testCluster.GetOpts(), testCluster.GetStore(store.storeID)), Equals, tc.targetRes[idx]) + re.Equal(tc.sourceRes[idx], filter.Source(testCluster.GetOpts(), testCluster.GetStore(store.storeID))) + re.Equal(tc.targetRes[idx], filter.Target(testCluster.GetOpts(), testCluster.GetStore(store.storeID))) } } } -func (s *testFiltersSuite) TestPlacementGuard(c *C) { +func TestPlacementGuard(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + opt := config.NewTestOptions() opt.SetPlacementRuleEnabled(false) - testCluster := mockcluster.NewCluster(s.ctx, opt) + testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetLocationLabels([]string{"zone"}) testCluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) testCluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) @@ -279,13 +278,11 @@ func (s *testFiltersSuite) TestPlacementGuard(c *C) { }}, &metapb.Peer{StoreId: 1, Id: 1}) store := testCluster.GetStore(1) - c.Assert(NewPlacementSafeguard("", testCluster.GetOpts(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store), - FitsTypeOf, - NewLocationSafeguard("", []string{"zone"}, testCluster.GetRegionStores(region), store)) + re.IsType(NewLocationSafeguard("", []string{"zone"}, testCluster.GetRegionStores(region), store), + NewPlacementSafeguard("", testCluster.GetOpts(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store)) testCluster.SetEnablePlacementRules(true) - c.Assert(NewPlacementSafeguard("", testCluster.GetOpts(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store), - FitsTypeOf, - newRuleFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, 1)) + re.IsType(newRuleFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, 1), + NewPlacementSafeguard("", testCluster.GetOpts(), testCluster.GetBasicCluster(), 
testCluster.GetRuleManager(), region, store)) } func BenchmarkCloneRegionTest(b *testing.B) { From 5ace930a4bcc33cce752e6174ff3c1c5746119be Mon Sep 17 00:00:00 2001 From: JmPotato Date: Thu, 9 Jun 2022 14:54:31 +0800 Subject: [PATCH 05/35] server, tests: remove EnableZap (#5119) close tikv/pd#5118 Remove `EnableZap`. Signed-off-by: JmPotato Co-authored-by: Ti Chi Robot --- server/api/server_test.go | 1 - server/server.go | 16 ++-------------- server/server_test.go | 1 - tests/compatibility/version_upgrade_test.go | 1 - tests/dashboard/race_test.go | 3 --- tests/dashboard/service_test.go | 3 --- tests/pdbackup/backup_test.go | 5 ----- tests/pdctl/cluster/cluster_test.go | 5 ----- tests/pdctl/config/config_test.go | 5 ----- tests/pdctl/global_test.go | 4 ---- tests/pdctl/health/health_test.go | 5 ----- tests/pdctl/hot/hot_test.go | 5 ----- tests/pdctl/label/label_test.go | 5 ----- tests/pdctl/log/log_test.go | 1 - tests/pdctl/member/member_test.go | 5 ----- tests/pdctl/operator/operator_test.go | 5 ----- tests/pdctl/region/region_test.go | 5 ----- tests/pdctl/scheduler/scheduler_test.go | 2 -- tests/pdctl/store/store_test.go | 5 ----- tests/pdctl/tso/tso_test.go | 5 ----- tests/server/api/api_test.go | 6 ------ tests/server/cluster/cluster_test.go | 1 - tests/server/cluster/cluster_work_test.go | 2 -- tests/server/global_config/global_config_test.go | 1 - tests/server/id/id_test.go | 2 -- tests/server/join/join_fail/join_fail_test.go | 5 ----- tests/server/join/join_test.go | 1 - tests/server/member/member_test.go | 1 - tests/server/region_syncer/region_syncer_test.go | 2 -- tests/server/server_test.go | 2 -- tests/server/storage/hot_region_storage_test.go | 5 ----- tests/server/tso/allocator_test.go | 2 -- tests/server/tso/consistency_test.go | 2 -- tests/server/tso/global_tso_test.go | 2 -- tests/server/tso/manager_test.go | 2 -- tests/server/tso/tso_test.go | 2 -- tests/server/watch/leader_watch_test.go | 2 -- 37 files changed, 2 insertions(+), 125 deletions(-) diff --git a/server/api/server_test.go b/server/api/server_test.go index 8d9f1b4c227..a4c6b6de6fb 100644 --- a/server/api/server_test.go +++ b/server/api/server_test.go @@ -62,7 +62,6 @@ var ( ) func TestAPIServer(t *testing.T) { - server.EnableZap = true TestingT(t) } diff --git a/server/server.go b/server/server.go index c27941f7c85..a63c8c52525 100644 --- a/server/server.go +++ b/server/server.go @@ -82,12 +82,8 @@ const ( pdClusterIDPath = "/pd/cluster_id" ) -var ( - // EnableZap enable the zap logger in embed etcd. - EnableZap = false - // EtcdStartTimeout the timeout of the startup etcd. - EtcdStartTimeout = time.Minute * 5 -) +// EtcdStartTimeout the timeout of the startup etcd. +var EtcdStartTimeout = time.Minute * 5 // Server is the pd server. // nolint @@ -282,14 +278,6 @@ func CreateServer(ctx context.Context, cfg *config.Config, serviceBuilders ...Ha diagnosticspb.RegisterDiagnosticsServer(gs, s) } s.etcdCfg = etcdCfg - if EnableZap { - // The etcd master version has removed embed.Config.SetupLogging. - // Now logger is set up automatically based on embed.Config.Logger, - // Use zap logger in the test, otherwise will panic. 
- // Reference: https://go.etcd.io/etcd/blob/master/embed/config_logging.go#L45 - s.etcdCfg.Logger = "zap" - s.etcdCfg.LogOutputs = []string{"stdout"} - } s.lg = cfg.GetZapLogger() s.logProps = cfg.GetZapLogProperties() return s, nil diff --git a/server/server_test.go b/server/server_test.go index f433ac2dc31..c6c14fe011f 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -33,7 +33,6 @@ import ( ) func TestServer(t *testing.T) { - EnableZap = true TestingT(t) } diff --git a/tests/compatibility/version_upgrade_test.go b/tests/compatibility/version_upgrade_test.go index 03e34697084..2fcdc1bf5d5 100644 --- a/tests/compatibility/version_upgrade_test.go +++ b/tests/compatibility/version_upgrade_test.go @@ -39,7 +39,6 @@ type compatibilityTestSuite struct { func (s *compatibilityTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *compatibilityTestSuite) TearDownSuite(c *C) { diff --git a/tests/dashboard/race_test.go b/tests/dashboard/race_test.go index ee72f8dbb61..9ca82567b52 100644 --- a/tests/dashboard/race_test.go +++ b/tests/dashboard/race_test.go @@ -21,7 +21,6 @@ import ( . "github.com/pingcap/check" "github.com/tikv/pd/pkg/dashboard" - "github.com/tikv/pd/server" "github.com/tikv/pd/tests" // Register schedulers. @@ -33,14 +32,12 @@ var _ = Suite(&raceTestSuite{}) type raceTestSuite struct{} func (s *raceTestSuite) SetUpSuite(c *C) { - server.EnableZap = true dashboard.SetCheckInterval(50 * time.Millisecond) tests.WaitLeaderReturnDelay = 0 tests.WaitLeaderCheckInterval = 20 * time.Millisecond } func (s *raceTestSuite) TearDownSuite(c *C) { - server.EnableZap = false dashboard.SetCheckInterval(time.Second) tests.WaitLeaderReturnDelay = 20 * time.Millisecond tests.WaitLeaderCheckInterval = 500 * time.Millisecond diff --git a/tests/dashboard/service_test.go b/tests/dashboard/service_test.go index 911cf80be30..f11f39e466a 100644 --- a/tests/dashboard/service_test.go +++ b/tests/dashboard/service_test.go @@ -27,7 +27,6 @@ import ( "github.com/tikv/pd/pkg/dashboard" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" @@ -54,7 +53,6 @@ type dashboardTestSuite struct { } func (s *dashboardTestSuite) SetUpSuite(c *C) { - server.EnableZap = true dashboard.SetCheckInterval(10 * time.Millisecond) s.ctx, s.cancel = context.WithCancel(context.Background()) s.httpClient = &http.Client{ @@ -71,7 +69,6 @@ func (s *dashboardTestSuite) SetUpSuite(c *C) { func (s *dashboardTestSuite) TearDownSuite(c *C) { s.cancel() s.httpClient.CloseIdleConnections() - server.EnableZap = false dashboard.SetCheckInterval(time.Second) } diff --git a/tests/pdbackup/backup_test.go b/tests/pdbackup/backup_test.go index 49e348699b7..a36cf89f44d 100644 --- a/tests/pdbackup/backup_test.go +++ b/tests/pdbackup/backup_test.go @@ -23,7 +23,6 @@ import ( "time" . 
"github.com/pingcap/check" - "github.com/tikv/pd/server" "github.com/tikv/pd/tests" "github.com/tikv/pd/tools/pd-backup/pdbackup" "go.etcd.io/etcd/clientv3" @@ -37,10 +36,6 @@ var _ = Suite(&backupTestSuite{}) type backupTestSuite struct{} -func (s *backupTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *backupTestSuite) TestBackup(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/cluster/cluster_test.go b/tests/pdctl/cluster/cluster_test.go index 4f1b67b63e7..4b8cceb3bc5 100644 --- a/tests/pdctl/cluster/cluster_test.go +++ b/tests/pdctl/cluster/cluster_test.go @@ -23,7 +23,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/tikv/pd/server" clusterpkg "github.com/tikv/pd/server/cluster" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" @@ -38,10 +37,6 @@ var _ = Suite(&clusterTestSuite{}) type clusterTestSuite struct{} -func (s *clusterTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *clusterTestSuite) TestClusterAndPing(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 297cc538606..ad99d583133 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -28,7 +28,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/typeutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/tests" @@ -44,10 +43,6 @@ var _ = Suite(&configTestSuite{}) type configTestSuite struct{} -func (s *configTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - type testItem struct { name string value interface{} diff --git a/tests/pdctl/global_test.go b/tests/pdctl/global_test.go index bb14eeafac2..de165eea600 100644 --- a/tests/pdctl/global_test.go +++ b/tests/pdctl/global_test.go @@ -37,10 +37,6 @@ var _ = Suite(&globalTestSuite{}) type globalTestSuite struct{} -func (s *globalTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *globalTestSuite) TestSendAndGetComponent(c *C) { handler := func(ctx context.Context, s *server.Server) (http.Handler, server.ServiceGroup, error) { mux := http.NewServeMux() diff --git a/tests/pdctl/health/health_test.go b/tests/pdctl/health/health_test.go index ecc9b6deb2f..06e287dcb36 100644 --- a/tests/pdctl/health/health_test.go +++ b/tests/pdctl/health/health_test.go @@ -20,7 +20,6 @@ import ( "testing" . "github.com/pingcap/check" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/cluster" "github.com/tikv/pd/tests" @@ -36,10 +35,6 @@ var _ = Suite(&healthTestSuite{}) type healthTestSuite struct{} -func (s *healthTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *healthTestSuite) TestHealth(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/hot/hot_test.go b/tests/pdctl/hot/hot_test.go index de40564c2d9..06a657df7d7 100644 --- a/tests/pdctl/hot/hot_test.go +++ b/tests/pdctl/hot/hot_test.go @@ -25,7 +25,6 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" @@ -44,10 +43,6 @@ var _ = Suite(&hotTestSuite{}) type hotTestSuite struct{} -func (s *hotTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *hotTestSuite) TestHot(c *C) { statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/pdctl/label/label_test.go b/tests/pdctl/label/label_test.go index c3cd0b105ee..50a52413e82 100644 --- a/tests/pdctl/label/label_test.go +++ b/tests/pdctl/label/label_test.go @@ -23,7 +23,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" @@ -39,10 +38,6 @@ var _ = Suite(&labelTestSuite{}) type labelTestSuite struct{} -func (s *labelTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *labelTestSuite) TestLabel(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/log/log_test.go b/tests/pdctl/log/log_test.go index 7103b842530..6499b2694c7 100644 --- a/tests/pdctl/log/log_test.go +++ b/tests/pdctl/log/log_test.go @@ -42,7 +42,6 @@ type logTestSuite struct { } func (s *logTestSuite) SetUpSuite(c *C) { - server.EnableZap = true s.ctx, s.cancel = context.WithCancel(context.Background()) var err error s.cluster, err = tests.NewTestCluster(s.ctx, 3) diff --git a/tests/pdctl/member/member_test.go b/tests/pdctl/member/member_test.go index 1dac74ce6a4..f85f2d946df 100644 --- a/tests/pdctl/member/member_test.go +++ b/tests/pdctl/member/member_test.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" @@ -39,10 +38,6 @@ var _ = Suite(&memberTestSuite{}) type memberTestSuite struct{} -func (s *memberTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *memberTestSuite) TestMember(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/operator/operator_test.go b/tests/pdctl/operator/operator_test.go index d6eec639770..73ae2687c80 100644 --- a/tests/pdctl/operator/operator_test.go +++ b/tests/pdctl/operator/operator_test.go @@ -23,7 +23,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/tests" @@ -39,10 +38,6 @@ var _ = Suite(&operatorTestSuite{}) type operatorTestSuite struct{} -func (s *operatorTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *operatorTestSuite) TestOperator(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/region/region_test.go b/tests/pdctl/region/region_test.go index f6142878268..dd83accea55 100644 --- a/tests/pdctl/region/region_test.go +++ b/tests/pdctl/region/region_test.go @@ -24,7 +24,6 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/core" "github.com/tikv/pd/tests" @@ -40,10 +39,6 @@ var _ = Suite(®ionTestSuite{}) type regionTestSuite struct{} -func (s *regionTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *regionTestSuite) TestRegionKeyFormat(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/scheduler/scheduler_test.go b/tests/pdctl/scheduler/scheduler_test.go index de0e523dc8e..53ed808f410 100644 --- a/tests/pdctl/scheduler/scheduler_test.go +++ b/tests/pdctl/scheduler/scheduler_test.go @@ -23,7 +23,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/versioninfo" "github.com/tikv/pd/tests" @@ -43,7 +42,6 @@ type schedulerTestSuite struct { } func (s *schedulerTestSuite) SetUpSuite(c *C) { - server.EnableZap = true s.context, s.cancel = context.WithCancel(context.Background()) } diff --git a/tests/pdctl/store/store_test.go b/tests/pdctl/store/store_test.go index 84a9a4e383d..a43d70722e8 100644 --- a/tests/pdctl/store/store_test.go +++ b/tests/pdctl/store/store_test.go @@ -23,7 +23,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/core/storelimit" @@ -41,10 +40,6 @@ var _ = Suite(&storeTestSuite{}) type storeTestSuite struct{} -func (s *storeTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *storeTestSuite) TestStore(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/pdctl/tso/tso_test.go b/tests/pdctl/tso/tso_test.go index 0f67e0ff7cf..1d2cdb77dc0 100644 --- a/tests/pdctl/tso/tso_test.go +++ b/tests/pdctl/tso/tso_test.go @@ -21,7 +21,6 @@ import ( "time" . 
"github.com/pingcap/check" - "github.com/tikv/pd/server" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) @@ -34,10 +33,6 @@ var _ = Suite(&tsoTestSuite{}) type tsoTestSuite struct{} -func (s *tsoTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *tsoTestSuite) TestTSO(c *C) { cmd := pdctlCmd.GetRootCmd() diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index 68af7235316..9e6248f9cec 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -133,7 +133,6 @@ type testMiddlewareSuite struct { func (s *testMiddlewareSuite) SetUpSuite(c *C) { c.Assert(failpoint.Enable("github.com/tikv/pd/server/api/enableFailpointAPI", "return(true)"), IsNil) ctx, cancel := context.WithCancel(context.Background()) - server.EnableZap = true s.cleanup = cancel cluster, err := tests.NewTestCluster(ctx, 3) c.Assert(err, IsNil) @@ -200,7 +199,6 @@ func (s *testMiddlewareSuite) TestRequestInfoMiddleware(c *C) { func BenchmarkDoRequestWithServiceMiddleware(b *testing.B) { b.StopTimer() ctx, cancel := context.WithCancel(context.Background()) - server.EnableZap = true cluster, _ := tests.NewTestCluster(ctx, 1) cluster.RunInitialServers() cluster.WaitLeader() @@ -223,7 +221,6 @@ func BenchmarkDoRequestWithServiceMiddleware(b *testing.B) { func BenchmarkDoRequestWithoutServiceMiddleware(b *testing.B) { b.StopTimer() ctx, cancel := context.WithCancel(context.Background()) - server.EnableZap = true cluster, _ := tests.NewTestCluster(ctx, 1) cluster.RunInitialServers() cluster.WaitLeader() @@ -348,7 +345,6 @@ func (s *testMiddlewareSuite) TestAuditLocalLogBackend(c *C) { func BenchmarkDoRequestWithLocalLogAudit(b *testing.B) { b.StopTimer() ctx, cancel := context.WithCancel(context.Background()) - server.EnableZap = true cluster, _ := tests.NewTestCluster(ctx, 1) cluster.RunInitialServers() cluster.WaitLeader() @@ -371,7 +367,6 @@ func BenchmarkDoRequestWithLocalLogAudit(b *testing.B) { func BenchmarkDoRequestWithoutLocalLogAudit(b *testing.B) { b.StopTimer() ctx, cancel := context.WithCancel(context.Background()) - server.EnableZap = true cluster, _ := tests.NewTestCluster(ctx, 1) cluster.RunInitialServers() cluster.WaitLeader() @@ -400,7 +395,6 @@ type testRedirectorSuite struct { func (s *testRedirectorSuite) SetUpSuite(c *C) { ctx, cancel := context.WithCancel(context.Background()) - server.EnableZap = true s.cleanup = cancel cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index bbb10c525aa..f493ed21b00 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -68,7 +68,6 @@ type clusterTestSuite struct { func (s *clusterTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true // to prevent GetStorage dashboard.SetCheckInterval(30 * time.Minute) } diff --git a/tests/server/cluster/cluster_work_test.go b/tests/server/cluster/cluster_work_test.go index a9d83ca5fcf..b3d9fdcf9e0 100644 --- a/tests/server/cluster/cluster_work_test.go +++ b/tests/server/cluster/cluster_work_test.go @@ -23,7 +23,6 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/core" "github.com/tikv/pd/tests" ) @@ -37,7 +36,6 @@ type 
clusterWorkerTestSuite struct { func (s *clusterWorkerTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *clusterWorkerTestSuite) TearDownSuite(c *C) { diff --git a/tests/server/global_config/global_config_test.go b/tests/server/global_config/global_config_test.go index cc4b73b8a56..87dc62e35a2 100644 --- a/tests/server/global_config/global_config_test.go +++ b/tests/server/global_config/global_config_test.go @@ -68,7 +68,6 @@ func (s *GlobalConfigTestSuite) SetUpSuite(c *C) { gsi, s.cleanup, err = server.NewTestServer(assertutil.NewChecker(func() {})) s.server = &server.GrpcServer{Server: gsi} c.Assert(err, IsNil) - server.EnableZap = true addr := s.server.GetAddr() s.client, err = grpc.Dial(strings.TrimPrefix(addr, "http://"), grpc.WithInsecure()) c.Assert(err, IsNil) diff --git a/tests/server/id/id_test.go b/tests/server/id/id_test.go index 1fb3563d039..b624ceb056f 100644 --- a/tests/server/id/id_test.go +++ b/tests/server/id/id_test.go @@ -22,7 +22,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/tests" "go.uber.org/goleak" ) @@ -46,7 +45,6 @@ type testAllocIDSuite struct { func (s *testAllocIDSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *testAllocIDSuite) TearDownSuite(c *C) { diff --git a/tests/server/join/join_fail/join_fail_test.go b/tests/server/join/join_fail/join_fail_test.go index 8fed271ea95..bc4e98abdce 100644 --- a/tests/server/join/join_fail/join_fail_test.go +++ b/tests/server/join/join_fail/join_fail_test.go @@ -22,7 +22,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/tests" "go.uber.org/goleak" ) @@ -39,10 +38,6 @@ var _ = Suite(&joinTestSuite{}) type joinTestSuite struct{} -func (s *joinTestSuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *joinTestSuite) TestFailedPDJoinInStep1(c *C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go index 520e5c817ad..8cc9cdcdb34 100644 --- a/tests/server/join/join_test.go +++ b/tests/server/join/join_test.go @@ -46,7 +46,6 @@ type joinTestSuite struct { func (s *joinTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true server.EtcdStartTimeout = 10 * time.Second } diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go index 2eb2ef19ddd..0215d95e5ff 100644 --- a/tests/server/member/member_test.go +++ b/tests/server/member/member_test.go @@ -63,7 +63,6 @@ type memberTestSuite struct { func (s *memberTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *memberTestSuite) TearDownSuite(c *C) { diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go index dd7dc1fb5f1..c4c91806c9f 100644 --- a/tests/server/region_syncer/region_syncer_test.go +++ b/tests/server/region_syncer/region_syncer_test.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" 
"github.com/tikv/pd/tests" @@ -48,7 +47,6 @@ type regionSyncerTestSuite struct { func (s *regionSyncerTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *regionSyncerTestSuite) TearDownSuite(c *C) { diff --git a/tests/server/server_test.go b/tests/server/server_test.go index 56b357f7fcd..f75ef4e15f0 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -21,7 +21,6 @@ import ( . "github.com/pingcap/check" "github.com/tikv/pd/pkg/tempurl" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" "go.uber.org/goleak" @@ -47,7 +46,6 @@ type serverTestSuite struct { func (s *serverTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *serverTestSuite) TearDownSuite(c *C) { diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index 78fdb5632c6..5a11f8c23c4 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -22,7 +22,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/statistics" @@ -39,10 +38,6 @@ var _ = Suite(&hotRegionHistorySuite{}) type hotRegionHistorySuite struct{} -func (s *hotRegionHistorySuite) SetUpSuite(c *C) { - server.EnableZap = true -} - func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/server/tso/allocator_test.go b/tests/server/tso/allocator_test.go index b3b2002f2b0..c7bb38e5d9a 100644 --- a/tests/server/tso/allocator_test.go +++ b/tests/server/tso/allocator_test.go @@ -28,7 +28,6 @@ import ( "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/tso" "github.com/tikv/pd/tests" @@ -43,7 +42,6 @@ type testAllocatorSuite struct { func (s *testAllocatorSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *testAllocatorSuite) TearDownSuite(c *C) { diff --git a/tests/server/tso/consistency_test.go b/tests/server/tso/consistency_test.go index 974c0bb71c2..170a1b4e9a8 100644 --- a/tests/server/tso/consistency_test.go +++ b/tests/server/tso/consistency_test.go @@ -28,7 +28,6 @@ import ( "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/pkg/tsoutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/tso" "github.com/tikv/pd/tests" @@ -51,7 +50,6 @@ func (s *testTSOConsistencySuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) s.dcClientMap = make(map[string]pdpb.PDClient) s.tsPool = make(map[uint64]struct{}) - server.EnableZap = true } func (s *testTSOConsistencySuite) TearDownSuite(c *C) { diff --git a/tests/server/tso/global_tso_test.go b/tests/server/tso/global_tso_test.go index e67ed1d3798..1086751fa08 100644 --- a/tests/server/tso/global_tso_test.go +++ b/tests/server/tso/global_tso_test.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/tikv/pd/pkg/grpcutil" 
"github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/tso" "github.com/tikv/pd/tests" ) @@ -51,7 +50,6 @@ type testNormalGlobalTSOSuite struct { func (s *testNormalGlobalTSOSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *testNormalGlobalTSOSuite) TearDownSuite(c *C) { diff --git a/tests/server/tso/manager_test.go b/tests/server/tso/manager_test.go index 68edc073445..26fa07cc1d5 100644 --- a/tests/server/tso/manager_test.go +++ b/tests/server/tso/manager_test.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/failpoint" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/tso" "github.com/tikv/pd/tests" @@ -42,7 +41,6 @@ type testManagerSuite struct { func (s *testManagerSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *testManagerSuite) TearDownSuite(c *C) { diff --git a/tests/server/tso/tso_test.go b/tests/server/tso/tso_test.go index b46a7b902aa..27bc53d5652 100644 --- a/tests/server/tso/tso_test.go +++ b/tests/server/tso/tso_test.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" ) @@ -39,7 +38,6 @@ type testTSOSuite struct { func (s *testTSOSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *testTSOSuite) TearDownSuite(c *C) { diff --git a/tests/server/watch/leader_watch_test.go b/tests/server/watch/leader_watch_test.go index 160e089ecb7..88d1470d733 100644 --- a/tests/server/watch/leader_watch_test.go +++ b/tests/server/watch/leader_watch_test.go @@ -22,7 +22,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/tikv/pd/pkg/testutil" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" "go.uber.org/goleak" @@ -45,7 +44,6 @@ type watchTestSuite struct { func (s *watchTestSuite) SetUpSuite(c *C) { s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EnableZap = true } func (s *watchTestSuite) TearDownSuite(c *C) { From f82e15ed161acf1955d55fa0c9738a81b60bed74 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Thu, 9 Jun 2022 15:06:31 +0800 Subject: [PATCH 06/35] tests: testify the autoscaling/compatibility/dashboard/pdbackup tests (#5120) ref tikv/pd#4813 Testify the autoscaling/compatibility/dashboard/pdbackup tests. Signed-off-by: JmPotato --- tests/autoscaling/autoscaling_test.go | 23 ++---- tests/compatibility/version_upgrade_test.go | 71 +++++++--------- tests/dashboard/race_test.go | 40 +++++---- tests/dashboard/service_test.go | 91 ++++++++++----------- tests/pdbackup/backup_test.go | 31 +++---- 5 files changed, 113 insertions(+), 143 deletions(-) diff --git a/tests/autoscaling/autoscaling_test.go b/tests/autoscaling/autoscaling_test.go index 62fcf9ee886..ce60f648136 100644 --- a/tests/autoscaling/autoscaling_test.go +++ b/tests/autoscaling/autoscaling_test.go @@ -20,37 +20,30 @@ import ( "net/http" "testing" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/tests" "go.uber.org/goleak" ) -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -var _ = Suite(&apiTestSuite{}) - -type apiTestSuite struct{} - -func (s *apiTestSuite) TestAPI(c *C) { +func TestAPI(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) var jsonStr = []byte(` { @@ -102,7 +95,7 @@ func (s *apiTestSuite) TestAPI(c *C) { ] }`) resp, err := http.Post(leaderServer.GetAddr()+"/autoscaling", "application/json", bytes.NewBuffer(jsonStr)) - c.Assert(err, IsNil) + re.NoError(err) defer resp.Body.Close() - c.Assert(resp.StatusCode, Equals, 200) + re.Equal(200, resp.StatusCode) } diff --git a/tests/compatibility/version_upgrade_test.go b/tests/compatibility/version_upgrade_test.go index 2fcdc1bf5d5..e600a848b3a 100644 --- a/tests/compatibility/version_upgrade_test.go +++ b/tests/compatibility/version_upgrade_test.go @@ -19,42 +19,26 @@ import ( "testing" "github.com/coreos/go-semver/semver" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server" "github.com/tikv/pd/tests" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&compatibilityTestSuite{}) - -type compatibilityTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *compatibilityTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *compatibilityTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *compatibilityTestSuite) TestStoreRegister(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) +func TestStoreRegister(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) putStoreRequest := &pdpb.PutStoreRequest{ Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, @@ -67,21 +51,21 @@ func (s *compatibilityTestSuite) TestStoreRegister(c *C) { svr := &server.GrpcServer{Server: leaderServer.GetServer()} _, err = svr.PutStore(context.Background(), putStoreRequest) - c.Assert(err, IsNil) + re.NoError(err) // FIX ME: read v0.0.0 in sometime cluster.WaitLeader() version := leaderServer.GetClusterVersion() // Restart all PDs. 
err = cluster.StopAll() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer = cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer, NotNil) + re.NotNil(leaderServer) newVersion := leaderServer.GetClusterVersion() - c.Assert(version, Equals, newVersion) + re.Equal(version, newVersion) // putNewStore with old version putStoreRequest = &pdpb.PutStoreRequest{ @@ -93,18 +77,21 @@ func (s *compatibilityTestSuite) TestStoreRegister(c *C) { }, } _, err = svr.PutStore(context.Background(), putStoreRequest) - c.Assert(err, NotNil) + re.Error(err) } -func (s *compatibilityTestSuite) TestRollingUpgrade(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) +func TestRollingUpgrade(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) stores := []*pdpb.PutStoreRequest{ { @@ -144,9 +131,9 @@ func (s *compatibilityTestSuite) TestRollingUpgrade(c *C) { svr := &server.GrpcServer{Server: leaderServer.GetServer()} for _, store := range stores { _, err = svr.PutStore(context.Background(), store) - c.Assert(err, IsNil) + re.NoError(err) } - c.Assert(leaderServer.GetClusterVersion(), Equals, semver.Version{Major: 2, Minor: 0, Patch: 1}) + re.Equal(semver.Version{Major: 2, Minor: 0, Patch: 1}, leaderServer.GetClusterVersion()) // rolling update for i, store := range stores { if i == 0 { @@ -155,11 +142,11 @@ func (s *compatibilityTestSuite) TestRollingUpgrade(c *C) { } store.Store.Version = "2.1.0" resp, err := svr.PutStore(context.Background(), store) - c.Assert(err, IsNil) + re.NoError(err) if i != len(stores)-1 { - c.Assert(leaderServer.GetClusterVersion(), Equals, semver.Version{Major: 2, Minor: 0, Patch: 1}) - c.Assert(resp.GetHeader().GetError(), IsNil) + re.Equal(semver.Version{Major: 2, Minor: 0, Patch: 1}, leaderServer.GetClusterVersion()) + re.Nil(resp.GetHeader().GetError()) } } - c.Assert(leaderServer.GetClusterVersion(), Equals, semver.Version{Major: 2, Minor: 1}) + re.Equal(semver.Version{Major: 2, Minor: 1}, leaderServer.GetClusterVersion()) } diff --git a/tests/dashboard/race_test.go b/tests/dashboard/race_test.go index 9ca82567b52..4bb31f55ecc 100644 --- a/tests/dashboard/race_test.go +++ b/tests/dashboard/race_test.go @@ -16,10 +16,10 @@ package dashboard_test import ( "context" + "testing" "time" - . 
"github.com/pingcap/check" - + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/dashboard" "github.com/tikv/pd/tests" @@ -27,33 +27,31 @@ import ( _ "github.com/tikv/pd/server/schedulers" ) -var _ = Suite(&raceTestSuite{}) +func TestCancelDuringStarting(t *testing.T) { + prepareTestConfig() + defer resetTestConfig() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -type raceTestSuite struct{} + re := require.New(t) + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) + defer cluster.Destroy() + re.NoError(cluster.RunInitialServers()) + cluster.WaitLeader() -func (s *raceTestSuite) SetUpSuite(c *C) { + time.Sleep(60 * time.Millisecond) + cancel() +} + +func prepareTestConfig() { dashboard.SetCheckInterval(50 * time.Millisecond) tests.WaitLeaderReturnDelay = 0 tests.WaitLeaderCheckInterval = 20 * time.Millisecond } -func (s *raceTestSuite) TearDownSuite(c *C) { +func resetTestConfig() { dashboard.SetCheckInterval(time.Second) tests.WaitLeaderReturnDelay = 20 * time.Millisecond tests.WaitLeaderCheckInterval = 500 * time.Millisecond } - -func (s *raceTestSuite) TestCancelDuringStarting(c *C) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) - defer cluster.Destroy() - err = cluster.RunInitialServers() - c.Assert(err, IsNil) - cluster.WaitLeader() - - time.Sleep(60 * time.Millisecond) - cancel() -} diff --git a/tests/dashboard/service_test.go b/tests/dashboard/service_test.go index f11f39e466a..12bc4496d6c 100644 --- a/tests/dashboard/service_test.go +++ b/tests/dashboard/service_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/suite" "go.uber.org/goleak" "github.com/tikv/pd/pkg/dashboard" @@ -36,26 +36,25 @@ import ( _ "github.com/tikv/pd/server/schedulers" ) -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -var _ = Suite(&dashboardTestSuite{}) - type dashboardTestSuite struct { + suite.Suite ctx context.Context cancel context.CancelFunc httpClient *http.Client } -func (s *dashboardTestSuite) SetUpSuite(c *C) { +func TestDashboardTestSuite(t *testing.T) { + suite.Run(t, new(dashboardTestSuite)) +} + +func (suite *dashboardTestSuite) SetupSuite() { dashboard.SetCheckInterval(10 * time.Millisecond) - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.httpClient = &http.Client{ + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.httpClient = &http.Client{ CheckRedirect: func(req *http.Request, via []*http.Request) error { // ErrUseLastResponse can be returned by Client.CheckRedirect hooks to // control how redirects are processed. 
If returned, the next request @@ -66,73 +65,73 @@ func (s *dashboardTestSuite) SetUpSuite(c *C) { } } -func (s *dashboardTestSuite) TearDownSuite(c *C) { - s.cancel() - s.httpClient.CloseIdleConnections() +func (suite *dashboardTestSuite) TearDownSuite() { + suite.cancel() + suite.httpClient.CloseIdleConnections() dashboard.SetCheckInterval(time.Second) } -func (s *dashboardTestSuite) TestDashboardRedirect(c *C) { - s.testDashboard(c, false) +func (suite *dashboardTestSuite) TestDashboardRedirect() { + suite.testDashboard(false) } -func (s *dashboardTestSuite) TestDashboardProxy(c *C) { - s.testDashboard(c, true) +func (suite *dashboardTestSuite) TestDashboardProxy() { + suite.testDashboard(true) } -func (s *dashboardTestSuite) checkRespCode(c *C, url string, code int) { - resp, err := s.httpClient.Get(url) - c.Assert(err, IsNil) +func (suite *dashboardTestSuite) checkRespCode(url string, code int) { + resp, err := suite.httpClient.Get(url) + suite.NoError(err) _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(resp.StatusCode, Equals, code) + suite.Equal(code, resp.StatusCode) } -func (s *dashboardTestSuite) waitForConfigSync() { +func waitForConfigSync() { time.Sleep(time.Second) } -func (s *dashboardTestSuite) checkServiceIsStarted(c *C, internalProxy bool, servers map[string]*tests.TestServer, leader *tests.TestServer) string { - s.waitForConfigSync() +func (suite *dashboardTestSuite) checkServiceIsStarted(internalProxy bool, servers map[string]*tests.TestServer, leader *tests.TestServer) string { + waitForConfigSync() dashboardAddress := leader.GetServer().GetPersistOptions().GetDashboardAddress() hasServiceNode := false for _, srv := range servers { - c.Assert(srv.GetPersistOptions().GetDashboardAddress(), Equals, dashboardAddress) + suite.Equal(dashboardAddress, srv.GetPersistOptions().GetDashboardAddress()) addr := srv.GetAddr() if addr == dashboardAddress || internalProxy { - s.checkRespCode(c, fmt.Sprintf("%s/dashboard/", addr), http.StatusOK) - s.checkRespCode(c, fmt.Sprintf("%s/dashboard/api/keyvisual/heatmaps", addr), http.StatusUnauthorized) + suite.checkRespCode(fmt.Sprintf("%s/dashboard/", addr), http.StatusOK) + suite.checkRespCode(fmt.Sprintf("%s/dashboard/api/keyvisual/heatmaps", addr), http.StatusUnauthorized) if addr == dashboardAddress { hasServiceNode = true } } else { - s.checkRespCode(c, fmt.Sprintf("%s/dashboard/", addr), http.StatusTemporaryRedirect) - s.checkRespCode(c, fmt.Sprintf("%s/dashboard/api/keyvisual/heatmaps", addr), http.StatusTemporaryRedirect) + suite.checkRespCode(fmt.Sprintf("%s/dashboard/", addr), http.StatusTemporaryRedirect) + suite.checkRespCode(fmt.Sprintf("%s/dashboard/api/keyvisual/heatmaps", addr), http.StatusTemporaryRedirect) } } - c.Assert(hasServiceNode, IsTrue) + suite.True(hasServiceNode) return dashboardAddress } -func (s *dashboardTestSuite) checkServiceIsStopped(c *C, servers map[string]*tests.TestServer) { - s.waitForConfigSync() +func (suite *dashboardTestSuite) checkServiceIsStopped(servers map[string]*tests.TestServer) { + waitForConfigSync() for _, srv := range servers { - c.Assert(srv.GetPersistOptions().GetDashboardAddress(), Equals, "none") + suite.Equal("none", srv.GetPersistOptions().GetDashboardAddress()) addr := srv.GetAddr() - s.checkRespCode(c, fmt.Sprintf("%s/dashboard/", addr), http.StatusNotFound) - s.checkRespCode(c, fmt.Sprintf("%s/dashboard/api/keyvisual/heatmaps", addr), http.StatusNotFound) + suite.checkRespCode(fmt.Sprintf("%s/dashboard/", addr), 
http.StatusNotFound) + suite.checkRespCode(fmt.Sprintf("%s/dashboard/api/keyvisual/heatmaps", addr), http.StatusNotFound) } } -func (s *dashboardTestSuite) testDashboard(c *C, internalProxy bool) { - cluster, err := tests.NewTestCluster(s.ctx, 3, func(conf *config.Config, serverName string) { +func (suite *dashboardTestSuite) testDashboard(internalProxy bool) { + cluster, err := tests.NewTestCluster(suite.ctx, 3, func(conf *config.Config, serverName string) { conf.Dashboard.InternalProxy = internalProxy }) - c.Assert(err, IsNil) + suite.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + suite.NoError(err) cmd := pdctlCmd.GetRootCmd() @@ -142,7 +141,7 @@ func (s *dashboardTestSuite) testDashboard(c *C, internalProxy bool) { leaderAddr := leader.GetAddr() // auto select node - dashboardAddress1 := s.checkServiceIsStarted(c, internalProxy, servers, leader) + dashboardAddress1 := suite.checkServiceIsStarted(internalProxy, servers, leader) // pd-ctl set another addr var dashboardAddress2 string @@ -154,13 +153,13 @@ func (s *dashboardTestSuite) testDashboard(c *C, internalProxy bool) { } args := []string{"-u", leaderAddr, "config", "set", "dashboard-address", dashboardAddress2} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - s.checkServiceIsStarted(c, internalProxy, servers, leader) - c.Assert(leader.GetServer().GetPersistOptions().GetDashboardAddress(), Equals, dashboardAddress2) + suite.NoError(err) + suite.checkServiceIsStarted(internalProxy, servers, leader) + suite.Equal(dashboardAddress2, leader.GetServer().GetPersistOptions().GetDashboardAddress()) // pd-ctl set stop args = []string{"-u", leaderAddr, "config", "set", "dashboard-address", "none"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - s.checkServiceIsStopped(c, servers) + suite.NoError(err) + suite.checkServiceIsStopped(servers) } diff --git a/tests/pdbackup/backup_test.go b/tests/pdbackup/backup_test.go index a36cf89f44d..b5034742f69 100644 --- a/tests/pdbackup/backup_test.go +++ b/tests/pdbackup/backup_test.go @@ -22,27 +22,20 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/tests" "github.com/tikv/pd/tools/pd-backup/pdbackup" "go.etcd.io/etcd/clientv3" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&backupTestSuite{}) - -type backupTestSuite struct{} - -func (s *backupTestSuite) TestBackup(c *C) { +func TestBackup(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() urls := strings.Split(pdAddr, ",") @@ -52,18 +45,18 @@ func (s *backupTestSuite) TestBackup(c *C) { DialTimeout: 3 * time.Second, TLS: nil, }) - c.Assert(err, IsNil) + re.NoError(err) backupInfo, err := pdbackup.GetBackupInfo(client, pdAddr) - c.Assert(err, IsNil) - c.Assert(backupInfo, NotNil) + re.NoError(err) + re.NotNil(backupInfo) backBytes, err := json.Marshal(backupInfo) - c.Assert(err, IsNil) + re.NoError(err) var formatBuffer bytes.Buffer err = json.Indent(&formatBuffer, backBytes, "", " ") - c.Assert(err, IsNil) + re.NoError(err) newInfo := &pdbackup.BackupInfo{} err = json.Unmarshal(formatBuffer.Bytes(), newInfo) - c.Assert(err, IsNil) - c.Assert(backupInfo, DeepEquals, newInfo) + re.NoError(err) + re.Equal(newInfo, backupInfo) } From ae157b5eb647a6b1adb7138c1ae486bd3952eb1c Mon Sep 17 00:00:00 2001 From: LLThomas Date: Fri, 10 Jun 2022 11:22:30 +0800 Subject: [PATCH 07/35] storage: migrate test framework to testify (#5139) ref tikv/pd#4813 As the title says. Signed-off-by: LLThomas --- server/storage/hot_region_storage_test.go | 32 ++-- server/storage/storage_gc_test.go | 102 ++++++------ server/storage/storage_test.go | 192 +++++++++++----------- 3 files changed, 159 insertions(+), 167 deletions(-) diff --git a/server/storage/hot_region_storage_test.go b/server/storage/hot_region_storage_test.go index 7447c47f8d8..29dc4140317 100644 --- a/server/storage/hot_region_storage_test.go +++ b/server/storage/hot_region_storage_test.go @@ -21,11 +21,10 @@ import ( "math/rand" "os" "path/filepath" - "reflect" "testing" "time" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" ) @@ -104,21 +103,11 @@ func (m *MockPackHotRegionInfo) ClearHotRegion() { m.historyHotWrites = make([]HistoryHotRegion, 0) } -var _ = SerialSuites(&testHotRegionStorage{}) - -type testHotRegionStorage struct { - ctx context.Context - cancel context.CancelFunc -} - -func (t *testHotRegionStorage) SetUpSuite(c *C) { - t.ctx, t.cancel = context.WithCancel(context.Background()) -} - -func (t *testHotRegionStorage) TestHotRegionWrite(c *C) { +func TestHotRegionWrite(t *testing.T) { + re := require.New(t) packHotRegionInfo := &MockPackHotRegionInfo{} store, clean, err := newTestHotRegionStorage(10*time.Minute, 1, packHotRegionInfo) - c.Assert(err, IsNil) + re.NoError(err) defer clean() now := time.Now() hotRegionStorages := []HistoryHotRegion{ @@ -172,20 +161,21 @@ func (t *testHotRegionStorage) TestHotRegionWrite(c *C) { for next, err := iter.Next(); next != nil && err == nil; next, err = iter.Next() { copyHotRegionStorages[index].StartKey = core.HexRegionKeyStr([]byte(copyHotRegionStorages[index].StartKey)) copyHotRegionStorages[index].EndKey = core.HexRegionKeyStr([]byte(copyHotRegionStorages[index].EndKey)) - c.Assert(reflect.DeepEqual(©HotRegionStorages[index], next), IsTrue) + re.Equal(©HotRegionStorages[index], next) index++ } - c.Assert(err, IsNil) - c.Assert(index, Equals, 3) + re.NoError(err) + re.Equal(3, index) } -func (t *testHotRegionStorage) TestHotRegionDelete(c *C) { +func TestHotRegionDelete(t *testing.T) { + re := require.New(t) defaultRemainDay := 7 defaultDelteData := 30 deleteDate := time.Now().AddDate(0, 0, 0) packHotRegionInfo := &MockPackHotRegionInfo{} store, clean, err := newTestHotRegionStorage(10*time.Minute, uint64(defaultRemainDay), packHotRegionInfo) - c.Assert(err, IsNil) + re.NoError(err) defer clean() historyHotRegions := make([]HistoryHotRegion, 0) for i := 0; i < defaultDelteData; i++ { @@ -207,7 +197,7 @@ func (t *testHotRegionStorage) TestHotRegionDelete(c *C) { num := 0 for next, err := iter.Next(); next != nil && err == nil; next, err = iter.Next() { num++ - c.Assert(reflect.DeepEqual(next, &historyHotRegions[defaultRemainDay-num]), IsTrue) + re.Equal(&historyHotRegions[defaultRemainDay-num], next) } } diff --git a/server/storage/storage_gc_test.go b/server/storage/storage_gc_test.go index eb0e51c79d0..371dc5759f9 100644 --- a/server/storage/storage_gc_test.go +++ b/server/storage/storage_gc_test.go @@ -16,18 +16,14 @@ package storage import ( "math" + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/storage/endpoint" ) -var _ = Suite(&testStorageGCSuite{}) - -type testStorageGCSuite struct { -} - func testGCSafePoints() ([]string, []uint64) { spaceIDs := []string{ "keySpace1", @@ -73,20 +69,22 @@ func testServiceSafePoints() ([]string, []*endpoint.ServiceSafePoint) { return spaceIDs, serviceSafePoints } -func (s *testStorageGCSuite) TestSaveLoadServiceSafePoint(c *C) { +func TestSaveLoadServiceSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() testSpaceID, testSafePoints := testServiceSafePoints() for i := range testSpaceID { - c.Assert(storage.SaveServiceSafePoint(testSpaceID[i], testSafePoints[i]), IsNil) + re.NoError(storage.SaveServiceSafePoint(testSpaceID[i], testSafePoints[i])) } for i := range testSpaceID { loadedSafePoint, err := storage.LoadServiceSafePoint(testSpaceID[i], testSafePoints[i].ServiceID) - c.Assert(err, IsNil) - c.Assert(loadedSafePoint, DeepEquals, testSafePoints[i]) + re.NoError(err) + re.Equal(testSafePoints[i], loadedSafePoint) } } -func (s *testStorageGCSuite) TestLoadMinServiceSafePoint(c *C) { +func TestLoadMinServiceSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() currentTime := time.Now() expireAt1 := currentTime.Add(100 * time.Second).Unix() @@ -101,116 +99,120 @@ func (s *testStorageGCSuite) TestLoadMinServiceSafePoint(c *C) { testKeySpace := "test" for _, serviceSafePoint := range serviceSafePoints { - c.Assert(storage.SaveServiceSafePoint(testKeySpace, serviceSafePoint), IsNil) + re.NoError(storage.SaveServiceSafePoint(testKeySpace, serviceSafePoint)) } // enabling failpoint to make expired key removal immediately observable - c.Assert(failpoint.Enable("github.com/tikv/pd/server/storage/endpoint/removeExpiredKeys", "return(true)"), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/storage/endpoint/removeExpiredKeys", "return(true)")) minSafePoint, err := storage.LoadMinServiceSafePoint(testKeySpace, currentTime) - c.Assert(err, IsNil) - c.Assert(minSafePoint, DeepEquals, serviceSafePoints[0]) + re.NoError(err) + re.Equal(serviceSafePoints[0], minSafePoint) // the safePoint with ServiceID 0 should be removed due to expiration minSafePoint2, err := storage.LoadMinServiceSafePoint(testKeySpace, currentTime.Add(150*time.Second)) - c.Assert(err, IsNil) - c.Assert(minSafePoint2, DeepEquals, serviceSafePoints[1]) + re.NoError(err) + re.Equal(serviceSafePoints[1], minSafePoint2) // verify that service safe point with ServiceID 0 has been removed ssp, err := storage.LoadServiceSafePoint(testKeySpace, "0") - c.Assert(err, IsNil) - c.Assert(ssp, IsNil) + re.NoError(err) + re.Nil(ssp) // all remaining service safePoints should be removed due to expiration ssp, err = storage.LoadMinServiceSafePoint(testKeySpace, currentTime.Add(500*time.Second)) - c.Assert(err, IsNil) - c.Assert(ssp, IsNil) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/storage/endpoint/removeExpiredKeys"), IsNil) + re.NoError(err) + re.Nil(ssp) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/storage/endpoint/removeExpiredKeys")) } -func (s *testStorageGCSuite) TestRemoveServiceSafePoint(c *C) { +func TestRemoveServiceSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() testSpaceID, testSafePoints := testServiceSafePoints() // save service safe points for i := range testSpaceID { - 
c.Assert(storage.SaveServiceSafePoint(testSpaceID[i], testSafePoints[i]), IsNil) + re.NoError(storage.SaveServiceSafePoint(testSpaceID[i], testSafePoints[i])) } // remove saved service safe points for i := range testSpaceID { - c.Assert(storage.RemoveServiceSafePoint(testSpaceID[i], testSafePoints[i].ServiceID), IsNil) + re.NoError(storage.RemoveServiceSafePoint(testSpaceID[i], testSafePoints[i].ServiceID)) } // check that service safe points are empty for i := range testSpaceID { loadedSafePoint, err := storage.LoadServiceSafePoint(testSpaceID[i], testSafePoints[i].ServiceID) - c.Assert(err, IsNil) - c.Assert(loadedSafePoint, IsNil) + re.NoError(err) + re.Nil(loadedSafePoint) } } -func (s *testStorageGCSuite) TestSaveLoadGCSafePoint(c *C) { +func TestSaveLoadGCSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() testSpaceIDs, testSafePoints := testGCSafePoints() for i := range testSpaceIDs { testSpaceID := testSpaceIDs[i] testSafePoint := testSafePoints[i] err := storage.SaveKeySpaceGCSafePoint(testSpaceID, testSafePoint) - c.Assert(err, IsNil) + re.NoError(err) loaded, err := storage.LoadKeySpaceGCSafePoint(testSpaceID) - c.Assert(err, IsNil) - c.Assert(loaded, Equals, testSafePoint) + re.NoError(err) + re.Equal(testSafePoint, loaded) } } -func (s *testStorageGCSuite) TestLoadAllKeySpaceGCSafePoints(c *C) { +func TestLoadAllKeySpaceGCSafePoints(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() testSpaceIDs, testSafePoints := testGCSafePoints() for i := range testSpaceIDs { err := storage.SaveKeySpaceGCSafePoint(testSpaceIDs[i], testSafePoints[i]) - c.Assert(err, IsNil) + re.NoError(err) } loadedSafePoints, err := storage.LoadAllKeySpaceGCSafePoints(true) - c.Assert(err, IsNil) + re.NoError(err) for i := range loadedSafePoints { - c.Assert(loadedSafePoints[i].SpaceID, Equals, testSpaceIDs[i]) - c.Assert(loadedSafePoints[i].SafePoint, Equals, testSafePoints[i]) + re.Equal(testSpaceIDs[i], loadedSafePoints[i].SpaceID) + re.Equal(testSafePoints[i], loadedSafePoints[i].SafePoint) } // saving some service safe points. spaceIDs, safePoints := testServiceSafePoints() for i := range spaceIDs { - c.Assert(storage.SaveServiceSafePoint(spaceIDs[i], safePoints[i]), IsNil) + re.NoError(storage.SaveServiceSafePoint(spaceIDs[i], safePoints[i])) } // verify that service safe points do not interfere with gc safe points. 
loadedSafePoints, err = storage.LoadAllKeySpaceGCSafePoints(true) - c.Assert(err, IsNil) + re.NoError(err) for i := range loadedSafePoints { - c.Assert(loadedSafePoints[i].SpaceID, Equals, testSpaceIDs[i]) - c.Assert(loadedSafePoints[i].SafePoint, Equals, testSafePoints[i]) + re.Equal(testSpaceIDs[i], loadedSafePoints[i].SpaceID) + re.Equal(testSafePoints[i], loadedSafePoints[i].SafePoint) } // verify that when withGCSafePoint set to false, returned safePoints is 0 loadedSafePoints, err = storage.LoadAllKeySpaceGCSafePoints(false) - c.Assert(err, IsNil) + re.NoError(err) for i := range loadedSafePoints { - c.Assert(loadedSafePoints[i].SpaceID, Equals, testSpaceIDs[i]) - c.Assert(loadedSafePoints[i].SafePoint, Equals, uint64(0)) + re.Equal(testSpaceIDs[i], loadedSafePoints[i].SpaceID) + re.Equal(uint64(0), loadedSafePoints[i].SafePoint) } } -func (s *testStorageGCSuite) TestLoadEmpty(c *C) { +func TestLoadEmpty(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() // loading non-existing GC safepoint should return 0 gcSafePoint, err := storage.LoadKeySpaceGCSafePoint("testKeySpace") - c.Assert(err, IsNil) - c.Assert(gcSafePoint, Equals, uint64(0)) + re.NoError(err) + re.Equal(uint64(0), gcSafePoint) // loading non-existing service safepoint should return nil serviceSafePoint, err := storage.LoadServiceSafePoint("testKeySpace", "testService") - c.Assert(err, IsNil) - c.Assert(serviceSafePoint, IsNil) + re.NoError(err) + re.Nil(serviceSafePoint) // loading empty key spaces should return empty slices safePoints, err := storage.LoadAllKeySpaceGCSafePoints(true) - c.Assert(err, IsNil) - c.Assert(safePoints, HasLen, 0) + re.NoError(err) + re.Len(safePoints, 0) } diff --git a/server/storage/storage_test.go b/server/storage/storage_test.go index 51870a62133..3310c10de74 100644 --- a/server/storage/storage_test.go +++ b/server/storage/storage_test.go @@ -23,69 +23,61 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/storage/endpoint" "go.etcd.io/etcd/clientv3" ) -func TestStorage(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testStorageSuite{}) - -type testStorageSuite struct { -} - -func (s *testStorageSuite) TestBasic(c *C) { +func TestBasic(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() - c.Assert(endpoint.StorePath(123), Equals, "raft/s/00000000000000000123") - c.Assert(endpoint.RegionPath(123), Equals, "raft/r/00000000000000000123") + re.Equal("raft/s/00000000000000000123", endpoint.StorePath(123)) + re.Equal("raft/r/00000000000000000123", endpoint.RegionPath(123)) meta := &metapb.Cluster{Id: 123} ok, err := storage.LoadMeta(meta) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) - c.Assert(storage.SaveMeta(meta), IsNil) + re.False(ok) + re.NoError(err) + re.NoError(storage.SaveMeta(meta)) newMeta := &metapb.Cluster{} ok, err = storage.LoadMeta(newMeta) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(newMeta, DeepEquals, meta) + re.True(ok) + re.NoError(err) + re.Equal(meta, newMeta) store := &metapb.Store{Id: 123} ok, err = storage.LoadStore(123, store) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) - c.Assert(storage.SaveStore(store), IsNil) + re.False(ok) + re.NoError(err) + re.NoError(storage.SaveStore(store)) newStore := &metapb.Store{} ok, err = storage.LoadStore(123, newStore) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(newStore, DeepEquals, store) + re.True(ok) + re.NoError(err) + re.Equal(store, newStore) region := &metapb.Region{Id: 123} ok, err = storage.LoadRegion(123, region) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) - c.Assert(storage.SaveRegion(region), IsNil) + re.False(ok) + re.NoError(err) + re.NoError(storage.SaveRegion(region)) newRegion := &metapb.Region{} ok, err = storage.LoadRegion(123, newRegion) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(newRegion, DeepEquals, region) + re.True(ok) + re.NoError(err) + re.Equal(region, newRegion) err = storage.DeleteRegion(region) - c.Assert(err, IsNil) + re.NoError(err) ok, err = storage.LoadRegion(123, newRegion) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) + re.False(ok) + re.NoError(err) } -func mustSaveStores(c *C, s Storage, n int) []*metapb.Store { +func mustSaveStores(re *require.Assertions, s Storage, n int) []*metapb.Store { stores := make([]*metapb.Store, 0, n) for i := 0; i < n; i++ { store := &metapb.Store{Id: uint64(i)} @@ -93,60 +85,64 @@ func mustSaveStores(c *C, s Storage, n int) []*metapb.Store { } for _, store := range stores { - c.Assert(s.SaveStore(store), IsNil) + re.NoError(s.SaveStore(store)) } return stores } -func (s *testStorageSuite) TestLoadStores(c *C) { +func TestLoadStores(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() cache := core.NewStoresInfo() n := 10 - stores := mustSaveStores(c, storage, n) - c.Assert(storage.LoadStores(cache.SetStore), IsNil) + stores := mustSaveStores(re, storage, n) + re.NoError(storage.LoadStores(cache.SetStore)) - c.Assert(cache.GetStoreCount(), Equals, n) + re.Equal(n, cache.GetStoreCount()) for _, store := range cache.GetMetaStores() { - c.Assert(store, DeepEquals, stores[store.GetId()]) + re.Equal(stores[store.GetId()], store) } } -func (s *testStorageSuite) TestStoreWeight(c *C) { +func TestStoreWeight(t *testing.T) { + re := require.New(t) storage := 
NewStorageWithMemoryBackend() cache := core.NewStoresInfo() const n = 3 - mustSaveStores(c, storage, n) - c.Assert(storage.SaveStoreWeight(1, 2.0, 3.0), IsNil) - c.Assert(storage.SaveStoreWeight(2, 0.2, 0.3), IsNil) - c.Assert(storage.LoadStores(cache.SetStore), IsNil) + mustSaveStores(re, storage, n) + re.NoError(storage.SaveStoreWeight(1, 2.0, 3.0)) + re.NoError(storage.SaveStoreWeight(2, 0.2, 0.3)) + re.NoError(storage.LoadStores(cache.SetStore)) leaderWeights := []float64{1.0, 2.0, 0.2} regionWeights := []float64{1.0, 3.0, 0.3} for i := 0; i < n; i++ { - c.Assert(cache.GetStore(uint64(i)).GetLeaderWeight(), Equals, leaderWeights[i]) - c.Assert(cache.GetStore(uint64(i)).GetRegionWeight(), Equals, regionWeights[i]) + re.Equal(leaderWeights[i], cache.GetStore(uint64(i)).GetLeaderWeight()) + re.Equal(regionWeights[i], cache.GetStore(uint64(i)).GetRegionWeight()) } } -func (s *testStorageSuite) TestLoadGCSafePoint(c *C) { +func TestLoadGCSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() testData := []uint64{0, 1, 2, 233, 2333, 23333333333, math.MaxUint64} r, e := storage.LoadGCSafePoint() - c.Assert(r, Equals, uint64(0)) - c.Assert(e, IsNil) + re.Equal(uint64(0), r) + re.NoError(e) for _, safePoint := range testData { err := storage.SaveGCSafePoint(safePoint) - c.Assert(err, IsNil) + re.NoError(err) safePoint1, err := storage.LoadGCSafePoint() - c.Assert(err, IsNil) - c.Assert(safePoint, Equals, safePoint1) + re.NoError(err) + re.Equal(safePoint1, safePoint) } } -func (s *testStorageSuite) TestSaveServiceGCSafePoint(c *C) { +func TestSaveServiceGCSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() expireAt := time.Now().Add(100 * time.Second).Unix() serviceSafePoints := []*endpoint.ServiceSafePoint{ @@ -156,28 +152,29 @@ func (s *testStorageSuite) TestSaveServiceGCSafePoint(c *C) { } for _, ssp := range serviceSafePoints { - c.Assert(storage.SaveServiceGCSafePoint(ssp), IsNil) + re.NoError(storage.SaveServiceGCSafePoint(ssp)) } prefix := endpoint.GCSafePointServicePrefixPath() prefixEnd := clientv3.GetPrefixRangeEnd(prefix) keys, values, err := storage.LoadRange(prefix, prefixEnd, len(serviceSafePoints)) - c.Assert(err, IsNil) - c.Assert(keys, HasLen, 3) - c.Assert(values, HasLen, 3) + re.NoError(err) + re.Len(keys, 3) + re.Len(values, 3) ssp := &endpoint.ServiceSafePoint{} for i, key := range keys { - c.Assert(strings.HasSuffix(key, serviceSafePoints[i].ServiceID), IsTrue) + re.True(strings.HasSuffix(key, serviceSafePoints[i].ServiceID)) - c.Assert(json.Unmarshal([]byte(values[i]), ssp), IsNil) - c.Assert(ssp.ServiceID, Equals, serviceSafePoints[i].ServiceID) - c.Assert(ssp.ExpiredAt, Equals, serviceSafePoints[i].ExpiredAt) - c.Assert(ssp.SafePoint, Equals, serviceSafePoints[i].SafePoint) + re.NoError(json.Unmarshal([]byte(values[i]), ssp)) + re.Equal(serviceSafePoints[i].ServiceID, ssp.ServiceID) + re.Equal(serviceSafePoints[i].ExpiredAt, ssp.ExpiredAt) + re.Equal(serviceSafePoints[i].SafePoint, ssp.SafePoint) } } -func (s *testStorageSuite) TestLoadMinServiceGCSafePoint(c *C) { +func TestLoadMinServiceGCSafePoint(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() expireAt := time.Now().Add(1000 * time.Second).Unix() serviceSafePoints := []*endpoint.ServiceSafePoint{ @@ -187,44 +184,45 @@ func (s *testStorageSuite) TestLoadMinServiceGCSafePoint(c *C) { } for _, ssp := range serviceSafePoints { - c.Assert(storage.SaveServiceGCSafePoint(ssp), IsNil) + 
re.NoError(storage.SaveServiceGCSafePoint(ssp)) } // gc_worker's safepoint will be automatically inserted when loading service safepoints. Here the returned // safepoint can be either of "gc_worker" or "2". ssp, err := storage.LoadMinServiceGCSafePoint(time.Now()) - c.Assert(err, IsNil) - c.Assert(ssp.SafePoint, Equals, uint64(2)) + re.NoError(err) + re.Equal(uint64(2), ssp.SafePoint) // Advance gc_worker's safepoint - c.Assert(storage.SaveServiceGCSafePoint(&endpoint.ServiceSafePoint{ + re.NoError(storage.SaveServiceGCSafePoint(&endpoint.ServiceSafePoint{ ServiceID: "gc_worker", ExpiredAt: math.MaxInt64, SafePoint: 10, - }), IsNil) + })) ssp, err = storage.LoadMinServiceGCSafePoint(time.Now()) - c.Assert(err, IsNil) - c.Assert(ssp.ServiceID, Equals, "2") - c.Assert(ssp.ExpiredAt, Equals, expireAt) - c.Assert(ssp.SafePoint, Equals, uint64(2)) + re.NoError(err) + re.Equal("2", ssp.ServiceID) + re.Equal(expireAt, ssp.ExpiredAt) + re.Equal(uint64(2), ssp.SafePoint) } -func (s *testStorageSuite) TestLoadRegions(c *C) { +func TestLoadRegions(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() cache := core.NewRegionsInfo() n := 10 - regions := mustSaveRegions(c, storage, n) - c.Assert(storage.LoadRegions(context.Background(), cache.SetRegion), IsNil) + regions := mustSaveRegions(re, storage, n) + re.NoError(storage.LoadRegions(context.Background(), cache.SetRegion)) - c.Assert(cache.GetRegionCount(), Equals, n) + re.Equal(n, cache.GetRegionCount()) for _, region := range cache.GetMetaRegions() { - c.Assert(region, DeepEquals, regions[region.GetId()]) + re.Equal(regions[region.GetId()], region) } } -func mustSaveRegions(c *C, s endpoint.RegionStorage, n int) []*metapb.Region { +func mustSaveRegions(re *require.Assertions, s endpoint.RegionStorage, n int) []*metapb.Region { regions := make([]*metapb.Region, 0, n) for i := 0; i < n; i++ { region := newTestRegionMeta(uint64(i)) @@ -232,7 +230,7 @@ func mustSaveRegions(c *C, s endpoint.RegionStorage, n int) []*metapb.Region { } for _, region := range regions { - c.Assert(s.SaveRegion(region), IsNil) + re.NoError(s.SaveRegion(region)) } return regions @@ -246,36 +244,38 @@ func newTestRegionMeta(regionID uint64) *metapb.Region { } } -func (s *testStorageSuite) TestLoadRegionsToCache(c *C) { +func TestLoadRegionsToCache(t *testing.T) { + re := require.New(t) storage := NewStorageWithMemoryBackend() cache := core.NewRegionsInfo() n := 10 - regions := mustSaveRegions(c, storage, n) - c.Assert(storage.LoadRegionsOnce(context.Background(), cache.SetRegion), IsNil) + regions := mustSaveRegions(re, storage, n) + re.NoError(storage.LoadRegionsOnce(context.Background(), cache.SetRegion)) - c.Assert(cache.GetRegionCount(), Equals, n) + re.Equal(n, cache.GetRegionCount()) for _, region := range cache.GetMetaRegions() { - c.Assert(region, DeepEquals, regions[region.GetId()]) + re.Equal(regions[region.GetId()], region) } n = 20 - mustSaveRegions(c, storage, n) - c.Assert(storage.LoadRegionsOnce(context.Background(), cache.SetRegion), IsNil) - c.Assert(cache.GetRegionCount(), Equals, n) + mustSaveRegions(re, storage, n) + re.NoError(storage.LoadRegionsOnce(context.Background(), cache.SetRegion)) + re.Equal(n, cache.GetRegionCount()) } -func (s *testStorageSuite) TestLoadRegionsExceedRangeLimit(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/storage/kv/withRangeLimit", "return(500)"), IsNil) +func TestLoadRegionsExceedRangeLimit(t *testing.T) { + re := require.New(t) + 
re.NoError(failpoint.Enable("github.com/tikv/pd/server/storage/kv/withRangeLimit", "return(500)")) storage := NewStorageWithMemoryBackend() cache := core.NewRegionsInfo() n := 1000 - regions := mustSaveRegions(c, storage, n) - c.Assert(storage.LoadRegions(context.Background(), cache.SetRegion), IsNil) - c.Assert(cache.GetRegionCount(), Equals, n) + regions := mustSaveRegions(re, storage, n) + re.NoError(storage.LoadRegions(context.Background(), cache.SetRegion)) + re.Equal(n, cache.GetRegionCount()) for _, region := range cache.GetMetaRegions() { - c.Assert(region, DeepEquals, regions[region.GetId()]) + re.Equal(regions[region.GetId()], region) } - c.Assert(failpoint.Disable("github.com/tikv/pd/server/storage/kv/withRangeLimit"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/storage/kv/withRangeLimit")) } From 6807f6e401b8170491dff61a8cda1b336739115e Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Fri, 10 Jun 2022 14:46:30 +0800 Subject: [PATCH 08/35] syncer: migrate test framework to testify (#5141) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/region_syncer/client_test.go | 22 ++++---- server/region_syncer/history_buffer_test.go | 59 +++++++++------------ 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/server/region_syncer/client_test.go b/server/region_syncer/client_test.go index 1f10af778ba..ca39cee4859 100644 --- a/server/region_syncer/client_test.go +++ b/server/region_syncer/client_test.go @@ -17,28 +17,26 @@ package syncer import ( "context" "os" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/storage" ) -var _ = Suite(&testClientSuite{}) - -type testClientSuite struct{} - // For issue https://github.com/tikv/pd/issues/3936 -func (t *testClientSuite) TestLoadRegion(c *C) { +func TestLoadRegion(t *testing.T) { + re := require.New(t) tempDir, err := os.MkdirTemp(os.TempDir(), "region_syncer_load_region") - c.Assert(err, IsNil) + re.NoError(err) defer os.RemoveAll(tempDir) rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil) - c.Assert(err, IsNil) + re.NoError(err) server := &mockServer{ ctx: context.Background(), @@ -48,9 +46,9 @@ func (t *testClientSuite) TestLoadRegion(c *C) { for i := 0; i < 30; i++ { rs.SaveRegion(&metapb.Region{Id: uint64(i) + 1}) } - c.Assert(failpoint.Enable("github.com/tikv/pd/server/storage/base_backend/slowLoadRegion", "return(true)"), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/storage/base_backend/slowLoadRegion", "return(true)")) defer func() { - c.Assert(failpoint.Disable("github.com/tikv/pd/server/storage/base_backend/slowLoadRegion"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/storage/base_backend/slowLoadRegion")) }() rc := NewRegionSyncer(server) @@ -58,8 +56,8 @@ func (t *testClientSuite) TestLoadRegion(c *C) { rc.StartSyncWithLeader("") time.Sleep(time.Second) rc.StopSyncWithLeader() - c.Assert(time.Since(start), Greater, time.Second) // make sure failpoint is injected - c.Assert(time.Since(start), Less, time.Second*2) + re.Greater(time.Since(start), time.Second) // make sure failpoint is injected + re.Less(time.Since(start), time.Second*2) } type mockServer struct { diff --git a/server/region_syncer/history_buffer_test.go 
b/server/region_syncer/history_buffer_test.go index 47fa6b66f8f..49cbebdf266 100644 --- a/server/region_syncer/history_buffer_test.go +++ b/server/region_syncer/history_buffer_test.go @@ -17,21 +17,14 @@ package syncer import ( "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/storage/kv" ) -var _ = Suite(&testHistoryBuffer{}) - -type testHistoryBuffer struct{} - -func Test(t *testing.T) { - TestingT(t) -} - -func (t *testHistoryBuffer) TestBufferSize(c *C) { +func TestBufferSize(t *testing.T) { + re := require.New(t) var regions []*core.RegionInfo for i := 0; i <= 100; i++ { regions = append(regions, core.NewRegionInfo(&metapb.Region{Id: uint64(i)}, nil)) @@ -39,23 +32,23 @@ func (t *testHistoryBuffer) TestBufferSize(c *C) { // size equals 1 h := newHistoryBuffer(1, kv.NewMemoryKV()) - c.Assert(h.len(), Equals, 0) + re.Equal(0, h.len()) for _, r := range regions { h.Record(r) } - c.Assert(h.len(), Equals, 1) - c.Assert(h.get(100), Equals, regions[h.nextIndex()-1]) - c.Assert(h.get(99), IsNil) + re.Equal(1, h.len()) + re.Equal(regions[h.nextIndex()-1], h.get(100)) + re.Nil(h.get(99)) // size equals 2 h = newHistoryBuffer(2, kv.NewMemoryKV()) for _, r := range regions { h.Record(r) } - c.Assert(h.len(), Equals, 2) - c.Assert(h.get(100), Equals, regions[h.nextIndex()-1]) - c.Assert(h.get(99), Equals, regions[h.nextIndex()-2]) - c.Assert(h.get(98), IsNil) + re.Equal(2, h.len()) + re.Equal(regions[h.nextIndex()-1], h.get(100)) + re.Equal(regions[h.nextIndex()-2], h.get(99)) + re.Nil(h.get(98)) // size equals 100 kvMem := kv.NewMemoryKV() @@ -63,33 +56,33 @@ func (t *testHistoryBuffer) TestBufferSize(c *C) { for i := 0; i < 6; i++ { h1.Record(regions[i]) } - c.Assert(h1.len(), Equals, 6) - c.Assert(h1.nextIndex(), Equals, uint64(6)) + re.Equal(6, h1.len()) + re.Equal(uint64(6), h1.nextIndex()) h1.persist() // restart the buffer h2 := newHistoryBuffer(100, kvMem) - c.Assert(h2.nextIndex(), Equals, uint64(6)) - c.Assert(h2.firstIndex(), Equals, uint64(6)) - c.Assert(h2.get(h.nextIndex()-1), IsNil) - c.Assert(h2.len(), Equals, 0) + re.Equal(uint64(6), h2.nextIndex()) + re.Equal(uint64(6), h2.firstIndex()) + re.Nil(h2.get(h.nextIndex() - 1)) + re.Equal(0, h2.len()) for _, r := range regions { index := h2.nextIndex() h2.Record(r) - c.Assert(h2.get(index), Equals, r) + re.Equal(r, h2.get(index)) } - c.Assert(h2.nextIndex(), Equals, uint64(107)) - c.Assert(h2.get(h2.nextIndex()), IsNil) + re.Equal(uint64(107), h2.nextIndex()) + re.Nil(h2.get(h2.nextIndex())) s, err := h2.kv.Load(historyKey) - c.Assert(err, IsNil) + re.NoError(err) // flush in index 106 - c.Assert(s, Equals, "106") + re.Equal("106", s) histories := h2.RecordsFrom(uint64(1)) - c.Assert(histories, HasLen, 0) + re.Len(histories, 0) histories = h2.RecordsFrom(h2.firstIndex()) - c.Assert(histories, HasLen, 100) - c.Assert(h2.firstIndex(), Equals, uint64(7)) - c.Assert(histories, DeepEquals, regions[1:]) + re.Len(histories, 100) + re.Equal(uint64(7), h2.firstIndex()) + re.Equal(regions[1:], histories) } From 0015a5b51efab6f17d2235b11e39290c5075e164 Mon Sep 17 00:00:00 2001 From: disksing Date: Sat, 11 Jun 2022 00:22:31 +0800 Subject: [PATCH 09/35] dr-autosync: cleanup configurations (#5106) ref tikv/pd#4399 Signed-off-by: disksing --- server/api/admin.go | 22 ----- server/api/router.go | 1 - server/config/config.go | 29 +++--- server/config/config_test.go | 1 - server/replication/replication_mode.go | 44 
++------- server/replication/replication_mode_test.go | 102 +++++++++----------- tests/pdctl/config/config_test.go | 5 +- 7 files changed, 66 insertions(+), 138 deletions(-) diff --git a/server/api/admin.go b/server/api/admin.go index 36419234fc9..2954874d7fd 100644 --- a/server/api/admin.go +++ b/server/api/admin.go @@ -116,25 +116,3 @@ func (h *adminHandler) SavePersistFile(w http.ResponseWriter, r *http.Request) { } h.rd.Text(w, http.StatusOK, "") } - -// Intentionally no swagger mark as it is supposed to be only used in -// server-to-server. -func (h *adminHandler) UpdateWaitAsyncTime(w http.ResponseWriter, r *http.Request) { - var input map[string]interface{} - if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { - return - } - memberIDValue, ok := input["member_id"].(string) - if !ok || len(memberIDValue) == 0 { - h.rd.JSON(w, http.StatusBadRequest, "invalid member id") - return - } - memberID, err := strconv.ParseUint(memberIDValue, 10, 64) - if err != nil { - h.rd.JSON(w, http.StatusBadRequest, "invalid member id") - return - } - cluster := getCluster(r) - cluster.GetReplicationMode().UpdateMemberWaitAsyncTime(memberID) - h.rd.JSON(w, http.StatusOK, nil) -} diff --git a/server/api/router.go b/server/api/router.go index 98be613a97e..e755341ebef 100644 --- a/server/api/router.go +++ b/server/api/router.go @@ -281,7 +281,6 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { registerFunc(clusterRouter, "/admin/cache/region/{id}", adminHandler.DeleteRegionCache, setMethods("DELETE"), setAuditBackend(localLog)) registerFunc(clusterRouter, "/admin/reset-ts", adminHandler.ResetTS, setMethods("POST"), setAuditBackend(localLog)) registerFunc(apiRouter, "/admin/persist-file/{file_name}", adminHandler.SavePersistFile, setMethods("POST"), setAuditBackend(localLog)) - registerFunc(clusterRouter, "/admin/replication_mode/wait-async", adminHandler.UpdateWaitAsyncTime, setMethods("POST"), setAuditBackend(localLog)) serviceMiddlewareHandler := newServiceMiddlewareHandler(svr, rd) registerFunc(apiRouter, "/service-middleware/config", serviceMiddlewareHandler.GetServiceMiddlewareConfig, setMethods("GET")) diff --git a/server/config/config.go b/server/config/config.go index 384c7108816..df833594f74 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -245,9 +245,8 @@ const ( defaultDashboardAddress = "auto" - defaultDRWaitStoreTimeout = time.Minute - defaultDRWaitSyncTimeout = time.Minute - defaultDRWaitAsyncTimeout = 2 * time.Minute + defaultDRWaitStoreTimeout = time.Minute + defaultDRTiKVSyncTimeoutHint = time.Minute defaultTSOSaveInterval = time.Duration(defaultLeaderLease) * time.Second // DefaultTSOUpdatePhysicalInterval is the default value of the config `TSOUpdatePhysicalInterval`. @@ -1389,26 +1388,22 @@ func NormalizeReplicationMode(m string) string { // DRAutoSyncReplicationConfig is the configuration for auto sync mode between 2 data centers. 
type DRAutoSyncReplicationConfig struct { - LabelKey string `toml:"label-key" json:"label-key"` - Primary string `toml:"primary" json:"primary"` - DR string `toml:"dr" json:"dr"` - PrimaryReplicas int `toml:"primary-replicas" json:"primary-replicas"` - DRReplicas int `toml:"dr-replicas" json:"dr-replicas"` - WaitStoreTimeout typeutil.Duration `toml:"wait-store-timeout" json:"wait-store-timeout"` - WaitSyncTimeout typeutil.Duration `toml:"wait-sync-timeout" json:"wait-sync-timeout"` - WaitAsyncTimeout typeutil.Duration `toml:"wait-async-timeout" json:"wait-async-timeout"` - PauseRegionSplit bool `toml:"pause-region-split" json:"pause-region-split,string"` + LabelKey string `toml:"label-key" json:"label-key"` + Primary string `toml:"primary" json:"primary"` + DR string `toml:"dr" json:"dr"` + PrimaryReplicas int `toml:"primary-replicas" json:"primary-replicas"` + DRReplicas int `toml:"dr-replicas" json:"dr-replicas"` + WaitStoreTimeout typeutil.Duration `toml:"wait-store-timeout" json:"wait-store-timeout"` + TiKVSyncTimeoutHint typeutil.Duration `toml:"tikv-sync-timeout-hint" json:"tikv-sync-timeout-hint"` + PauseRegionSplit bool `toml:"pause-region-split" json:"pause-region-split,string"` } func (c *DRAutoSyncReplicationConfig) adjust(meta *configMetaData) { if !meta.IsDefined("wait-store-timeout") { c.WaitStoreTimeout = typeutil.NewDuration(defaultDRWaitStoreTimeout) } - if !meta.IsDefined("wait-sync-timeout") { - c.WaitSyncTimeout = typeutil.NewDuration(defaultDRWaitSyncTimeout) - } - if !meta.IsDefined("wait-async-timeout") { - c.WaitAsyncTimeout = typeutil.NewDuration(defaultDRWaitAsyncTimeout) + if !meta.IsDefined("tikv-sync-timeout-hint") { + c.TiKVSyncTimeoutHint = typeutil.NewDuration(defaultDRTiKVSyncTimeoutHint) } } diff --git a/server/config/config_test.go b/server/config/config_test.go index 885e24d8d8b..032c0526739 100644 --- a/server/config/config_test.go +++ b/server/config/config_test.go @@ -454,7 +454,6 @@ wait-store-timeout = "120s" re.Equal(2, cfg.ReplicationMode.DRAutoSync.PrimaryReplicas) re.Equal(1, cfg.ReplicationMode.DRAutoSync.DRReplicas) re.Equal(2*time.Minute, cfg.ReplicationMode.DRAutoSync.WaitStoreTimeout.Duration) - re.Equal(time.Minute, cfg.ReplicationMode.DRAutoSync.WaitSyncTimeout.Duration) cfg = NewConfig() meta, err = toml.Decode("", &cfg) diff --git a/server/replication/replication_mode.go b/server/replication/replication_mode.go index c5fa078794c..bfd944eff92 100644 --- a/server/replication/replication_mode.go +++ b/server/replication/replication_mode.go @@ -86,19 +86,17 @@ type ModeManager struct { drSampleTotalRegion int // number of regions in sample drTotalRegion int // number of all regions - drMemberWaitAsyncTime map[uint64]time.Time // last sync time with follower nodes - drStoreStatus sync.Map + drStoreStatus sync.Map } // NewReplicationModeManager creates the replicate mode manager. 
func NewReplicationModeManager(config config.ReplicationModeConfig, storage endpoint.ReplicationStatusStorage, cluster schedule.Cluster, fileReplicater FileReplicater) (*ModeManager, error) { m := &ModeManager{ - initTime: time.Now(), - config: config, - storage: storage, - cluster: cluster, - fileReplicater: fileReplicater, - drMemberWaitAsyncTime: make(map[uint64]time.Time), + initTime: time.Now(), + config: config, + storage: storage, + cluster: cluster, + fileReplicater: fileReplicater, } switch config.ReplicationMode { case modeMajority: @@ -129,15 +127,6 @@ func (m *ModeManager) UpdateConfig(config config.ReplicationModeConfig) error { return nil } -// UpdateMemberWaitAsyncTime updates a member's wait async time. -func (m *ModeManager) UpdateMemberWaitAsyncTime(memberID uint64) { - m.Lock() - defer m.Unlock() - t := time.Now() - log.Info("udpate member wait async time", zap.Uint64("memberID", memberID), zap.Time("time", t)) - m.drMemberWaitAsyncTime[memberID] = t -} - // GetReplicationStatus returns the status to sync with tikv servers. func (m *ModeManager) GetReplicationStatus() *pb.ReplicationStatus { m.RLock() @@ -153,7 +142,7 @@ func (m *ModeManager) GetReplicationStatus() *pb.ReplicationStatus { LabelKey: m.config.DRAutoSync.LabelKey, State: pb.DRAutoSyncState(pb.DRAutoSyncState_value[strings.ToUpper(m.drAutoSync.State)]), StateId: m.drAutoSync.StateID, - WaitSyncTimeoutHint: int32(m.config.DRAutoSync.WaitSyncTimeout.Seconds()), + WaitSyncTimeoutHint: int32(m.config.DRAutoSync.TiKVSyncTimeoutHint.Seconds()), AvailableStores: m.drAutoSync.AvailableStores, PauseRegionSplit: m.config.DRAutoSync.PauseRegionSplit && m.drAutoSync.State != drStateSync, } @@ -239,23 +228,6 @@ func (m *ModeManager) loadDRAutoSync() error { return nil } -func (m *ModeManager) drCheckAsyncTimeout() bool { - m.RLock() - defer m.RUnlock() - timeout := m.config.DRAutoSync.WaitAsyncTimeout.Duration - if timeout == 0 { - return true - } - // make sure all members are timeout. - for _, t := range m.drMemberWaitAsyncTime { - if time.Since(t) <= timeout { - return false - } - } - // make sure all members that have synced with previous leader are timeout. - return time.Since(m.initTime) > timeout -} - func (m *ModeManager) drSwitchToAsyncWait(availableStores []uint64) error { m.Lock() defer m.Unlock() @@ -471,7 +443,7 @@ func (m *ModeManager) tickDR() { switch m.drGetState() { case drStateSync: // If hasMajority is false, the cluster is always unavailable. Switch to async won't help. 
- if !canSync && hasMajority && m.drCheckAsyncTimeout() { + if !canSync && hasMajority { m.drSwitchToAsyncWait(stores[primaryUp]) } case drStateAsyncWait: diff --git a/server/replication/replication_mode_test.go b/server/replication/replication_mode_test.go index 8162da599ff..ee478d4ce6f 100644 --- a/server/replication/replication_mode_test.go +++ b/server/replication/replication_mode_test.go @@ -43,13 +43,13 @@ func TestInitial(t *testing.T) { re.Equal(&pb.ReplicationStatus{Mode: pb.ReplicationMode_MAJORITY}, rep.GetReplicationStatus()) conf = config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ - LabelKey: "dr-label", - Primary: "l1", - DR: "l2", - PrimaryReplicas: 2, - DRReplicas: 1, - WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, + LabelKey: "dr-label", + Primary: "l1", + DR: "l2", + PrimaryReplicas: 2, + DRReplicas: 1, + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} rep, err = NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) @@ -70,8 +70,8 @@ func TestStatus(t *testing.T) { defer cancel() store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ - LabelKey: "dr-label", - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, + LabelKey: "dr-label", + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) @@ -165,13 +165,13 @@ func TestStateSwitch(t *testing.T) { defer cancel() store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ - LabelKey: "zone", - Primary: "zone1", - DR: "zone2", - PrimaryReplicas: 4, - DRReplicas: 1, - WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + PrimaryReplicas: 4, + DRReplicas: 1, + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) replicator := newMockReplicator([]uint64{1}) @@ -352,13 +352,13 @@ func TestReplicateState(t *testing.T) { defer cancel() store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ - LabelKey: "zone", - Primary: "zone1", - DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, - WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + PrimaryReplicas: 2, + DRReplicas: 1, + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) replicator := newMockReplicator([]uint64{1}) @@ -395,14 +395,13 @@ func TestAsynctimeout(t *testing.T) { defer cancel() store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: 
config.DRAutoSyncReplicationConfig{ - LabelKey: "zone", - Primary: "zone1", - DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, - WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, - WaitAsyncTimeout: typeutil.Duration{Duration: 2 * time.Minute}, + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + PrimaryReplicas: 2, + DRReplicas: 1, + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) var replicator mockFileReplicator @@ -415,19 +414,6 @@ func TestAsynctimeout(t *testing.T) { setStoreState(cluster, "up", "up", "down") rep.tickDR() - re.Equal(drStateSync, rep.drGetState()) // cannot switch state due to recently start - - rep.initTime = time.Now().Add(-3 * time.Minute) - rep.tickDR() - re.Equal(drStateAsyncWait, rep.drGetState()) - - rep.drSwitchToSync() - rep.UpdateMemberWaitAsyncTime(42) - rep.tickDR() - re.Equal(drStateSync, rep.drGetState()) // cannot switch state due to member not timeout - - rep.drMemberWaitAsyncTime[42] = time.Now().Add(-3 * time.Minute) - rep.tickDR() re.Equal(drStateAsyncWait, rep.drGetState()) } @@ -453,13 +439,13 @@ func TestRecoverProgress(t *testing.T) { store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ - LabelKey: "zone", - Primary: "zone1", - DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, - WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + PrimaryReplicas: 2, + DRReplicas: 1, + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) cluster.AddLabelsStore(1, 1, map[string]string{}) @@ -516,13 +502,13 @@ func TestRecoverProgressWithSplitAndMerge(t *testing.T) { store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ - LabelKey: "zone", - Primary: "zone1", - DR: "zone2", - PrimaryReplicas: 2, - DRReplicas: 1, - WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, - WaitSyncTimeout: typeutil.Duration{Duration: time.Minute}, + LabelKey: "zone", + Primary: "zone1", + DR: "zone2", + PrimaryReplicas: 2, + DRReplicas: 1, + WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, + TiKVSyncTimeoutHint: typeutil.Duration{Duration: time.Minute}, }} cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) cluster.AddLabelsStore(1, 1, map[string]string{}) diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index ad99d583133..cb564699b53 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -566,9 +566,8 @@ func (s *configTestSuite) TestReplicationMode(c *C) { conf := config.ReplicationModeConfig{ ReplicationMode: "majority", DRAutoSync: config.DRAutoSyncReplicationConfig{ - WaitStoreTimeout: typeutil.NewDuration(time.Minute), - WaitSyncTimeout: typeutil.NewDuration(time.Minute), - WaitAsyncTimeout: typeutil.NewDuration(2 * time.Minute), + WaitStoreTimeout: typeutil.NewDuration(time.Minute), + TiKVSyncTimeoutHint: typeutil.NewDuration(time.Minute), }, } check := func() { From 
c8775b6176a03eb25cb1622fc46ba1d496872df7 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Sat, 11 Jun 2022 16:02:31 +0800 Subject: [PATCH 10/35] encryptionkm: migrate test framework to testify (#5136) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/encryptionkm/key_manager_test.go | 576 ++++++++++++------------ 1 file changed, 292 insertions(+), 284 deletions(-) diff --git a/server/encryptionkm/key_manager_test.go b/server/encryptionkm/key_manager_test.go index e9fc39e5789..5e0d864942c 100644 --- a/server/encryptionkm/key_manager_test.go +++ b/server/encryptionkm/key_manager_test.go @@ -15,7 +15,6 @@ package encryptionkm import ( - "bytes" "context" "encoding/hex" "fmt" @@ -26,8 +25,8 @@ import ( "time" "github.com/gogo/protobuf/proto" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/encryptionpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/encryption" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/tempurl" @@ -37,14 +36,6 @@ import ( "go.etcd.io/etcd/embed" ) -func TestKeyManager(t *testing.T) { - TestingT(t) -} - -type testKeyManagerSuite struct{} - -var _ = SerialSuites(&testKeyManagerSuite{}) - const ( testMasterKey = "8fd7e3e917c170d92f3e51a981dd7bc8fba11f3df7d8df994842f6e86f69b530" testMasterKey2 = "8fd7e3e917c170d92f3e51a981dd7bc8fba11f3df7d8df994842f6e86f69b531" @@ -57,29 +48,29 @@ func getTestDataKey() []byte { return key } -func newTestEtcd(c *C) (client *clientv3.Client, cleanup func()) { +func newTestEtcd(re *require.Assertions) (client *clientv3.Client, cleanup func()) { cfg := embed.NewConfig() cfg.Name = "test_etcd" cfg.Dir, _ = os.MkdirTemp("/tmp", "test_etcd") cfg.Logger = "zap" pu, err := url.Parse(tempurl.Alloc()) - c.Assert(err, IsNil) + re.NoError(err) cfg.LPUrls = []url.URL{*pu} cfg.APUrls = cfg.LPUrls cu, err := url.Parse(tempurl.Alloc()) - c.Assert(err, IsNil) + re.NoError(err) cfg.LCUrls = []url.URL{*cu} cfg.ACUrls = cfg.LCUrls cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, &cfg.LPUrls[0]) cfg.ClusterState = embed.ClusterStateFlagNew server, err := embed.StartEtcd(cfg) - c.Assert(err, IsNil) + re.NoError(err) <-server.Server.ReadyNotify() client, err = clientv3.New(clientv3.Config{ Endpoints: []string{cfg.LCUrls[0].String()}, }) - c.Assert(err, IsNil) + re.NoError(err) cleanup = func() { client.Close() @@ -90,16 +81,16 @@ func newTestEtcd(c *C) (client *clientv3.Client, cleanup func()) { return client, cleanup } -func newTestKeyFile(c *C, key ...string) (keyFilePath string, cleanup func()) { +func newTestKeyFile(re *require.Assertions, key ...string) (keyFilePath string, cleanup func()) { testKey := testMasterKey for _, k := range key { testKey = k } tempDir, err := os.MkdirTemp("/tmp", "test_key_file") - c.Assert(err, IsNil) + re.NoError(err) keyFilePath = tempDir + "/key" err = os.WriteFile(keyFilePath, []byte(testKey), 0600) - c.Assert(err, IsNil) + re.NoError(err) cleanup = func() { os.RemoveAll(tempDir) @@ -108,53 +99,55 @@ func newTestKeyFile(c *C, key ...string) (keyFilePath string, cleanup func()) { return keyFilePath, cleanup } -func newTestLeader(c *C, client *clientv3.Client) *election.Leadership { +func newTestLeader(re *require.Assertions, client *clientv3.Client) *election.Leadership { leader := election.NewLeadership(client, "test_leader", "test") timeout := int64(30000000) // about a year. 
err := leader.Campaign(timeout, "") - c.Assert(err, IsNil) + re.NoError(err) return leader } -func checkMasterKeyMeta(c *C, value []byte, meta *encryptionpb.MasterKey, ciphertextKey []byte) { +func checkMasterKeyMeta(re *require.Assertions, value []byte, meta *encryptionpb.MasterKey, ciphertextKey []byte) { content := &encryptionpb.EncryptedContent{} err := content.Unmarshal(value) - c.Assert(err, IsNil) - c.Assert(proto.Equal(content.MasterKey, meta), IsTrue) - c.Assert(bytes.Equal(content.CiphertextKey, ciphertextKey), IsTrue) + re.NoError(err) + re.True(proto.Equal(content.MasterKey, meta)) + re.Equal(content.CiphertextKey, ciphertextKey) } -func (s *testKeyManagerSuite) TestNewKeyManagerBasic(c *C) { +func TestNewKeyManagerBasic(t *testing.T) { + re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() // Use default config. config := &encryption.Config{} err := config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) + re.NoError(err) // Check config. - c.Assert(m.method, Equals, encryptionpb.EncryptionMethod_PLAINTEXT) - c.Assert(m.masterKeyMeta.GetPlaintext(), NotNil) + re.Equal(encryptionpb.EncryptionMethod_PLAINTEXT, m.method) + re.NotNil(m.masterKeyMeta.GetPlaintext()) // Check loaded keys. - c.Assert(m.keys.Load(), IsNil) + re.Nil(m.keys.Load()) // Check etcd KV. value, err := etcdutil.GetValue(client, EncryptionKeysPath) - c.Assert(err, IsNil) - c.Assert(value, IsNil) + re.NoError(err) + re.Nil(value) } -func (s *testKeyManagerSuite) TestNewKeyManagerWithCustomConfig(c *C) { +func TestNewKeyManagerWithCustomConfig(t *testing.T) { + re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() // Custom config rotatePeriod, err := time.ParseDuration("100h") - c.Assert(err, IsNil) + re.NoError(err) config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", DataKeyRotationPeriod: typeutil.NewDuration(rotatePeriod), @@ -166,36 +159,37 @@ func (s *testKeyManagerSuite) TestNewKeyManagerWithCustomConfig(c *C) { }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) + re.NoError(err) // Check config. - c.Assert(m.method, Equals, encryptionpb.EncryptionMethod_AES128_CTR) - c.Assert(m.dataKeyRotationPeriod, Equals, rotatePeriod) - c.Assert(m.masterKeyMeta, NotNil) + re.Equal(encryptionpb.EncryptionMethod_AES128_CTR, m.method) + re.Equal(rotatePeriod, m.dataKeyRotationPeriod) + re.NotNil(m.masterKeyMeta) keyFileMeta := m.masterKeyMeta.GetFile() - c.Assert(keyFileMeta, NotNil) - c.Assert(keyFileMeta.Path, Equals, config.MasterKey.MasterKeyFileConfig.FilePath) + re.NotNil(keyFileMeta) + re.Equal(config.MasterKey.MasterKeyFileConfig.FilePath, keyFileMeta.Path) // Check loaded keys. - c.Assert(m.keys.Load(), IsNil) + re.Nil(m.keys.Load()) // Check etcd KV. value, err := etcdutil.GetValue(client, EncryptionKeysPath) - c.Assert(err, IsNil) - c.Assert(value, IsNil) + re.NoError(err) + re.Nil(value) } -func (s *testKeyManagerSuite) TestNewKeyManagerLoadKeys(c *C) { +func TestNewKeyManagerLoadKeys(t *testing.T) { + re := require.New(t) // Initialize. 
- client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Use default config. config := &encryption.Config{} err := config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Store initial keys in etcd. masterKeyMeta := newMasterKey(keyFile) keys := &encryptionpb.KeyDictionary{ @@ -210,39 +204,40 @@ func (s *testKeyManagerSuite) TestNewKeyManagerLoadKeys(c *C) { }, } err = saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) + re.NoError(err) // Check config. - c.Assert(m.method, Equals, encryptionpb.EncryptionMethod_PLAINTEXT) - c.Assert(m.masterKeyMeta.GetPlaintext(), NotNil) + re.Equal(encryptionpb.EncryptionMethod_PLAINTEXT, m.method) + re.NotNil(m.masterKeyMeta.GetPlaintext()) // Check loaded keys. - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) // Check etcd KV. resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, keys)) } -func (s *testKeyManagerSuite) TestGetCurrentKey(c *C) { +func TestGetCurrentKey(t *testing.T) { + re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() // Use default config. config := &encryption.Config{} err := config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) + re.NoError(err) // Test encryption disabled. currentKeyID, currentKey, err := m.GetCurrentKey() - c.Assert(err, IsNil) - c.Assert(currentKeyID, Equals, uint64(disableEncryptionKeyID)) - c.Assert(currentKey, IsNil) + re.NoError(err) + re.Equal(uint64(disableEncryptionKeyID), currentKeyID) + re.Nil(currentKey) // Test normal case. keys := &encryptionpb.KeyDictionary{ CurrentKeyId: 123, @@ -257,9 +252,9 @@ func (s *testKeyManagerSuite) TestGetCurrentKey(c *C) { } m.keys.Store(keys) currentKeyID, currentKey, err = m.GetCurrentKey() - c.Assert(err, IsNil) - c.Assert(currentKeyID, Equals, keys.CurrentKeyId) - c.Assert(proto.Equal(currentKey, keys.Keys[keys.CurrentKeyId]), IsTrue) + re.NoError(err) + re.Equal(keys.CurrentKeyId, currentKeyID) + re.True(proto.Equal(currentKey, keys.Keys[keys.CurrentKeyId])) // Test current key missing. keys = &encryptionpb.KeyDictionary{ CurrentKeyId: 123, @@ -267,16 +262,17 @@ func (s *testKeyManagerSuite) TestGetCurrentKey(c *C) { } m.keys.Store(keys) _, _, err = m.GetCurrentKey() - c.Assert(err, NotNil) + re.Error(err) } -func (s *testKeyManagerSuite) TestGetKey(c *C) { +func TestGetKey(t *testing.T) { + re := require.New(t) // Initialize. 
- client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Store initial keys in etcd. masterKeyMeta := newMasterKey(keyFile) keys := &encryptionpb.KeyDictionary{ @@ -297,18 +293,18 @@ func (s *testKeyManagerSuite) TestGetKey(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Use default config. config := &encryption.Config{} err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) + re.NoError(err) // Get existing key. key, err := m.GetKey(uint64(123)) - c.Assert(err, IsNil) - c.Assert(proto.Equal(key, keys.Keys[123]), IsTrue) + re.NoError(err) + re.True(proto.Equal(key, keys.Keys[123])) // Get key that require a reload. // Deliberately cancel watcher, delete a key and check if it has reloaded. loadedKeys := m.keys.Load().(*encryptionpb.KeyDictionary) @@ -317,21 +313,22 @@ func (s *testKeyManagerSuite) TestGetKey(c *C) { m.keys.Store(loadedKeys) m.mu.keysRevision = 0 key, err = m.GetKey(uint64(456)) - c.Assert(err, IsNil) - c.Assert(proto.Equal(key, keys.Keys[456]), IsTrue) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(key, keys.Keys[456])) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) // Get non-existing key. _, err = m.GetKey(uint64(789)) - c.Assert(err, NotNil) + re.Error(err) } -func (s *testKeyManagerSuite) TestLoadKeyEmpty(c *C) { +func TestLoadKeyEmpty(t *testing.T) { + re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Store initial keys in etcd. masterKeyMeta := newMasterKey(keyFile) keys := &encryptionpb.KeyDictionary{ @@ -346,29 +343,30 @@ func (s *testKeyManagerSuite) TestLoadKeyEmpty(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Use default config. config := &encryption.Config{} err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) + re.NoError(err) // Simulate keys get deleted. _, err = client.Delete(context.Background(), EncryptionKeysPath) - c.Assert(err, IsNil) - c.Assert(m.loadKeys(), NotNil) + re.NoError(err) + re.NotNil(m.loadKeys()) } -func (s *testKeyManagerSuite) TestWatcher(c *C) { +func TestWatcher(t *testing.T) { + re := require.New(t) // Initialize. 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Listen on watcher event @@ -380,15 +378,15 @@ func (s *testKeyManagerSuite) TestWatcher(c *C) { // Use default config. config := &encryption.Config{} err := config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) + re.NoError(err) go m.StartBackgroundLoop(ctx) _, err = m.GetKey(123) - c.Assert(err, NotNil) + re.Error(err) _, err = m.GetKey(456) - c.Assert(err, NotNil) + re.Error(err) // Update keys in etcd masterKeyMeta := newMasterKey(keyFile) keys := &encryptionpb.KeyDictionary{ @@ -403,13 +401,13 @@ func (s *testKeyManagerSuite) TestWatcher(c *C) { }, } err = saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) <-reloadEvent key, err := m.GetKey(123) - c.Assert(err, IsNil) - c.Assert(proto.Equal(key, keys.Keys[123]), IsTrue) + re.NoError(err) + re.True(proto.Equal(key, keys.Keys[123])) _, err = m.GetKey(456) - c.Assert(err, NotNil) + re.Error(err) // Update again keys = &encryptionpb.KeyDictionary{ CurrentKeyId: 456, @@ -429,48 +427,50 @@ func (s *testKeyManagerSuite) TestWatcher(c *C) { }, } err = saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) <-reloadEvent key, err = m.GetKey(123) - c.Assert(err, IsNil) - c.Assert(proto.Equal(key, keys.Keys[123]), IsTrue) + re.NoError(err) + re.True(proto.Equal(key, keys.Keys[123])) key, err = m.GetKey(456) - c.Assert(err, IsNil) - c.Assert(proto.Equal(key, keys.Keys[456]), IsTrue) + re.NoError(err) + re.True(proto.Equal(key, keys.Keys[456])) } -func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionOff(c *C) { +func TestSetLeadershipWithEncryptionOff(t *testing.T) { + re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() // Use default config. config := &encryption.Config{} err := config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := NewKeyManager(client, config) - c.Assert(err, IsNil) - c.Assert(m.keys.Load(), IsNil) + re.NoError(err) + re.Nil(m.keys.Load()) // Set leadership - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check encryption stays off. - c.Assert(m.keys.Load(), IsNil) + re.Nil(m.keys.Load()) value, err := etcdutil.GetValue(client, EncryptionKeysPath) - c.Assert(err, IsNil) - c.Assert(value, IsNil) + re.NoError(err) + re.Nil(value) } -func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionEnabling(c *C) { +func TestSetLeadershipWithEncryptionEnabling(t *testing.T) { + re := require.New(t) // Initialize. 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Listen on watcher event @@ -490,41 +490,42 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionEnabling(c *C) { }, } err := config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(m.keys.Load(), IsNil) + re.NoError(err) + re.Nil(m.keys.Load()) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check encryption is on and persisted. <-reloadEvent - c.Assert(m.keys.Load(), NotNil) + re.NotNil(m.keys.Load()) currentKeyID, currentKey, err := m.GetCurrentKey() - c.Assert(err, IsNil) + re.NoError(err) method, err := config.GetMethod() - c.Assert(err, IsNil) - c.Assert(currentKey.Method, Equals, method) + re.NoError(err) + re.Equal(method, currentKey.Method) loadedKeys := m.keys.Load().(*encryptionpb.KeyDictionary) - c.Assert(proto.Equal(loadedKeys.Keys[currentKeyID], currentKey), IsTrue) + re.True(proto.Equal(loadedKeys.Keys[currentKeyID], currentKey)) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(loadedKeys, storedKeys), IsTrue) + re.NoError(err) + re.True(proto.Equal(loadedKeys, storedKeys)) } -func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionMethodChanged(c *C) { +func TestSetLeadershipWithEncryptionMethodChanged(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -555,7 +556,7 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionMethodChanged(c *C) }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with different encrption method. config := &encryption.Config{ DataEncryptionMethod: "aes256-ctr", @@ -567,41 +568,42 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionMethodChanged(c *C) }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check encryption method is updated. 
<-reloadEvent - c.Assert(m.keys.Load(), NotNil) + re.NotNil(m.keys.Load()) currentKeyID, currentKey, err := m.GetCurrentKey() - c.Assert(err, IsNil) - c.Assert(currentKey.Method, Equals, encryptionpb.EncryptionMethod_AES256_CTR) - c.Assert(currentKey.Key, HasLen, 32) + re.NoError(err) + re.Equal(encryptionpb.EncryptionMethod_AES256_CTR, currentKey.Method) + re.Len(currentKey.Key, 32) loadedKeys := m.keys.Load().(*encryptionpb.KeyDictionary) - c.Assert(loadedKeys.CurrentKeyId, Equals, currentKeyID) - c.Assert(proto.Equal(loadedKeys.Keys[123], keys.Keys[123]), IsTrue) + re.Equal(currentKeyID, loadedKeys.CurrentKeyId) + re.True(proto.Equal(loadedKeys.Keys[123], keys.Keys[123])) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(loadedKeys, storedKeys), IsTrue) + re.NoError(err) + re.True(proto.Equal(loadedKeys, storedKeys)) } -func (s *testKeyManagerSuite) TestSetLeadershipWithCurrentKeyExposed(c *C) { +func TestSetLeadershipWithCurrentKeyExposed(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -626,7 +628,7 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithCurrentKeyExposed(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with different encrption method. config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", @@ -638,42 +640,43 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithCurrentKeyExposed(c *C) { }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check encryption method is updated. 
<-reloadEvent - c.Assert(m.keys.Load(), NotNil) + re.NotNil(m.keys.Load()) currentKeyID, currentKey, err := m.GetCurrentKey() - c.Assert(err, IsNil) - c.Assert(currentKey.Method, Equals, encryptionpb.EncryptionMethod_AES128_CTR) - c.Assert(currentKey.Key, HasLen, 16) - c.Assert(currentKey.WasExposed, IsFalse) + re.NoError(err) + re.Equal(encryptionpb.EncryptionMethod_AES128_CTR, currentKey.Method) + re.Len(currentKey.Key, 16) + re.False(currentKey.WasExposed) loadedKeys := m.keys.Load().(*encryptionpb.KeyDictionary) - c.Assert(loadedKeys.CurrentKeyId, Equals, currentKeyID) - c.Assert(proto.Equal(loadedKeys.Keys[123], keys.Keys[123]), IsTrue) + re.Equal(currentKeyID, loadedKeys.CurrentKeyId) + re.True(proto.Equal(loadedKeys.Keys[123], keys.Keys[123])) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(loadedKeys, storedKeys), IsTrue) + re.NoError(err) + re.True(proto.Equal(loadedKeys, storedKeys)) } -func (s *testKeyManagerSuite) TestSetLeadershipWithCurrentKeyExpired(c *C) { +func TestSetLeadershipWithCurrentKeyExpired(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -698,10 +701,10 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithCurrentKeyExpired(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with 100s rotation period. rotationPeriod, err := time.ParseDuration("100s") - c.Assert(err, IsNil) + re.NoError(err) config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", DataKeyRotationPeriod: typeutil.NewDuration(rotationPeriod), @@ -713,45 +716,46 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithCurrentKeyExpired(c *C) { }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check encryption method is updated. 
<-reloadEvent - c.Assert(m.keys.Load(), NotNil) + re.NotNil(m.keys.Load()) currentKeyID, currentKey, err := m.GetCurrentKey() - c.Assert(err, IsNil) - c.Assert(currentKey.Method, Equals, encryptionpb.EncryptionMethod_AES128_CTR) - c.Assert(currentKey.Key, HasLen, 16) - c.Assert(currentKey.WasExposed, IsFalse) - c.Assert(currentKey.CreationTime, Equals, uint64(helper.now().Unix())) + re.NoError(err) + re.Equal(encryptionpb.EncryptionMethod_AES128_CTR, currentKey.Method) + re.Len(currentKey.Key, 16) + re.False(currentKey.WasExposed) + re.Equal(uint64(helper.now().Unix()), currentKey.CreationTime) loadedKeys := m.keys.Load().(*encryptionpb.KeyDictionary) - c.Assert(loadedKeys.CurrentKeyId, Equals, currentKeyID) - c.Assert(proto.Equal(loadedKeys.Keys[123], keys.Keys[123]), IsTrue) + re.Equal(currentKeyID, loadedKeys.CurrentKeyId) + re.True(proto.Equal(loadedKeys.Keys[123], keys.Keys[123])) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(loadedKeys, storedKeys), IsTrue) + re.NoError(err) + re.True(proto.Equal(loadedKeys, storedKeys)) } -func (s *testKeyManagerSuite) TestSetLeadershipWithMasterKeyChanged(c *C) { +func TestSetLeadershipWithMasterKeyChanged(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - keyFile2, cleanupKeyFile2 := newTestKeyFile(c, testMasterKey2) + keyFile2, cleanupKeyFile2 := newTestKeyFile(re, testMasterKey2) defer cleanupKeyFile2() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -776,7 +780,7 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithMasterKeyChanged(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with a different master key. config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", @@ -788,35 +792,36 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithMasterKeyChanged(c *C) { }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check keys are the same, but encrypted with the new master key. 
<-reloadEvent - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, keys)) meta, err := config.GetMasterKeyMeta() - c.Assert(err, IsNil) - checkMasterKeyMeta(c, resp.Kvs[0].Value, meta, nil) + re.NoError(err) + checkMasterKeyMeta(re, resp.Kvs[0].Value, meta, nil) } -func (s *testKeyManagerSuite) TestSetLeadershipMasterKeyWithCiphertextKey(c *C) { +func TestSetLeadershipMasterKeyWithCiphertextKey(t *testing.T) { + re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -831,10 +836,10 @@ func (s *testKeyManagerSuite) TestSetLeadershipMasterKeyWithCiphertextKey(c *C) ) (*encryption.MasterKey, error) { if newMasterKeyCalled < 2 { // initial load and save. no ciphertextKey - c.Assert(ciphertext, IsNil) + re.Nil(ciphertext) } else if newMasterKeyCalled == 2 { // called by loadKeys after saveKeys - c.Assert(bytes.Equal(ciphertext, outputCiphertextKey), IsTrue) + re.Equal(ciphertext, outputCiphertextKey) } newMasterKeyCalled += 1 return encryption.NewCustomMasterKeyForTest(outputMasterKey, outputCiphertextKey), nil @@ -853,7 +858,7 @@ func (s *testKeyManagerSuite) TestSetLeadershipMasterKeyWithCiphertextKey(c *C) }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with a different master key. config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", @@ -865,37 +870,38 @@ func (s *testKeyManagerSuite) TestSetLeadershipMasterKeyWithCiphertextKey(c *C) }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) - c.Assert(newMasterKeyCalled, Equals, 3) + re.NoError(err) + re.Equal(3, newMasterKeyCalled) // Check if keys are the same - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, keys)) meta, err := config.GetMasterKeyMeta() - c.Assert(err, IsNil) + re.NoError(err) // Check ciphertext key is stored with keys. 
- checkMasterKeyMeta(c, resp.Kvs[0].Value, meta, outputCiphertextKey) + checkMasterKeyMeta(re, resp.Kvs[0].Value, meta, outputCiphertextKey) } -func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionDisabling(c *C) { +func TestSetLeadershipWithEncryptionDisabling(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Listen on watcher event @@ -918,41 +924,42 @@ func (s *testKeyManagerSuite) TestSetLeadershipWithEncryptionDisabling(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Use default config. config := &encryption.Config{} err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check encryption is disabled <-reloadEvent expectedKeys := proto.Clone(keys).(*encryptionpb.KeyDictionary) expectedKeys.CurrentKeyId = disableEncryptionKeyID expectedKeys.Keys[123].WasExposed = true - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), expectedKeys), IsTrue) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), expectedKeys)) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, expectedKeys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, expectedKeys)) } -func (s *testKeyManagerSuite) TestKeyRotation(c *C) { +func TestKeyRotation(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -986,10 +993,10 @@ func (s *testKeyManagerSuite) TestKeyRotation(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with 100s rotation period. rotationPeriod, err := time.ParseDuration("100s") - c.Assert(err, IsNil) + re.NoError(err) config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", DataKeyRotationPeriod: typeutil.NewDuration(rotationPeriod), @@ -1001,22 +1008,22 @@ func (s *testKeyManagerSuite) TestKeyRotation(c *C) { }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. 
m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check keys - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, keys)) // Advance time and trigger ticker atomic.AddInt64(&mockNow, int64(101)) mockTick <- time.Unix(atomic.LoadInt64(&mockNow), 0) @@ -1024,32 +1031,33 @@ func (s *testKeyManagerSuite) TestKeyRotation(c *C) { <-reloadEvent // Check key is rotated. currentKeyID, currentKey, err := m.GetCurrentKey() - c.Assert(err, IsNil) - c.Assert(currentKeyID, Not(Equals), uint64(123)) - c.Assert(currentKey.Method, Equals, encryptionpb.EncryptionMethod_AES128_CTR) - c.Assert(currentKey.Key, HasLen, 16) - c.Assert(currentKey.CreationTime, Equals, uint64(mockNow)) - c.Assert(currentKey.WasExposed, IsFalse) + re.NoError(err) + re.NotEqual(uint64(123), currentKeyID) + re.Equal(encryptionpb.EncryptionMethod_AES128_CTR, currentKey.Method) + re.Len(currentKey.Key, 16) + re.Equal(uint64(mockNow), currentKey.CreationTime) + re.False(currentKey.WasExposed) loadedKeys := m.keys.Load().(*encryptionpb.KeyDictionary) - c.Assert(loadedKeys.CurrentKeyId, Equals, currentKeyID) - c.Assert(proto.Equal(loadedKeys.Keys[123], keys.Keys[123]), IsTrue) - c.Assert(proto.Equal(loadedKeys.Keys[currentKeyID], currentKey), IsTrue) + re.Equal(currentKeyID, loadedKeys.CurrentKeyId) + re.True(proto.Equal(loadedKeys.Keys[123], keys.Keys[123])) + re.True(proto.Equal(loadedKeys.Keys[currentKeyID], currentKey)) resp, err = etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err = extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, loadedKeys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, loadedKeys)) } -func (s *testKeyManagerSuite) TestKeyRotationConflict(c *C) { +func TestKeyRotationConflict(t *testing.T) { + re := require.New(t) // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(c) + client, cleanupEtcd := newTestEtcd(re) defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(c) + keyFile, cleanupKeyFile := newTestKeyFile(re) defer cleanupKeyFile() - leadership := newTestLeader(c, client) + leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() // Mock time @@ -1093,10 +1101,10 @@ func (s *testKeyManagerSuite) TestKeyRotationConflict(c *C) { }, } err := saveKeys(leadership, masterKeyMeta, keys, defaultKeyManagerHelper()) - c.Assert(err, IsNil) + re.NoError(err) // Config with 100s rotation period. 
rotationPeriod, err := time.ParseDuration("100s") - c.Assert(err, IsNil) + re.NoError(err) config := &encryption.Config{ DataEncryptionMethod: "aes128-ctr", DataKeyRotationPeriod: typeutil.NewDuration(rotationPeriod), @@ -1108,22 +1116,22 @@ func (s *testKeyManagerSuite) TestKeyRotationConflict(c *C) { }, } err = config.Adjust() - c.Assert(err, IsNil) + re.NoError(err) // Create the key manager. m, err := newKeyManagerImpl(client, config, helper) - c.Assert(err, IsNil) - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) go m.StartBackgroundLoop(ctx) // Set leadership err = m.SetLeadership(leadership) - c.Assert(err, IsNil) + re.NoError(err) // Check keys - c.Assert(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys), IsTrue) + re.True(proto.Equal(m.keys.Load().(*encryptionpb.KeyDictionary), keys)) resp, err := etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err := extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, keys)) // Invalidate leader after leader check. atomic.StoreInt32(&shouldResetLeader, 1) atomic.StoreInt32(&shouldListenSaveKeysFailure, 1) @@ -1134,10 +1142,10 @@ func (s *testKeyManagerSuite) TestKeyRotationConflict(c *C) { <-saveKeysFailureEvent // Check keys is unchanged. resp, err = etcdutil.EtcdKVGet(client, EncryptionKeysPath) - c.Assert(err, IsNil) + re.NoError(err) storedKeys, err = extractKeysFromKV(resp.Kvs[0], defaultKeyManagerHelper()) - c.Assert(err, IsNil) - c.Assert(proto.Equal(storedKeys, keys), IsTrue) + re.NoError(err) + re.True(proto.Equal(storedKeys, keys)) } func newMasterKey(keyFile string) *encryptionpb.MasterKey { From 83f14760e1c2cb53d68667f3192036ade7c5dcc0 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Mon, 13 Jun 2022 13:32:32 +0800 Subject: [PATCH 11/35] server: fix the bug that causes wrong statistics for over/undersized regions (#5137) close tikv/pd#5107 Fix the bug that causes wrong statistics for over/undersized regions. Signed-off-by: JmPotato Co-authored-by: Ti Chi Robot --- server/cluster/cluster.go | 5 ++++ server/cluster/cluster_test.go | 39 ++++++++++++++++++++++++++ server/config/persist_options.go | 14 +++++++++ server/core/region.go | 5 ++++ server/statistics/region_collection.go | 35 ++++++++++++++++++++--- 5 files changed, 94 insertions(+), 4 deletions(-) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 7c6f0c3701b..7b39a29d580 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -792,6 +792,11 @@ func (c *RaftCluster) processRegionHeartbeat(region *core.RegionInfo) error { // Mark isNew if the region in cache does not have leader. isNew, saveKV, saveCache, needSync := regionGuide(region, origin) if !saveKV && !saveCache && !isNew { + // Due to some config changes need to update the region stats as well, + // so we do some extra checks here. 
+ if c.regionStats != nil && c.regionStats.RegionStatsNeedUpdate(region) { + c.regionStats.Observe(region, c.getRegionStoresLocked(region)) + } return nil } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index c91899c662d..530abff2b87 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -881,6 +881,45 @@ func (s *testClusterInfoSuite) TestRegionFlowChanged(c *C) { c.Assert(newRegion.GetBytesRead(), Equals, uint64(1000)) } +func (s *testClusterInfoSuite) TestRegionSizeChanged(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + cluster.regionStats = statistics.NewRegionStatistics(cluster.GetOpts(), cluster.ruleManager, cluster.storeConfigManager) + region := newTestRegions(1, 3, 3)[0] + cluster.opt.GetMaxMergeRegionKeys() + curMaxMergeSize := int64(cluster.opt.GetMaxMergeRegionSize()) + curMaxMergeKeys := int64(cluster.opt.GetMaxMergeRegionKeys()) + region = region.Clone( + core.WithLeader(region.GetPeers()[2]), + core.SetApproximateSize(curMaxMergeSize-1), + core.SetApproximateKeys(curMaxMergeKeys-1), + core.SetFromHeartbeat(true), + ) + cluster.processRegionHeartbeat(region) + regionID := region.GetID() + c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsTrue) + // Test ApproximateSize and ApproximateKeys change. + region = region.Clone( + core.WithLeader(region.GetPeers()[2]), + core.SetApproximateSize(curMaxMergeSize+1), + core.SetApproximateKeys(curMaxMergeKeys+1), + core.SetFromHeartbeat(true), + ) + cluster.processRegionHeartbeat(region) + c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsFalse) + // Test MaxMergeRegionSize and MaxMergeRegionKeys change. + cluster.opt.SetMaxMergeRegionSize((uint64(curMaxMergeSize + 2))) + cluster.opt.SetMaxMergeRegionKeys((uint64(curMaxMergeKeys + 2))) + cluster.processRegionHeartbeat(region) + c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsTrue) + cluster.opt.SetMaxMergeRegionSize((uint64(curMaxMergeSize))) + cluster.opt.SetMaxMergeRegionKeys((uint64(curMaxMergeKeys))) + cluster.processRegionHeartbeat(region) + c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsFalse) +} + func (s *testClusterInfoSuite) TestConcurrentReportBucket(c *C) { _, opt, err := newTestScheduleConfig() c.Assert(err, IsNil) diff --git a/server/config/persist_options.go b/server/config/persist_options.go index 5882c123947..fe7203722c2 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -260,6 +260,20 @@ func (o *PersistOptions) SetSplitMergeInterval(splitMergeInterval time.Duration) o.SetScheduleConfig(v) } +// SetMaxMergeRegionSize sets the max merge region size. +func (o *PersistOptions) SetMaxMergeRegionSize(maxMergeRegionSize uint64) { + v := o.GetScheduleConfig().Clone() + v.MaxMergeRegionSize = maxMergeRegionSize + o.SetScheduleConfig(v) +} + +// SetMaxMergeRegionKeys sets the max merge region keys. +func (o *PersistOptions) SetMaxMergeRegionKeys(maxMergeRegionKeys uint64) { + v := o.GetScheduleConfig().Clone() + v.MaxMergeRegionKeys = maxMergeRegionKeys + o.SetScheduleConfig(v) +} + // SetStoreLimit sets a store limit for a given type and rate. 
func (o *PersistOptions) SetStoreLimit(storeID uint64, typ storelimit.Type, ratePerMin float64) { v := o.GetScheduleConfig().Clone() diff --git a/server/core/region.go b/server/core/region.go index 6ec75d4fef5..cc688712ad8 100644 --- a/server/core/region.go +++ b/server/core/region.go @@ -229,6 +229,11 @@ func (r *RegionInfo) NeedMerge(mergeSize int64, mergeKeys int64) bool { return r.GetApproximateSize() <= mergeSize && r.GetApproximateKeys() <= mergeKeys } +// IsOversized indicates whether the region is oversized. +func (r *RegionInfo) IsOversized(maxSize int64, maxKeys int64) bool { + return r.GetApproximateSize() >= maxSize || r.GetApproximateKeys() >= maxKeys +} + // GetTerm returns the current term of the region func (r *RegionInfo) GetTerm() uint64 { return r.term diff --git a/server/statistics/region_collection.go b/server/statistics/region_collection.go index 807a93d87a6..1c46d7acdda 100644 --- a/server/statistics/region_collection.go +++ b/server/statistics/region_collection.go @@ -101,6 +101,14 @@ func (r *RegionStatistics) GetRegionStatsByType(typ RegionStatisticType) []*core return res } +// IsRegionStatsType returns whether the status of the region is the given type. +func (r *RegionStatistics) IsRegionStatsType(regionID uint64, typ RegionStatisticType) bool { + r.RLock() + defer r.RUnlock() + _, exist := r.stats[typ][regionID] + return exist +} + // GetOfflineRegionStatsByType gets the status of the offline region by types. The regions here need to be cloned, otherwise, it may cause data race problems. func (r *RegionStatistics) GetOfflineRegionStatsByType(typ RegionStatisticType) []*core.RegionInfo { r.RLock() @@ -128,6 +136,18 @@ func (r *RegionStatistics) deleteOfflineEntry(deleteIndex RegionStatisticType, r } } +// RegionStatsNeedUpdate checks whether the region's status need to be updated +// due to some special state types. +func (r *RegionStatistics) RegionStatsNeedUpdate(region *core.RegionInfo) bool { + regionID := region.GetID() + if r.IsRegionStatsType(regionID, OversizedRegion) != + region.IsOversized(int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxSize()), int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys())) { + return true + } + return r.IsRegionStatsType(regionID, UndersizedRegion) != + region.NeedMerge(int64(r.opt.GetMaxMergeRegionSize()), int64(r.opt.GetMaxMergeRegionKeys())) +} + // Observe records the current regions' status. func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.StoreInfo) { r.Lock() @@ -169,6 +189,9 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store } } + // Better to make sure once any of these conditions changes, it will trigger the heartbeat `save_cache`. + // Otherwise, the state may be out-of-date for a long time, which needs another way to apply the change ASAP. + // For example, see `RegionStatsNeedUpdate` above to know how `OversizedRegion` and ``UndersizedRegion` are updated. 
conditions := map[RegionStatisticType]bool{ MissPeer: len(region.GetPeers()) < desiredReplicas, ExtraPeer: len(region.GetPeers()) > desiredReplicas, @@ -176,10 +199,14 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store PendingPeer: len(region.GetPendingPeers()) > 0, LearnerPeer: len(region.GetLearners()) > 0, EmptyRegion: region.GetApproximateSize() <= core.EmptyRegionApproximateSize, - OversizedRegion: region.GetApproximateSize() >= int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxSize()) || - region.GetApproximateKeys() >= int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys()), - UndersizedRegion: region.NeedMerge(int64(r.opt.GetScheduleConfig().MaxMergeRegionSize), - int64(r.opt.GetScheduleConfig().MaxMergeRegionKeys)), + OversizedRegion: region.IsOversized( + int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxSize()), + int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys()), + ), + UndersizedRegion: region.NeedMerge( + int64(r.opt.GetMaxMergeRegionSize()), + int64(r.opt.GetMaxMergeRegionKeys()), + ), } for typ, c := range conditions { From 1c2a4da9aed281634ea36ab8ae5a0cd1960b639e Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 13 Jun 2022 15:04:33 +0800 Subject: [PATCH 12/35] syncer: fix the wrong gRPC code usage (#5142) ref tikv/pd#5122 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/region_syncer/client.go | 4 ++-- server/region_syncer/client_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/server/region_syncer/client.go b/server/region_syncer/client.go index 3f2419ad720..ba9fd2edcd6 100644 --- a/server/region_syncer/client.go +++ b/server/region_syncer/client.go @@ -90,7 +90,7 @@ func (s *RegionSyncer) syncRegion(ctx context.Context, conn *grpc.ClientConn) (C cli := pdpb.NewPDClient(conn) syncStream, err := cli.SyncRegions(ctx) if err != nil { - return nil, errs.ErrGRPCCreateStream.Wrap(err).FastGenWithCause() + return nil, err } err = syncStream.Send(&pdpb.SyncRegionRequest{ Header: &pdpb.RequestHeader{ClusterId: s.server.ClusterID()}, @@ -98,7 +98,7 @@ func (s *RegionSyncer) syncRegion(ctx context.Context, conn *grpc.ClientConn) (C StartIndex: s.history.GetNextIndex(), }) if err != nil { - return nil, errs.ErrGRPCSend.Wrap(err).FastGenWithCause() + return nil, err } return syncStream, nil diff --git a/server/region_syncer/client_test.go b/server/region_syncer/client_test.go index ca39cee4859..b63deaae3e0 100644 --- a/server/region_syncer/client_test.go +++ b/server/region_syncer/client_test.go @@ -27,6 +27,8 @@ import ( "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/storage" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // For issue https://github.com/tikv/pd/issues/3936 @@ -60,6 +62,29 @@ func TestLoadRegion(t *testing.T) { re.Less(time.Since(start), time.Second*2) } +func TestErrorCode(t *testing.T) { + re := require.New(t) + tempDir, err := os.MkdirTemp(os.TempDir(), "region_syncer_err") + re.NoError(err) + defer os.RemoveAll(tempDir) + rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil) + re.NoError(err) + server := &mockServer{ + ctx: context.Background(), + storage: storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs), + bc: core.NewBasicCluster(), + } + ctx, cancel := context.WithCancel(context.TODO()) + rc := NewRegionSyncer(server) + conn, err := grpcutil.GetClientConn(ctx, "127.0.0.1", nil) + re.NoError(err) + cancel() + 
_, err = rc.syncRegion(ctx, conn) + ev, ok := status.FromError(err) + re.True(ok) + re.Equal(codes.Canceled, ev.Code()) +} + type mockServer struct { ctx context.Context member, leader *pdpb.Member From 958d687c32389ce5a06eb1dc3303d5d440328d6e Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 13 Jun 2022 15:24:34 +0800 Subject: [PATCH 13/35] statistics: migrate test framework to testify (#5140) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/statistics/hot_peer_cache_test.go | 310 ++++++++++---------- server/statistics/kind_test.go | 43 ++- server/statistics/region_collection_test.go | 153 +++++----- server/statistics/store_collection_test.go | 46 ++- server/statistics/store_test.go | 18 +- server/statistics/topn_test.go | 72 ++--- 6 files changed, 314 insertions(+), 328 deletions(-) diff --git a/server/statistics/hot_peer_cache_test.go b/server/statistics/hot_peer_cache_test.go index 347e2a423d8..c021f05df3f 100644 --- a/server/statistics/hot_peer_cache_test.go +++ b/server/statistics/hot_peer_cache_test.go @@ -20,27 +20,24 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/movingaverage" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testHotPeerCache{}) - -type testHotPeerCache struct{} - -func (t *testHotPeerCache) TestStoreTimeUnsync(c *C) { +func TestStoreTimeUnsync(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Write) intervals := []uint64{120, 60} for _, interval := range intervals { region := buildRegion(Write, 3, interval) - checkAndUpdate(c, cache, region, 3) + checkAndUpdate(re, cache, region, 3) { stats := cache.RegionStats(0) - c.Assert(stats, HasLen, 3) + re.Len(stats, 3) for _, s := range stats { - c.Assert(s, HasLen, 1) + re.Len(s, 1) } } } @@ -62,7 +59,8 @@ type testCacheCase struct { actionType ActionType } -func (t *testHotPeerCache) TestCache(c *C) { +func TestCache(t *testing.T) { + re := require.New(t) tests := []*testCacheCase{ {Read, transferLeader, 3, Update}, {Read, movePeer, 4, Remove}, @@ -71,26 +69,22 @@ func (t *testHotPeerCache) TestCache(c *C) { {Write, movePeer, 4, Remove}, {Write, addReplica, 4, Remove}, } - for _, t := range tests { - testCache(c, t) - } -} - -func testCache(c *C, t *testCacheCase) { - defaultSize := map[RWType]int{ - Read: 3, // all peers - Write: 3, // all peers - } - cache := NewHotPeerCache(t.kind) - region := buildRegion(t.kind, 3, 60) - checkAndUpdate(c, cache, region, defaultSize[t.kind]) - checkHit(c, cache, region, t.kind, Add) // all peers are new - - srcStore, region := schedule(c, t.operator, region, 10) - res := checkAndUpdate(c, cache, region, t.expect) - checkHit(c, cache, region, t.kind, Update) // hit cache - if t.expect != defaultSize[t.kind] { - checkOp(c, res, srcStore, t.actionType) + for _, test := range tests { + defaultSize := map[RWType]int{ + Read: 3, // all peers + Write: 3, // all peers + } + cache := NewHotPeerCache(test.kind) + region := buildRegion(test.kind, 3, 60) + checkAndUpdate(re, cache, region, defaultSize[test.kind]) + checkHit(re, cache, region, test.kind, Add) // all peers are new + + srcStore, region := schedule(re, test.operator, region, 10) + res := checkAndUpdate(re, cache, region, test.expect) + checkHit(re, cache, region, test.kind, Update) // hit cache + if test.expect != defaultSize[test.kind] { + checkOp(re, res, srcStore, test.actionType) + } } } @@ -127,35 +121,35 @@ func updateFlow(cache *hotPeerCache, res 
[]*HotPeerStat) []*HotPeerStat { return res } -type check func(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) +type check func(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) -func checkAndUpdate(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { +func checkAndUpdate(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { res = checkFlow(cache, region, region.GetPeers()) if len(expect) != 0 { - c.Assert(res, HasLen, expect[0]) + re.Len(res, expect[0]) } return updateFlow(cache, res) } // Check and update peers in the specified order that old item that he items that have not expired come first, and the items that have expired come second. // This order is also similar to the previous version. By the way the order in now version is random. -func checkAndUpdateWithOrdering(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { +func checkAndUpdateWithOrdering(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { res = checkFlow(cache, region, orderingPeers(cache, region)) if len(expect) != 0 { - c.Assert(res, HasLen, expect[0]) + re.Len(res, expect[0]) } return updateFlow(cache, res) } -func checkAndUpdateSkipOne(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { +func checkAndUpdateSkipOne(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { res = checkFlow(cache, region, region.GetPeers()[1:]) if len(expect) != 0 { - c.Assert(res, HasLen, expect[0]) + re.Len(res, expect[0]) } return updateFlow(cache, res) } -func checkHit(c *C, cache *hotPeerCache, region *core.RegionInfo, kind RWType, actionType ActionType) { +func checkHit(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, kind RWType, actionType ActionType) { var peers []*metapb.Peer if kind == Read { peers = []*metapb.Peer{region.GetLeader()} @@ -164,15 +158,15 @@ func checkHit(c *C, cache *hotPeerCache, region *core.RegionInfo, kind RWType, a } for _, peer := range peers { item := cache.getOldHotPeerStat(region.GetID(), peer.StoreId) - c.Assert(item, NotNil) - c.Assert(item.actionType, Equals, actionType) + re.NotNil(item) + re.Equal(actionType, item.actionType) } } -func checkOp(c *C, ret []*HotPeerStat, storeID uint64, actionType ActionType) { +func checkOp(re *require.Assertions, ret []*HotPeerStat, storeID uint64, actionType ActionType) { for _, item := range ret { if item.StoreID == storeID { - c.Assert(item.actionType, Equals, actionType) + re.Equal(actionType, item.actionType) return } } @@ -192,7 +186,7 @@ func checkIntervalSum(cache *hotPeerCache, region *core.RegionInfo) bool { } // checkIntervalSumContinuous checks whether the interval sum of the peer is continuous. 
-func checkIntervalSumContinuous(c *C, intervalSums map[uint64]int, rets []*HotPeerStat, interval uint64) { +func checkIntervalSumContinuous(re *require.Assertions, intervalSums map[uint64]int, rets []*HotPeerStat, interval uint64) { for _, ret := range rets { if ret.actionType == Remove { delete(intervalSums, ret.StoreID) @@ -201,27 +195,27 @@ func checkIntervalSumContinuous(c *C, intervalSums map[uint64]int, rets []*HotPe new := int(ret.getIntervalSum() / 1000000000) if ret.source == direct { if old, ok := intervalSums[ret.StoreID]; ok { - c.Assert(new, Equals, (old+int(interval))%RegionHeartBeatReportInterval) + re.Equal((old+int(interval))%RegionHeartBeatReportInterval, new) } } intervalSums[ret.StoreID] = new } } -func schedule(c *C, operator operator, region *core.RegionInfo, targets ...uint64) (srcStore uint64, _ *core.RegionInfo) { +func schedule(re *require.Assertions, operator operator, region *core.RegionInfo, targets ...uint64) (srcStore uint64, _ *core.RegionInfo) { switch operator { case transferLeader: _, newLeader := pickFollower(region) return region.GetLeader().StoreId, region.Clone(core.WithLeader(newLeader)) case movePeer: - c.Assert(targets, HasLen, 1) + re.Len(targets, 1) index, _ := pickFollower(region) srcStore := region.GetPeers()[index].StoreId region := region.Clone(core.WithAddPeer(&metapb.Peer{Id: targets[0]*10 + 1, StoreId: targets[0]})) region = region.Clone(core.WithRemoveStorePeer(srcStore)) return srcStore, region case addReplica: - c.Assert(targets, HasLen, 1) + re.Len(targets, 1) region := region.Clone(core.WithAddPeer(&metapb.Peer{Id: targets[0]*10 + 1, StoreId: targets[0]})) return 0, region case removeReplica: @@ -307,7 +301,8 @@ func newPeers(n int, pid genID, sid genID) []*metapb.Peer { return peers } -func (t *testHotPeerCache) TestUpdateHotPeerStat(c *C) { +func TestUpdateHotPeerStat(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Read) // we statistic read peer info from store heartbeat rather than region heartbeat m := RegionHeartBeatReportInterval / StoreHeartBeatReportInterval @@ -315,69 +310,70 @@ func (t *testHotPeerCache) TestUpdateHotPeerStat(c *C) { // skip interval=0 newItem := &HotPeerStat{actionType: Update, thresholds: []float64{0.0, 0.0, 0.0}, Kind: Read} newItem = cache.updateHotPeerStat(nil, newItem, nil, []float64{0.0, 0.0, 0.0}, 0) - c.Check(newItem, IsNil) + re.Nil(newItem) // new peer, interval is larger than report interval, but no hot newItem = &HotPeerStat{actionType: Update, thresholds: []float64{1.0, 1.0, 1.0}, Kind: Read} newItem = cache.updateHotPeerStat(nil, newItem, nil, []float64{0.0, 0.0, 0.0}, 10*time.Second) - c.Check(newItem, IsNil) + re.Nil(newItem) // new peer, interval is less than report interval newItem = &HotPeerStat{actionType: Update, thresholds: []float64{0.0, 0.0, 0.0}, Kind: Read} newItem = cache.updateHotPeerStat(nil, newItem, nil, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem, NotNil) - c.Check(newItem.HotDegree, Equals, 0) - c.Check(newItem.AntiCount, Equals, 0) + re.NotNil(newItem) + re.Equal(0, newItem.HotDegree) + re.Equal(0, newItem.AntiCount) // sum of interval is less than report interval oldItem := newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem.HotDegree, Equals, 0) - c.Check(newItem.AntiCount, Equals, 0) + re.Equal(0, newItem.HotDegree) + re.Equal(0, newItem.AntiCount) // sum of interval is larger than report interval, and hot oldItem = newItem newItem = cache.updateHotPeerStat(nil, 
newItem, oldItem, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem.HotDegree, Equals, 1) - c.Check(newItem.AntiCount, Equals, 2*m) + re.Equal(1, newItem.HotDegree) + re.Equal(2*m, newItem.AntiCount) // sum of interval is less than report interval oldItem = newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem.HotDegree, Equals, 1) - c.Check(newItem.AntiCount, Equals, 2*m) + re.Equal(1, newItem.HotDegree) + re.Equal(2*m, newItem.AntiCount) // sum of interval is larger than report interval, and hot oldItem = newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 10*time.Second) - c.Check(newItem.HotDegree, Equals, 2) - c.Check(newItem.AntiCount, Equals, 2*m) + re.Equal(2, newItem.HotDegree) + re.Equal(2*m, newItem.AntiCount) // sum of interval is larger than report interval, and cold oldItem = newItem newItem.thresholds = []float64{10.0, 10.0, 10.0} newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 10*time.Second) - c.Check(newItem.HotDegree, Equals, 1) - c.Check(newItem.AntiCount, Equals, 2*m-1) + re.Equal(1, newItem.HotDegree) + re.Equal(2*m-1, newItem.AntiCount) // sum of interval is larger than report interval, and cold for i := 0; i < 2*m-1; i++ { oldItem = newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 10*time.Second) } - c.Check(newItem.HotDegree, Less, 0) - c.Check(newItem.AntiCount, Equals, 0) - c.Check(newItem.actionType, Equals, Remove) + re.Less(newItem.HotDegree, 0) + re.Equal(0, newItem.AntiCount) + re.Equal(Remove, newItem.actionType) } -func (t *testHotPeerCache) TestThresholdWithUpdateHotPeerStat(c *C) { +func TestThresholdWithUpdateHotPeerStat(t *testing.T) { + re := require.New(t) byteRate := minHotThresholds[RegionReadBytes] * 2 expectThreshold := byteRate * HotThresholdRatio - t.testMetrics(c, 120., byteRate, expectThreshold) - t.testMetrics(c, 60., byteRate, expectThreshold) - t.testMetrics(c, 30., byteRate, expectThreshold) - t.testMetrics(c, 17., byteRate, expectThreshold) - t.testMetrics(c, 1., byteRate, expectThreshold) + testMetrics(re, 120., byteRate, expectThreshold) + testMetrics(re, 60., byteRate, expectThreshold) + testMetrics(re, 30., byteRate, expectThreshold) + testMetrics(re, 17., byteRate, expectThreshold) + testMetrics(re, 1., byteRate, expectThreshold) } -func (t *testHotPeerCache) testMetrics(c *C, interval, byteRate, expectThreshold float64) { +func testMetrics(re *require.Assertions, interval, byteRate, expectThreshold float64) { cache := NewHotPeerCache(Read) storeID := uint64(1) - c.Assert(byteRate, GreaterEqual, minHotThresholds[RegionReadBytes]) + re.GreaterOrEqual(byteRate, minHotThresholds[RegionReadBytes]) for i := uint64(1); i < TopNN+10; i++ { var oldItem *HotPeerStat for { @@ -401,14 +397,15 @@ func (t *testHotPeerCache) testMetrics(c *C, interval, byteRate, expectThreshold } thresholds := cache.calcHotThresholds(storeID) if i < TopNN { - c.Assert(thresholds[RegionReadBytes], Equals, minHotThresholds[RegionReadBytes]) + re.Equal(minHotThresholds[RegionReadBytes], thresholds[RegionReadBytes]) } else { - c.Assert(thresholds[RegionReadBytes], Equals, expectThreshold) + re.Equal(expectThreshold, thresholds[RegionReadBytes]) } } } -func (t *testHotPeerCache) TestRemoveFromCache(c *C) { +func TestRemoveFromCache(t *testing.T) { + re := require.New(t) peerCount := 3 interval := uint64(5) checkers := []check{checkAndUpdate, 
checkAndUpdateWithOrdering} @@ -418,29 +415,30 @@ func (t *testHotPeerCache) TestRemoveFromCache(c *C) { // prepare intervalSums := make(map[uint64]int) for i := 1; i <= 200; i++ { - rets := checker(c, cache, region) - checkIntervalSumContinuous(c, intervalSums, rets, interval) + rets := checker(re, cache, region) + checkIntervalSumContinuous(re, intervalSums, rets, interval) } // make the interval sum of peers are different - checkAndUpdateSkipOne(c, cache, region) + checkAndUpdateSkipOne(re, cache, region) checkIntervalSum(cache, region) // check whether cold cache is cleared var isClear bool intervalSums = make(map[uint64]int) region = region.Clone(core.SetWrittenBytes(0), core.SetWrittenKeys(0), core.SetWrittenQuery(0)) for i := 1; i <= 200; i++ { - rets := checker(c, cache, region) - checkIntervalSumContinuous(c, intervalSums, rets, interval) + rets := checker(re, cache, region) + checkIntervalSumContinuous(re, intervalSums, rets, interval) if len(cache.storesOfRegion[region.GetID()]) == 0 { isClear = true break } } - c.Assert(isClear, IsTrue) + re.True(isClear) } } -func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { +func TestRemoveFromCacheRandom(t *testing.T) { + re := require.New(t) peerCounts := []int{3, 5} intervals := []uint64{120, 60, 10, 5} checkers := []check{checkAndUpdate, checkAndUpdateWithOrdering} @@ -455,12 +453,12 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { step := func(i int) { tmp := uint64(0) if i%5 == 0 { - tmp, region = schedule(c, removeReplica, region) + tmp, region = schedule(re, removeReplica, region) } - rets := checker(c, cache, region) - checkIntervalSumContinuous(c, intervalSums, rets, interval) + rets := checker(re, cache, region) + checkIntervalSumContinuous(re, intervalSums, rets, interval) if i%5 == 0 { - _, region = schedule(c, addReplica, region, target) + _, region = schedule(re, addReplica, region, target) target = tmp } } @@ -473,9 +471,9 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { } } if interval < RegionHeartBeatReportInterval { - c.Assert(checkIntervalSum(cache, region), IsTrue) + re.True(checkIntervalSum(cache, region)) } - c.Assert(cache.storesOfRegion[region.GetID()], HasLen, peerCount) + re.Len(cache.storesOfRegion[region.GetID()], peerCount) // check whether cold cache is cleared var isClear bool @@ -488,119 +486,98 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { break } } - c.Assert(isClear, IsTrue) + re.True(isClear) } } } } -func checkCoolDown(c *C, cache *hotPeerCache, region *core.RegionInfo, expect bool) { +func checkCoolDown(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect bool) { item := cache.getOldHotPeerStat(region.GetID(), region.GetLeader().GetStoreId()) - c.Assert(item.IsNeedCoolDownTransferLeader(3), Equals, expect) + re.Equal(expect, item.IsNeedCoolDownTransferLeader(3)) } -func (t *testHotPeerCache) TestCoolDownTransferLeader(c *C) { +func TestCoolDownTransferLeader(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Read) region := buildRegion(Read, 3, 60) moveLeader := func() { - _, region = schedule(c, movePeer, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) - _, region = schedule(c, transferLeader, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, true) + _, region = schedule(re, movePeer, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) + _, region = schedule(re, transferLeader, region, 10) + 
checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, true) } transferLeader := func() { - _, region = schedule(c, transferLeader, region) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, true) + _, region = schedule(re, transferLeader, region) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, true) } movePeer := func() { - _, region = schedule(c, movePeer, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) + _, region = schedule(re, movePeer, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) } addReplica := func() { - _, region = schedule(c, addReplica, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) + _, region = schedule(re, addReplica, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) } removeReplica := func() { - _, region = schedule(c, removeReplica, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) + _, region = schedule(re, removeReplica, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) } cases := []func(){moveLeader, transferLeader, movePeer, addReplica, removeReplica} for _, runCase := range cases { cache = NewHotPeerCache(Read) region = buildRegion(Read, 3, 60) for i := 1; i <= 200; i++ { - checkAndUpdate(c, cache, region) + checkAndUpdate(re, cache, region) } - checkCoolDown(c, cache, region, false) + checkCoolDown(re, cache, region, false) runCase() } } // See issue #4510 -func (t *testHotPeerCache) TestCacheInherit(c *C) { +func TestCacheInherit(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Read) region := buildRegion(Read, 3, 10) // prepare for i := 1; i <= 200; i++ { - checkAndUpdate(c, cache, region) + checkAndUpdate(re, cache, region) } // move peer newStoreID := uint64(10) - _, region = schedule(c, addReplica, region, newStoreID) - checkAndUpdate(c, cache, region) - newStoreID, region = schedule(c, removeReplica, region) - rets := checkAndUpdate(c, cache, region) + _, region = schedule(re, addReplica, region, newStoreID) + checkAndUpdate(re, cache, region) + newStoreID, region = schedule(re, removeReplica, region) + rets := checkAndUpdate(re, cache, region) for _, ret := range rets { if ret.actionType != Remove { flow := ret.GetLoads()[RegionReadBytes] - c.Assert(flow, Equals, float64(region.GetBytesRead()/ReadReportInterval)) + re.Equal(float64(region.GetBytesRead()/ReadReportInterval), flow) } } // new flow newFlow := region.GetBytesRead() * 10 region = region.Clone(core.SetReadBytes(newFlow)) for i := 1; i <= 200; i++ { - checkAndUpdate(c, cache, region) + checkAndUpdate(re, cache, region) } // move peer - _, region = schedule(c, addReplica, region, newStoreID) - checkAndUpdate(c, cache, region) - _, region = schedule(c, removeReplica, region) - rets = checkAndUpdate(c, cache, region) + _, region = schedule(re, addReplica, region, newStoreID) + checkAndUpdate(re, cache, region) + _, region = schedule(re, removeReplica, region) + rets = checkAndUpdate(re, cache, region) for _, ret := range rets { if ret.actionType != Remove { flow := ret.GetLoads()[RegionReadBytes] - c.Assert(flow, Equals, float64(newFlow/ReadReportInterval)) - } - } -} - -func BenchmarkCheckRegionFlow(b *testing.B) { - cache := NewHotPeerCache(Read) - region := buildRegion(Read, 3, 10) - peerInfos := make([]*core.PeerInfo, 0) - for _, peer := range region.GetPeers() { - peerInfo := 
core.NewPeerInfo(peer, region.GetLoads(), 10) - peerInfos = append(peerInfos, peerInfo) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - items := make([]*HotPeerStat, 0) - for _, peerInfo := range peerInfos { - item := cache.checkPeerFlow(peerInfo, region) - if item != nil { - items = append(items, item) - } - } - for _, ret := range items { - cache.updateStat(ret) + re.Equal(float64(newFlow/ReadReportInterval), flow) } } } @@ -610,7 +587,7 @@ type testMovingAverageCase struct { expect []float64 } -func checkMovingAverage(c *C, testCase *testMovingAverageCase) { +func checkMovingAverage(re *require.Assertions, testCase *testMovingAverageCase) { interval := 1 * time.Second tm := movingaverage.NewTimeMedian(DefaultAotSize, DefaultWriteMfSize, interval) var results []float64 @@ -618,11 +595,11 @@ func checkMovingAverage(c *C, testCase *testMovingAverageCase) { tm.Add(data, interval) results = append(results, tm.Get()) } - c.Assert(results, DeepEquals, testCase.expect) + re.Equal(testCase.expect, results) } -// -func (t *testHotPeerCache) TestUnstableData(c *C) { +func TestUnstableData(t *testing.T) { + re := require.New(t) cases := []*testMovingAverageCase{ { report: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, @@ -650,6 +627,29 @@ func (t *testHotPeerCache) TestUnstableData(c *C) { }, } for i := range cases { - checkMovingAverage(c, cases[i]) + checkMovingAverage(re, cases[i]) + } +} + +func BenchmarkCheckRegionFlow(b *testing.B) { + cache := NewHotPeerCache(Read) + region := buildRegion(Read, 3, 10) + peerInfos := make([]*core.PeerInfo, 0) + for _, peer := range region.GetPeers() { + peerInfo := core.NewPeerInfo(peer, region.GetLoads(), 10) + peerInfos = append(peerInfos, peerInfo) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + items := make([]*HotPeerStat, 0) + for _, peerInfo := range peerInfos { + item := cache.checkPeerFlow(peerInfo, region) + if item != nil { + items = append(items, item) + } + } + for _, ret := range items { + cache.updateStat(ret) + } } } diff --git a/server/statistics/kind_test.go b/server/statistics/kind_test.go index 86e9a77e10b..ccde182eefe 100644 --- a/server/statistics/kind_test.go +++ b/server/statistics/kind_test.go @@ -15,17 +15,16 @@ package statistics import ( - . 
"github.com/pingcap/check" + "testing" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testRegionInfoSuite{}) - -type testRegionInfoSuite struct{} - -func (s *testRegionInfoSuite) TestGetLoads(c *C) { +func TestGetLoads(t *testing.T) { + re := require.New(t) queryStats := &pdpb.QueryStats{ Get: 5, Coprocessor: 6, @@ -45,24 +44,24 @@ func (s *testRegionInfoSuite) TestGetLoads(c *C) { core.SetWrittenKeys(4), core.SetQueryStats(queryStats)) loads := regionA.GetLoads() - c.Assert(loads, HasLen, int(RegionStatCount)) - c.Assert(float64(regionA.GetBytesRead()), Equals, loads[RegionReadBytes]) - c.Assert(float64(regionA.GetKeysRead()), Equals, loads[RegionReadKeys]) - c.Assert(float64(regionA.GetReadQueryNum()), Equals, loads[RegionReadQuery]) + re.Len(loads, int(RegionStatCount)) + re.Equal(float64(regionA.GetBytesRead()), loads[RegionReadBytes]) + re.Equal(float64(regionA.GetKeysRead()), loads[RegionReadKeys]) + re.Equal(float64(regionA.GetReadQueryNum()), loads[RegionReadQuery]) readQuery := float64(queryStats.Coprocessor + queryStats.Get + queryStats.Scan) - c.Assert(float64(regionA.GetReadQueryNum()), Equals, readQuery) - c.Assert(float64(regionA.GetBytesWritten()), Equals, loads[RegionWriteBytes]) - c.Assert(float64(regionA.GetKeysWritten()), Equals, loads[RegionWriteKeys]) - c.Assert(float64(regionA.GetWriteQueryNum()), Equals, loads[RegionWriteQuery]) + re.Equal(float64(regionA.GetReadQueryNum()), readQuery) + re.Equal(float64(regionA.GetBytesWritten()), loads[RegionWriteBytes]) + re.Equal(float64(regionA.GetKeysWritten()), loads[RegionWriteKeys]) + re.Equal(float64(regionA.GetWriteQueryNum()), loads[RegionWriteQuery]) writeQuery := float64(queryStats.Put + queryStats.Delete + queryStats.DeleteRange + queryStats.AcquirePessimisticLock + queryStats.Rollback + queryStats.Prewrite + queryStats.Commit) - c.Assert(float64(regionA.GetWriteQueryNum()), Equals, writeQuery) + re.Equal(float64(regionA.GetWriteQueryNum()), writeQuery) loads = regionA.GetWriteLoads() - c.Assert(loads, HasLen, int(RegionStatCount)) - c.Assert(0.0, Equals, loads[RegionReadBytes]) - c.Assert(0.0, Equals, loads[RegionReadKeys]) - c.Assert(0.0, Equals, loads[RegionReadQuery]) - c.Assert(float64(regionA.GetBytesWritten()), Equals, loads[RegionWriteBytes]) - c.Assert(float64(regionA.GetKeysWritten()), Equals, loads[RegionWriteKeys]) - c.Assert(float64(regionA.GetWriteQueryNum()), Equals, loads[RegionWriteQuery]) + re.Len(loads, int(RegionStatCount)) + re.Equal(0.0, loads[RegionReadBytes]) + re.Equal(0.0, loads[RegionReadKeys]) + re.Equal(0.0, loads[RegionReadQuery]) + re.Equal(float64(regionA.GetBytesWritten()), loads[RegionWriteBytes]) + re.Equal(float64(regionA.GetKeysWritten()), loads[RegionWriteKeys]) + re.Equal(float64(regionA.GetWriteQueryNum()), loads[RegionWriteQuery]) } diff --git a/server/statistics/region_collection_test.go b/server/statistics/region_collection_test.go index eb100e958fd..932c35f139e 100644 --- a/server/statistics/region_collection_test.go +++ b/server/statistics/region_collection_test.go @@ -17,36 +17,21 @@ package statistics import ( "testing" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/server/storage" - "github.com/tikv/pd/server/storage/endpoint" ) -func TestStatistics(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testRegionStatisticsSuite{}) - -type testRegionStatisticsSuite struct { - store endpoint.RuleStorage - manager *placement.RuleManager -} - -func (t *testRegionStatisticsSuite) SetUpTest(c *C) { - t.store = storage.NewStorageWithMemoryBackend() - var err error - t.manager = placement.NewRuleManager(t.store, nil, nil) - err = t.manager.Initialize(3, []string{"zone", "rack", "host"}) - c.Assert(err, IsNil) -} - -func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) { +func TestRegionStatistics(t *testing.T) { + re := require.New(t) + store := storage.NewStorageWithMemoryBackend() + manager := placement.NewRuleManager(store, nil, nil) + err := manager.Initialize(3, []string{"zone", "rack", "host"}) + re.NoError(err) opt := config.NewTestOptions() opt.SetPlacementRuleEnabled(false) peers := []*metapb.Peer{ @@ -80,14 +65,14 @@ func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) { r2 := &metapb.Region{Id: 2, Peers: peers[0:2], StartKey: []byte("cc"), EndKey: []byte("dd")} region1 := core.NewRegionInfo(r1, peers[0]) region2 := core.NewRegionInfo(r2, peers[0]) - regionStats := NewRegionStatistics(opt, t.manager, nil) + regionStats := NewRegionStatistics(opt, manager, nil) regionStats.Observe(region1, stores) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.stats[EmptyRegion], HasLen, 1) - c.Assert(regionStats.stats[UndersizedRegion], HasLen, 1) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 1) + re.Len(regionStats.stats[EmptyRegion], 1) + re.Len(regionStats.stats[UndersizedRegion], 1) + re.Len(regionStats.offlineStats[ExtraPeer], 1) + re.Len(regionStats.offlineStats[LearnerPeer], 1) region1 = region1.Clone( core.WithDownPeers(downPeers), @@ -95,58 +80,63 @@ func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) { core.SetApproximateSize(144), ) regionStats.Observe(region1, stores) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.stats[MissPeer], HasLen, 0) - c.Assert(regionStats.stats[DownPeer], HasLen, 1) - c.Assert(regionStats.stats[PendingPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.stats[EmptyRegion], HasLen, 0) - c.Assert(regionStats.stats[OversizedRegion], HasLen, 1) - c.Assert(regionStats.stats[UndersizedRegion], HasLen, 0) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[MissPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[DownPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[PendingPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[OfflinePeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) + re.Len(regionStats.stats[MissPeer], 0) + re.Len(regionStats.stats[DownPeer], 1) + re.Len(regionStats.stats[PendingPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 1) + re.Len(regionStats.stats[EmptyRegion], 0) + 
re.Len(regionStats.stats[OversizedRegion], 1) + re.Len(regionStats.stats[UndersizedRegion], 0) + re.Len(regionStats.offlineStats[ExtraPeer], 1) + re.Len(regionStats.offlineStats[MissPeer], 0) + re.Len(regionStats.offlineStats[DownPeer], 1) + re.Len(regionStats.offlineStats[PendingPeer], 1) + re.Len(regionStats.offlineStats[LearnerPeer], 1) + re.Len(regionStats.offlineStats[OfflinePeer], 1) region2 = region2.Clone(core.WithDownPeers(downPeers[0:1])) regionStats.Observe(region2, stores[0:2]) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.stats[MissPeer], HasLen, 1) - c.Assert(regionStats.stats[DownPeer], HasLen, 2) - c.Assert(regionStats.stats[PendingPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.stats[OversizedRegion], HasLen, 1) - c.Assert(regionStats.stats[UndersizedRegion], HasLen, 1) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[MissPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[DownPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[PendingPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[OfflinePeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) + re.Len(regionStats.stats[MissPeer], 1) + re.Len(regionStats.stats[DownPeer], 2) + re.Len(regionStats.stats[PendingPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 1) + re.Len(regionStats.stats[OversizedRegion], 1) + re.Len(regionStats.stats[UndersizedRegion], 1) + re.Len(regionStats.offlineStats[ExtraPeer], 1) + re.Len(regionStats.offlineStats[MissPeer], 0) + re.Len(regionStats.offlineStats[DownPeer], 1) + re.Len(regionStats.offlineStats[PendingPeer], 1) + re.Len(regionStats.offlineStats[LearnerPeer], 1) + re.Len(regionStats.offlineStats[OfflinePeer], 1) region1 = region1.Clone(core.WithRemoveStorePeer(7)) regionStats.Observe(region1, stores[0:3]) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 0) - c.Assert(regionStats.stats[MissPeer], HasLen, 1) - c.Assert(regionStats.stats[DownPeer], HasLen, 2) - c.Assert(regionStats.stats[PendingPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[MissPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[DownPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[PendingPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[OfflinePeer], HasLen, 0) + re.Len(regionStats.stats[ExtraPeer], 0) + re.Len(regionStats.stats[MissPeer], 1) + re.Len(regionStats.stats[DownPeer], 2) + re.Len(regionStats.stats[PendingPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 0) + re.Len(regionStats.offlineStats[ExtraPeer], 0) + re.Len(regionStats.offlineStats[MissPeer], 0) + re.Len(regionStats.offlineStats[DownPeer], 0) + re.Len(regionStats.offlineStats[PendingPeer], 0) + re.Len(regionStats.offlineStats[LearnerPeer], 0) + re.Len(regionStats.offlineStats[OfflinePeer], 0) store3 = stores[3].Clone(core.UpStore()) stores[3] = store3 regionStats.Observe(region1, stores) - c.Assert(regionStats.stats[OfflinePeer], HasLen, 0) + re.Len(regionStats.stats[OfflinePeer], 0) } -func (t *testRegionStatisticsSuite) TestRegionStatisticsWithPlacementRule(c *C) { +func TestRegionStatisticsWithPlacementRule(t *testing.T) { + re := require.New(t) + store := storage.NewStorageWithMemoryBackend() + manager := placement.NewRuleManager(store, nil, nil) + err := 
manager.Initialize(3, []string{"zone", "rack", "host"}) + re.NoError(err) opt := config.NewTestOptions() opt.SetPlacementRuleEnabled(true) peers := []*metapb.Peer{ @@ -173,20 +163,21 @@ func (t *testRegionStatisticsSuite) TestRegionStatisticsWithPlacementRule(c *C) region2 := core.NewRegionInfo(r2, peers[0]) region3 := core.NewRegionInfo(r3, peers[0]) region4 := core.NewRegionInfo(r4, peers[0]) - regionStats := NewRegionStatistics(opt, t.manager, nil) + regionStats := NewRegionStatistics(opt, manager, nil) // r2 didn't match the rules regionStats.Observe(region2, stores) - c.Assert(regionStats.stats[MissPeer], HasLen, 1) + re.Len(regionStats.stats[MissPeer], 1) regionStats.Observe(region3, stores) // r3 didn't match the rules - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) regionStats.Observe(region4, stores) // r4 match the rules - c.Assert(regionStats.stats[MissPeer], HasLen, 1) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) + re.Len(regionStats.stats[MissPeer], 1) + re.Len(regionStats.stats[ExtraPeer], 1) } -func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { +func TestRegionLabelIsolationLevel(t *testing.T) { + re := require.New(t) locationLabels := []string{"zone", "rack", "host"} labelLevelStats := NewLabelStatistics() labelsSet := [][]map[string]string{ @@ -256,7 +247,7 @@ func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { region := core.NewRegionInfo(&metapb.Region{Id: uint64(regionID)}, nil) label := GetRegionLabelIsolation(stores, locationLabels) labelLevelStats.Observe(region, stores, locationLabels) - c.Assert(label, Equals, res) + re.Equal(res, label) regionID++ } @@ -264,16 +255,16 @@ func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { f(labels, res[i], locationLabels) } for i, res := range counter { - c.Assert(labelLevelStats.labelCounter[i], Equals, res) + re.Equal(res, labelLevelStats.labelCounter[i]) } label := GetRegionLabelIsolation(nil, locationLabels) - c.Assert(label, Equals, nonIsolation) + re.Equal(nonIsolation, label) label = GetRegionLabelIsolation(nil, nil) - c.Assert(label, Equals, nonIsolation) + re.Equal(nonIsolation, label) store := core.NewStoreInfo(&metapb.Store{Id: 1, Address: "mock://tikv-1"}, core.SetStoreLabels([]*metapb.StoreLabel{{Key: "foo", Value: "bar"}})) label = GetRegionLabelIsolation([]*core.StoreInfo{store}, locationLabels) - c.Assert(label, Equals, "zone") + re.Equal("zone", label) regionID = 1 res = []string{"rack", "none", "zone", "rack", "none", "rack", "none"} @@ -284,6 +275,6 @@ func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { f(labels, res[i], locationLabels) } for i, res := range counter { - c.Assert(labelLevelStats.labelCounter[i], Equals, res) + re.Equal(res, labelLevelStats.labelCounter[i]) } } diff --git a/server/statistics/store_collection_test.go b/server/statistics/store_collection_test.go index f93a4b54bb5..388fc13b27e 100644 --- a/server/statistics/store_collection_test.go +++ b/server/statistics/store_collection_test.go @@ -15,19 +15,17 @@ package statistics import ( + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testStoreStatisticsSuite{}) - -type testStoreStatisticsSuite struct{} - -func (t *testStoreStatisticsSuite) TestStoreStatistics(c *C) { +func TestStoreStatistics(t *testing.T) { + re := require.New(t) opt := config.NewTestOptions() rep := opt.GetReplicationConfig().Clone() rep.LocationLabels = []string{"zone", "host"} @@ -62,22 +60,22 @@ func (t *testStoreStatisticsSuite) TestStoreStatistics(c *C) { } stats := storeStats.stats - c.Assert(stats.Up, Equals, 6) - c.Assert(stats.Preparing, Equals, 7) - c.Assert(stats.Serving, Equals, 0) - c.Assert(stats.Removing, Equals, 1) - c.Assert(stats.Removed, Equals, 1) - c.Assert(stats.Down, Equals, 1) - c.Assert(stats.Offline, Equals, 1) - c.Assert(stats.RegionCount, Equals, 0) - c.Assert(stats.Unhealthy, Equals, 0) - c.Assert(stats.Disconnect, Equals, 0) - c.Assert(stats.Tombstone, Equals, 1) - c.Assert(stats.LowSpace, Equals, 8) - c.Assert(stats.LabelCounter["zone:z1"], Equals, 2) - c.Assert(stats.LabelCounter["zone:z2"], Equals, 2) - c.Assert(stats.LabelCounter["zone:z3"], Equals, 2) - c.Assert(stats.LabelCounter["host:h1"], Equals, 4) - c.Assert(stats.LabelCounter["host:h2"], Equals, 4) - c.Assert(stats.LabelCounter["zone:unknown"], Equals, 2) + re.Equal(6, stats.Up) + re.Equal(7, stats.Preparing) + re.Equal(0, stats.Serving) + re.Equal(1, stats.Removing) + re.Equal(1, stats.Removed) + re.Equal(1, stats.Down) + re.Equal(1, stats.Offline) + re.Equal(0, stats.RegionCount) + re.Equal(0, stats.Unhealthy) + re.Equal(0, stats.Disconnect) + re.Equal(1, stats.Tombstone) + re.Equal(8, stats.LowSpace) + re.Equal(2, stats.LabelCounter["zone:z1"]) + re.Equal(2, stats.LabelCounter["zone:z2"]) + re.Equal(2, stats.LabelCounter["zone:z3"]) + re.Equal(4, stats.LabelCounter["host:h1"]) + re.Equal(4, stats.LabelCounter["host:h2"]) + re.Equal(2, stats.LabelCounter["zone:unknown"]) } diff --git a/server/statistics/store_test.go b/server/statistics/store_test.go index e3247ea1c46..89508be41b7 100644 --- a/server/statistics/store_test.go +++ b/server/statistics/store_test.go @@ -15,26 +15,24 @@ package statistics import ( + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testStoreSuite{}) - -type testStoreSuite struct{} - -func (s *testStoreSuite) TestFilterUnhealtyStore(c *C) { +func TestFilterUnhealtyStore(t *testing.T) { + re := require.New(t) stats := NewStoresStats() cluster := core.NewBasicCluster() for i := uint64(1); i <= 5; i++ { cluster.PutStore(core.NewStoreInfo(&metapb.Store{Id: i}, core.SetLastHeartbeatTS(time.Now()))) stats.Observe(i, &pdpb.StoreStats{}) } - c.Assert(stats.GetStoresLoads(), HasLen, 5) + re.Len(stats.GetStoresLoads(), 5) cluster.PutStore(cluster.GetStore(1).Clone(core.SetLastHeartbeatTS(time.Now().Add(-24 * time.Hour)))) cluster.PutStore(cluster.GetStore(2).Clone(core.TombstoneStore())) @@ -42,7 +40,7 @@ func (s *testStoreSuite) TestFilterUnhealtyStore(c *C) { stats.FilterUnhealthyStore(cluster) loads := stats.GetStoresLoads() - c.Assert(loads, HasLen, 2) - c.Assert(loads[4], NotNil) - c.Assert(loads[5], NotNil) + re.Len(loads, 2) + re.NotNil(loads[4]) + re.NotNil(loads[5]) } diff --git a/server/statistics/topn_test.go b/server/statistics/topn_test.go index 0bf1c4e4d21..fa9e4ebd5f1 100644 --- a/server/statistics/topn_test.go +++ b/server/statistics/topn_test.go @@ -17,15 +17,12 @@ package statistics import ( "math/rand" "sort" + "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testTopNSuite{}) - -type testTopNSuite struct{} - type item struct { id uint64 values []float64 @@ -39,21 +36,22 @@ func (it *item) Less(k int, than TopNItem) bool { return it.values[k] < than.(*item).values[k] } -func (s *testTopNSuite) TestPut(c *C) { +func TestPut(t *testing.T) { + re := require.New(t) const Total, N = 10000, 50 tn := NewTopN(DimLen, N, 1*time.Hour) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) + 1 }, false /*insert*/) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) }, true /*update*/) // check GetTopNMin for k := 0; k < DimLen; k++ { - c.Assert(tn.GetTopNMin(k).(*item).values[k], Equals, float64(1-N)) + re.Equal(float64(1-N), tn.GetTopNMin(k).(*item).values[k]) } { @@ -65,7 +63,7 @@ func (s *testTopNSuite) TestPut(c *C) { } // check update worked for i, v := range topns { - c.Assert(v, Equals, float64(-i)) + re.Equal(float64(-i), v) } } @@ -78,7 +76,7 @@ func (s *testTopNSuite) TestPut(c *C) { } // check update worked for i, v := range all { - c.Assert(v, Equals, float64(-i)) + re.Equal(float64(-i), v) } } @@ -96,19 +94,19 @@ func (s *testTopNSuite) TestPut(c *C) { } sort.Sort(sort.Reverse(sort.Float64Slice(all))) - c.Assert(topn, DeepEquals, all[:N]) + re.Equal(all[:N], topn) } } // check Get for i := uint64(0); i < Total; i++ { it := tn.Get(i).(*item) - c.Assert(it.id, Equals, i) - c.Assert(it.values[0], Equals, -float64(i)) + re.Equal(i, it.id) + re.Equal(-float64(i), it.values[0]) } } -func putPerm(c *C, tn *TopN, total int, f func(x int) float64, isUpdate bool) { +func putPerm(re *require.Assertions, tn *TopN, total int, f func(x int) float64, isUpdate bool) { { // insert dims := make([][]int, DimLen) for k := 0; k < DimLen; k++ { @@ -122,16 +120,17 @@ func putPerm(c *C, tn *TopN, total int, f func(x int) float64, isUpdate bool) { for k := 0; k < DimLen; k++ { item.values[k] = f(dims[k][i]) } - c.Assert(tn.Put(item), Equals, isUpdate) + 
re.Equal(isUpdate, tn.Put(item)) } } } -func (s *testTopNSuite) TestRemove(c *C) { +func TestRemove(t *testing.T) { + re := require.New(t) const Total, N = 10000, 50 tn := NewTopN(DimLen, N, 1*time.Hour) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) }, false /*insert*/) @@ -139,28 +138,28 @@ func (s *testTopNSuite) TestRemove(c *C) { for i := 0; i < Total; i++ { if i%3 != 0 { it := tn.Remove(uint64(i)).(*item) - c.Assert(it.id, Equals, uint64(i)) + re.Equal(uint64(i), it.id) } } // check Remove worked for i := 0; i < Total; i++ { if i%3 != 0 { - c.Assert(tn.Remove(uint64(i)), IsNil) + re.Nil(tn.Remove(uint64(i))) } } - c.Assert(tn.GetTopNMin(0).(*item).id, Equals, uint64(3*(N-1))) + re.Equal(uint64(3*(N-1)), tn.GetTopNMin(0).(*item).id) { topns := make([]float64, N) for _, it := range tn.GetAllTopN(0) { it := it.(*item) topns[it.id/3] = it.values[0] - c.Assert(it.id%3, Equals, uint64(0)) + re.Equal(uint64(0), it.id%3) } for i, v := range topns { - c.Assert(v, Equals, float64(-i*3)) + re.Equal(float64(-i*3), v) } } @@ -169,10 +168,10 @@ func (s *testTopNSuite) TestRemove(c *C) { for _, it := range tn.GetAll() { it := it.(*item) all[it.id/3] = it.values[0] - c.Assert(it.id%3, Equals, uint64(0)) + re.Equal(uint64(0), it.id%3) } for i, v := range all { - c.Assert(v, Equals, float64(-i*3)) + re.Equal(float64(-i*3), v) } } @@ -190,22 +189,23 @@ func (s *testTopNSuite) TestRemove(c *C) { } sort.Sort(sort.Reverse(sort.Float64Slice(all))) - c.Assert(topn, DeepEquals, all[:N]) + re.Equal(all[:N], topn) } } for i := uint64(0); i < Total; i += 3 { it := tn.Get(i).(*item) - c.Assert(it.id, Equals, i) - c.Assert(it.values[0], Equals, -float64(i)) + re.Equal(i, it.id) + re.Equal(-float64(i), it.values[0]) } } -func (s *testTopNSuite) TestTTL(c *C) { +func TestTTL(t *testing.T) { + re := require.New(t) const Total, N = 1000, 50 tn := NewTopN(DimLen, 50, 900*time.Millisecond) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) }, false /*insert*/) @@ -215,27 +215,27 @@ func (s *testTopNSuite) TestTTL(c *C) { for k := 1; k < DimLen; k++ { item.values = append(item.values, rand.NormFloat64()) } - c.Assert(tn.Put(item), IsTrue) + re.True(tn.Put(item)) } for i := 3; i < Total; i += 3 { item := &item{id: uint64(i), values: []float64{float64(-i) + 100}} for k := 1; k < DimLen; k++ { item.values = append(item.values, rand.NormFloat64()) } - c.Assert(tn.Put(item), IsFalse) + re.False(tn.Put(item)) } tn.RemoveExpired() - c.Assert(tn.Len(), Equals, Total/3+1) + re.Equal(Total/3+1, tn.Len()) items := tn.GetAllTopN(0) v := make([]float64, N) for _, it := range items { it := it.(*item) - c.Assert(it.id%3, Equals, uint64(0)) + re.Equal(uint64(0), it.id%3) v[it.id/3] = it.values[0] } for i, x := range v { - c.Assert(x, Equals, float64(-i*3)+100) + re.Equal(float64(-i*3)+100, x) } { // check all dimensions @@ -252,7 +252,7 @@ func (s *testTopNSuite) TestTTL(c *C) { } sort.Sort(sort.Reverse(sort.Float64Slice(all))) - c.Assert(topn, DeepEquals, all[:N]) + re.Equal(all[:N], topn) } } } From 5a64486d5ff9fbe860d65c3377f30a6f319b04a1 Mon Sep 17 00:00:00 2001 From: Yongbo Jiang Date: Mon, 13 Jun 2022 15:34:32 +0800 Subject: [PATCH 14/35] server: add limiter config and reload mechanism (#4842) ref tikv/pd#4666, ref tikv/pd#4839, ref tikv/pd#4869 update limiter config when reload presist config Signed-off-by: Cabinfever_B Co-authored-by: Ryan Leung Co-authored-by: Ti Chi Robot --- 
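Context for the diff that follows: the change persists a new rate-limit section next to the audit config and, whenever the persisted options are reloaded (for example after a leader switch), replays each entry of the limiter-config map into the server's limiter. Below is only a rough, self-contained sketch of that reload pattern; the names middlewareOptions, DimensionConfig and the update callback are invented for illustration and are not PD's actual ratelimit API.

package main

import (
    "fmt"
    "sync/atomic"
)

// DimensionConfig is a stand-in for a per-service limit (hypothetical type,
// loosely mirroring the QPS field used in the patch's tests).
type DimensionConfig struct {
    QPS float64
}

// RateLimitConfig mirrors the shape added by the patch: an on/off switch plus
// a per-service-label map of limits.
type RateLimitConfig struct {
    EnableRateLimit bool
    LimiterConfig   map[string]DimensionConfig
}

// middlewareOptions keeps the current config behind an atomic.Value so request
// handlers can read it without locking while a reload swaps in a new pointer.
type middlewareOptions struct {
    rateLimit atomic.Value
}

func (o *middlewareOptions) SetRateLimitConfig(cfg *RateLimitConfig) {
    o.rateLimit.Store(cfg)
}

func (o *middlewareOptions) GetRateLimitConfig() *RateLimitConfig {
    return o.rateLimit.Load().(*RateLimitConfig)
}

// reloadRateLimit walks every per-service entry and hands it to a caller-supplied
// update function, the same walk the patch performs after reloading persisted config.
func (o *middlewareOptions) reloadRateLimit(update func(label string, cfg DimensionConfig)) {
    for label, cfg := range o.GetRateLimitConfig().LimiterConfig {
        update(label, cfg)
    }
}

func main() {
    opts := &middlewareOptions{}
    opts.SetRateLimitConfig(&RateLimitConfig{
        EnableRateLimit: true,
        LimiterConfig:   map[string]DimensionConfig{"GetRegions": {QPS: 1}},
    })
    opts.reloadRateLimit(func(label string, cfg DimensionConfig) {
        fmt.Printf("update limiter for %s: qps=%v\n", label, cfg.QPS)
    })
}

Keeping the config in an atomic.Value means readers never block a reload: a reload stores a fresh pointer and re-applies the map, which is essentially what the patch's loadRateLimitConfig does with the real ratelimit.Limiter via Update and UpdateDimensionConfig.
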
server/api/service_middleware.go | 26 +++- server/api/service_middleware_test.go | 111 +++++++++++++++++- server/config/service_middleware_config.go | 29 ++++- .../service_middleware_persist_options.go | 23 +++- server/server.go | 41 ++++++- tests/server/config/config_test.go | 105 +++++++++++++++++ 6 files changed, 322 insertions(+), 13 deletions(-) create mode 100644 tests/server/config/config_test.go diff --git a/server/api/service_middleware.go b/server/api/service_middleware.go index c136f8fbf4e..0f41f8ae725 100644 --- a/server/api/service_middleware.go +++ b/server/api/service_middleware.go @@ -103,8 +103,11 @@ func (h *serviceMiddlewareHandler) SetServiceMiddlewareConfig(w http.ResponseWri func (h *serviceMiddlewareHandler) updateServiceMiddlewareConfig(cfg *config.ServiceMiddlewareConfig, key string, value interface{}) error { kp := strings.Split(key, ".") - if kp[0] == "audit" { + switch kp[0] { + case "audit": return h.updateAudit(cfg, kp[len(kp)-1], value) + case "rate-limit": + return h.updateRateLimit(cfg, kp[len(kp)-1], value) } return errors.Errorf("config prefix %s not found", kp[0]) } @@ -129,3 +132,24 @@ func (h *serviceMiddlewareHandler) updateAudit(config *config.ServiceMiddlewareC } return err } + +func (h *serviceMiddlewareHandler) updateRateLimit(config *config.ServiceMiddlewareConfig, key string, value interface{}) error { + data, err := json.Marshal(map[string]interface{}{key: value}) + if err != nil { + return err + } + + updated, found, err := mergeConfig(&config.RateLimitConfig, data) + if err != nil { + return err + } + + if !found { + return errors.Errorf("config item %s not found", key) + } + + if updated { + err = h.svr.SetRateLimitConfig(config.RateLimitConfig) + } + return err +} diff --git a/server/api/service_middleware_test.go b/server/api/service_middleware_test.go index 3d29b23a693..a1d4804650c 100644 --- a/server/api/service_middleware_test.go +++ b/server/api/service_middleware_test.go @@ -21,20 +21,21 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" + "github.com/tikv/pd/pkg/ratelimit" tu "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" ) -var _ = Suite(&testServiceMiddlewareSuite{}) +var _ = Suite(&testAuditMiddlewareSuite{}) -type testServiceMiddlewareSuite struct { +type testAuditMiddlewareSuite struct { svr *server.Server cleanup cleanUpFunc urlPrefix string } -func (s *testServiceMiddlewareSuite) SetUpSuite(c *C) { +func (s *testAuditMiddlewareSuite) SetUpSuite(c *C) { s.svr, s.cleanup = mustNewServer(c, func(cfg *config.Config) { cfg.Replication.EnablePlacementRules = false }) @@ -44,19 +45,24 @@ func (s *testServiceMiddlewareSuite) SetUpSuite(c *C) { s.urlPrefix = fmt.Sprintf("%s%s/api/v1", addr, apiPrefix) } -func (s *testServiceMiddlewareSuite) TearDownSuite(c *C) { +func (s *testAuditMiddlewareSuite) TearDownSuite(c *C) { s.cleanup() } -func (s *testServiceMiddlewareSuite) TestConfigAudit(c *C) { +func (s *testAuditMiddlewareSuite) TestConfigAuditSwitch(c *C) { addr := fmt.Sprintf("%s/service-middleware/config", s.urlPrefix) + + sc := &config.ServiceMiddlewareConfig{} + c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) + c.Assert(sc.EnableAudit, Equals, false) + ms := map[string]interface{}{ "enable-audit": "true", } postData, err := json.Marshal(ms) c.Assert(err, IsNil) c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(c)), IsNil) - sc := &config.ServiceMiddlewareConfig{} + sc = &config.ServiceMiddlewareConfig{} c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) c.Assert(sc.EnableAudit, Equals, true) ms = map[string]interface{}{ @@ -98,3 +104,96 @@ func (s *testServiceMiddlewareSuite) TestConfigAudit(c *C) { c.Assert(err, IsNil) c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "config item audit not found")), IsNil) } + +var _ = Suite(&testRateLimitConfigSuite{}) + +type testRateLimitConfigSuite struct { + svr *server.Server + cleanup cleanUpFunc + urlPrefix string +} + +func (s *testRateLimitConfigSuite) SetUpSuite(c *C) { + s.svr, s.cleanup = mustNewServer(c) + mustWaitLeader(c, []*server.Server{s.svr}) + mustBootstrapCluster(c, s.svr) + s.urlPrefix = fmt.Sprintf("%s%s/api/v1", s.svr.GetAddr(), apiPrefix) +} + +func (s *testRateLimitConfigSuite) TearDownSuite(c *C) { + s.cleanup() +} + +func (s *testRateLimitConfigSuite) TestConfigRateLimitSwitch(c *C) { + addr := fmt.Sprintf("%s/service-middleware/config", s.urlPrefix) + + sc := &config.ServiceMiddlewareConfig{} + c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) + c.Assert(sc.EnableRateLimit, Equals, false) + + ms := map[string]interface{}{ + "enable-rate-limit": "true", + } + postData, err := json.Marshal(ms) + c.Assert(err, IsNil) + c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(c)), IsNil) + sc = &config.ServiceMiddlewareConfig{} + c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) + c.Assert(sc.EnableRateLimit, Equals, true) + ms = map[string]interface{}{ + "enable-rate-limit": "false", + } + postData, err = json.Marshal(ms) + c.Assert(err, IsNil) + c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(c)), IsNil) + sc = &config.ServiceMiddlewareConfig{} + c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) + c.Assert(sc.EnableRateLimit, Equals, false) + + // test empty + ms = map[string]interface{}{} + postData, err = json.Marshal(ms) + c.Assert(err, IsNil) + 
c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(c), tu.StringContain(c, "The input is empty.")), IsNil) + + ms = map[string]interface{}{ + "rate-limit": "false", + } + postData, err = json.Marshal(ms) + c.Assert(err, IsNil) + c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "config item rate-limit not found")), IsNil) + + c.Assert(failpoint.Enable("github.com/tikv/pd/server/config/persistServiceMiddlewareFail", "return(true)"), IsNil) + ms = map[string]interface{}{ + "rate-limit.enable-rate-limit": "true", + } + postData, err = json.Marshal(ms) + c.Assert(err, IsNil) + c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.Status(c, http.StatusBadRequest)), IsNil) + c.Assert(failpoint.Disable("github.com/tikv/pd/server/config/persistServiceMiddlewareFail"), IsNil) + + ms = map[string]interface{}{ + "rate-limit.rate-limit": "false", + } + postData, err = json.Marshal(ms) + c.Assert(err, IsNil) + c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "config item rate-limit not found")), IsNil) +} + +func (s *testRateLimitConfigSuite) TestConfigLimiterConifgByOriginAPI(c *C) { + // this test case is used to test updating `limiter-config` by origin API simply + addr := fmt.Sprintf("%s/service-middleware/config", s.urlPrefix) + dimensionConfig := ratelimit.DimensionConfig{QPS: 1} + limiterConfig := map[string]interface{}{ + "CreateOperator": dimensionConfig, + } + ms := map[string]interface{}{ + "limiter-config": limiterConfig, + } + postData, err := json.Marshal(ms) + c.Assert(err, IsNil) + c.Assert(tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(c)), IsNil) + sc := &config.ServiceMiddlewareConfig{} + c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) + c.Assert(sc.RateLimitConfig.LimiterConfig["CreateOperator"].QPS, Equals, 1.) +} diff --git a/server/config/service_middleware_config.go b/server/config/service_middleware_config.go index d1b600ccaf2..38f51fce3fd 100644 --- a/server/config/service_middleware_config.go +++ b/server/config/service_middleware_config.go @@ -14,13 +14,17 @@ package config +import "github.com/tikv/pd/pkg/ratelimit" + const ( - defaultEnableAuditMiddleware = false + defaultEnableAuditMiddleware = false + defaultEnableRateLimitMiddleware = false ) // ServiceMiddlewareConfig is is the configuration for PD Service middleware. 
type ServiceMiddlewareConfig struct { - AuditConfig `json:"audit"` + AuditConfig `json:"audit"` + RateLimitConfig `json:"rate-limit"` } // NewServiceMiddlewareConfig returns a new service middleware config @@ -28,8 +32,13 @@ func NewServiceMiddlewareConfig() *ServiceMiddlewareConfig { audit := AuditConfig{ EnableAudit: defaultEnableAuditMiddleware, } + ratelimit := RateLimitConfig{ + EnableRateLimit: defaultEnableRateLimitMiddleware, + LimiterConfig: make(map[string]ratelimit.DimensionConfig), + } cfg := &ServiceMiddlewareConfig{ - AuditConfig: audit, + AuditConfig: audit, + RateLimitConfig: ratelimit, } return cfg } @@ -51,3 +60,17 @@ func (c *AuditConfig) Clone() *AuditConfig { cfg := *c return &cfg } + +// RateLimitConfig is the configuration for rate limit +type RateLimitConfig struct { + // EnableRateLimit controls the switch of the rate limit middleware + EnableRateLimit bool `json:"enable-rate-limit,string"` + // RateLimitConfig is the config of rate limit middleware + LimiterConfig map[string]ratelimit.DimensionConfig `json:"limiter-config"` +} + +// Clone returns a cloned rate limit config. +func (c *RateLimitConfig) Clone() *RateLimitConfig { + cfg := *c + return &cfg +} diff --git a/server/config/service_middleware_persist_options.go b/server/config/service_middleware_persist_options.go index 7fde025b8c1..20f8c110a5f 100644 --- a/server/config/service_middleware_persist_options.go +++ b/server/config/service_middleware_persist_options.go @@ -25,13 +25,15 @@ import ( // ServiceMiddlewarePersistOptions wraps all service middleware configurations that need to persist to storage and // allows to access them safely. type ServiceMiddlewarePersistOptions struct { - audit atomic.Value + audit atomic.Value + rateLimit atomic.Value } // NewServiceMiddlewarePersistOptions creates a new ServiceMiddlewarePersistOptions instance. func NewServiceMiddlewarePersistOptions(cfg *ServiceMiddlewareConfig) *ServiceMiddlewarePersistOptions { o := &ServiceMiddlewarePersistOptions{} o.audit.Store(&cfg.AuditConfig) + o.rateLimit.Store(&cfg.RateLimitConfig) return o } @@ -50,10 +52,26 @@ func (o *ServiceMiddlewarePersistOptions) IsAuditEnabled() bool { return o.GetAuditConfig().EnableAudit } +// GetRateLimitConfig returns pd service middleware configurations. +func (o *ServiceMiddlewarePersistOptions) GetRateLimitConfig() *RateLimitConfig { + return o.rateLimit.Load().(*RateLimitConfig) +} + +// SetRateLimitConfig sets the PD service middleware configuration. +func (o *ServiceMiddlewarePersistOptions) SetRateLimitConfig(cfg *RateLimitConfig) { + o.rateLimit.Store(cfg) +} + +// IsRateLimitEnabled returns whether rate limit middleware is enabled +func (o *ServiceMiddlewarePersistOptions) IsRateLimitEnabled() bool { + return o.GetRateLimitConfig().EnableRateLimit +} + // Persist saves the configuration to the storage. 
func (o *ServiceMiddlewarePersistOptions) Persist(storage endpoint.ServiceMiddlewareStorage) error { cfg := &ServiceMiddlewareConfig{ - AuditConfig: *o.GetAuditConfig(), + AuditConfig: *o.GetAuditConfig(), + RateLimitConfig: *o.GetRateLimitConfig(), } err := storage.SaveServiceMiddlewareConfig(cfg) failpoint.Inject("persistServiceMiddlewareFail", func() { @@ -72,6 +90,7 @@ func (o *ServiceMiddlewarePersistOptions) Reload(storage endpoint.ServiceMiddlew } if isExist { o.audit.Store(&cfg.AuditConfig) + o.rateLimit.Store(&cfg.RateLimitConfig) } return nil } diff --git a/server/server.go b/server/server.go index a63c8c52525..b618f97aeed 100644 --- a/server/server.go +++ b/server/server.go @@ -45,6 +45,7 @@ import ( "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/logutil" + "github.com/tikv/pd/pkg/ratelimit" "github.com/tikv/pd/pkg/syncutil" "github.com/tikv/pd/pkg/systimemon" "github.com/tikv/pd/pkg/typeutil" @@ -156,6 +157,7 @@ type Server struct { // the corresponding forwarding TSO channel. tsoDispatcher sync.Map /* Store as map[string]chan *tsoRequest */ + serviceRateLimiter *ratelimit.Limiter serviceLabels map[string][]apiutil.AccessPath apiServiceLabelMap map[apiutil.AccessPath]string @@ -258,6 +260,7 @@ func CreateServer(ctx context.Context, cfg *config.Config, serviceBuilders ...Ha audit.NewPrometheusHistogramBackend(serviceAuditHistogram, false), } s.serviceAuditBackendLabels = make(map[string]*audit.BackendLabels) + s.serviceRateLimiter = ratelimit.NewLimiter() s.serviceLabels = make(map[string][]apiutil.AccessPath) s.apiServiceLabelMap = make(map[apiutil.AccessPath]string) @@ -806,7 +809,8 @@ func (s *Server) GetMembers() ([]*pdpb.Member, error) { // GetServiceMiddlewareConfig gets the service middleware config information. func (s *Server) GetServiceMiddlewareConfig() *config.ServiceMiddlewareConfig { cfg := s.serviceMiddlewareCfg.Clone() - cfg.AuditConfig = *s.serviceMiddlewarePersistOptions.GetAuditConfig() + cfg.AuditConfig = *s.serviceMiddlewarePersistOptions.GetAuditConfig().Clone() + cfg.RateLimitConfig = *s.serviceMiddlewarePersistOptions.GetRateLimitConfig().Clone() return cfg } @@ -978,6 +982,27 @@ func (s *Server) SetAuditConfig(cfg config.AuditConfig) error { return nil } +// GetRateLimitConfig gets the rate limit config information. +func (s *Server) GetRateLimitConfig() *config.RateLimitConfig { + return s.serviceMiddlewarePersistOptions.GetRateLimitConfig().Clone() +} + +// SetRateLimitConfig sets the rate limit config. +func (s *Server) SetRateLimitConfig(cfg config.RateLimitConfig) error { + old := s.serviceMiddlewarePersistOptions.GetRateLimitConfig() + s.serviceMiddlewarePersistOptions.SetRateLimitConfig(&cfg) + if err := s.serviceMiddlewarePersistOptions.Persist(s.storage); err != nil { + s.serviceMiddlewarePersistOptions.SetRateLimitConfig(old) + log.Error("failed to update Rate Limit config", + zap.Reflect("new", cfg), + zap.Reflect("old", old), + errs.ZapError(err)) + return err + } + log.Info("Rate Limit config is updated", zap.Reflect("new", cfg), zap.Reflect("old", old)) + return nil +} + // GetPDServerConfig gets the balance config information. 
func (s *Server) GetPDServerConfig() *config.PDServerConfig { return s.persistOptions.GetPDServerConfig().Clone() @@ -1195,6 +1220,11 @@ func (s *Server) SetServiceAuditBackendLabels(serviceLabel string, labels []stri s.serviceAuditBackendLabels[serviceLabel] = &audit.BackendLabels{Labels: labels} } +// GetServiceRateLimiter is used to get rate limiter +func (s *Server) GetServiceRateLimiter() *ratelimit.Limiter { + return s.serviceRateLimiter +} + // GetClusterStatus gets cluster status. func (s *Server) GetClusterStatus() (*cluster.Status, error) { s.cluster.Lock() @@ -1450,6 +1480,7 @@ func (s *Server) reloadConfigFromKV() error { if err != nil { return err } + s.loadRateLimitConfig() switchableStorage, ok := s.storage.(interface { SwitchToRegionStorage() SwitchToDefaultStorage() @@ -1467,6 +1498,14 @@ func (s *Server) reloadConfigFromKV() error { return nil } +func (s *Server) loadRateLimitConfig() { + cfg := s.serviceMiddlewarePersistOptions.GetRateLimitConfig().LimiterConfig + for key := range cfg { + value := cfg[key] + s.serviceRateLimiter.Update(key, ratelimit.UpdateDimensionConfig(&value)) + } +} + // ReplicateFileToMember is used to synchronize state to a member. // Each member will write `data` to a local file named `name`. // For security reason, data should be in JSON format. diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go new file mode 100644 index 00000000000..f375397a9f3 --- /dev/null +++ b/tests/server/config/config_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "testing" + + . "github.com/pingcap/check" + "github.com/tikv/pd/pkg/ratelimit" + "github.com/tikv/pd/pkg/testutil" + "github.com/tikv/pd/server" + "github.com/tikv/pd/tests" +) + +// dialClient used to dial http request. 
+var dialClient = &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + }, +} + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testConfigPresistSuite{}) + +type testConfigPresistSuite struct { + cleanup func() + cluster *tests.TestCluster +} + +func (s *testConfigPresistSuite) SetUpSuite(c *C) { + ctx, cancel := context.WithCancel(context.Background()) + s.cleanup = cancel + cluster, err := tests.NewTestCluster(ctx, 3) + c.Assert(err, IsNil) + c.Assert(cluster.RunInitialServers(), IsNil) + c.Assert(cluster.WaitLeader(), Not(HasLen), 0) + s.cluster = cluster +} + +func (s *testConfigPresistSuite) TearDownSuite(c *C) { + s.cleanup() + s.cluster.Destroy() +} + +func (s *testConfigPresistSuite) TestRateLimitConfigReload(c *C) { + leader := s.cluster.GetServer(s.cluster.GetLeader()) + + c.Assert(leader.GetServer().GetServiceMiddlewareConfig().RateLimitConfig.LimiterConfig, HasLen, 0) + limitCfg := make(map[string]ratelimit.DimensionConfig) + limitCfg["GetRegions"] = ratelimit.DimensionConfig{QPS: 1} + + input := map[string]interface{}{ + "enable-rate-limit": "true", + "limiter-config": limitCfg, + } + data, err := json.Marshal(input) + c.Assert(err, IsNil) + req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) + resp, err := dialClient.Do(req) + c.Assert(err, IsNil) + resp.Body.Close() + c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled(), Equals, true) + c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().GetRateLimitConfig().LimiterConfig, HasLen, 1) + + oldLeaderName := leader.GetServer().Name() + leader.GetServer().GetMember().ResignEtcdLeader(leader.GetServer().Context(), oldLeaderName, "") + mustWaitLeader(c, s.cluster.GetServers()) + leader = s.cluster.GetServer(s.cluster.GetLeader()) + + c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled(), Equals, true) + c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().GetRateLimitConfig().LimiterConfig, HasLen, 1) +} + +func mustWaitLeader(c *C, svrs map[string]*tests.TestServer) *server.Server { + var leader *server.Server + testutil.WaitUntil(c, func() bool { + for _, s := range svrs { + if !s.GetServer().IsClosed() && s.GetServer().GetMember().IsLeader() { + leader = s.GetServer() + return true + } + } + return false + }) + return leader +} From 6a266ed7da492d77fab50c1aa3b2ae5ee8fd91c1 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Tue, 14 Jun 2022 11:34:33 +0800 Subject: [PATCH 15/35] tests: use `T.TempDir` to create temporary test directory (#5153) close tikv/pd#5152 Signed-off-by: Eng Zer Jun --- pkg/encryption/master_key_test.go | 18 ++-- pkg/etcdutil/etcdutil.go | 12 +-- pkg/etcdutil/etcdutil_test.go | 12 +-- server/election/leadership_test.go | 3 +- server/election/lease_test.go | 3 +- server/encryptionkm/key_manager_test.go | 123 +++++++++--------------- server/region_syncer/client_test.go | 9 +- server/storage/kv/kv_test.go | 17 +--- tests/client/client_tls_test.go | 18 ++-- 9 files changed, 71 insertions(+), 144 deletions(-) diff --git a/pkg/encryption/master_key_test.go b/pkg/encryption/master_key_test.go index b8d5657c1fc..79a6bb390d9 100644 --- a/pkg/encryption/master_key_test.go +++ b/pkg/encryption/master_key_test.go @@ -99,8 +99,7 @@ func TestNewFileMasterKeyMissingPath(t *testing.T) { func TestNewFileMasterKeyMissingFile(t *testing.T) { t.Parallel() re := require.New(t) - dir, err := os.MkdirTemp("", "test_key_files") - re.NoError(err) + dir 
:= t.TempDir() path := dir + "/key" config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_File{ @@ -109,15 +108,14 @@ func TestNewFileMasterKeyMissingFile(t *testing.T) { }, }, } - _, err = NewMasterKey(config, nil) + _, err := NewMasterKey(config, nil) re.Error(err) } func TestNewFileMasterKeyNotHexString(t *testing.T) { t.Parallel() re := require.New(t) - dir, err := os.MkdirTemp("", "test_key_files") - re.NoError(err) + dir := t.TempDir() path := dir + "/key" os.WriteFile(path, []byte("not-a-hex-string"), 0600) config := &encryptionpb.MasterKey{ @@ -127,15 +125,14 @@ func TestNewFileMasterKeyNotHexString(t *testing.T) { }, }, } - _, err = NewMasterKey(config, nil) + _, err := NewMasterKey(config, nil) re.Error(err) } func TestNewFileMasterKeyLengthMismatch(t *testing.T) { t.Parallel() re := require.New(t) - dir, err := os.MkdirTemp("", "test_key_files") - re.NoError(err) + dir := t.TempDir() path := dir + "/key" os.WriteFile(path, []byte("2f07ec61e5a50284f47f2b402a962ec6"), 0600) config := &encryptionpb.MasterKey{ @@ -145,7 +142,7 @@ func TestNewFileMasterKeyLengthMismatch(t *testing.T) { }, }, } - _, err = NewMasterKey(config, nil) + _, err := NewMasterKey(config, nil) re.Error(err) } @@ -153,8 +150,7 @@ func TestNewFileMasterKey(t *testing.T) { t.Parallel() re := require.New(t) key := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" - dir, err := os.MkdirTemp("", "test_key_files") - re.NoError(err) + dir := t.TempDir() path := dir + "/key" os.WriteFile(path, []byte(key), 0600) config := &encryptionpb.MasterKey{ diff --git a/pkg/etcdutil/etcdutil.go b/pkg/etcdutil/etcdutil.go index ff5ffce9226..9f81b11aaca 100644 --- a/pkg/etcdutil/etcdutil.go +++ b/pkg/etcdutil/etcdutil.go @@ -20,7 +20,7 @@ import ( "fmt" "net/http" "net/url" - "os" + "testing" "time" "github.com/gogo/protobuf/proto" @@ -182,10 +182,10 @@ func EtcdKVPutWithTTL(ctx context.Context, c *clientv3.Client, key string, value } // NewTestSingleConfig is used to create a etcd config for the unit test purpose. -func NewTestSingleConfig() *embed.Config { +func NewTestSingleConfig(t *testing.T) *embed.Config { cfg := embed.NewConfig() cfg.Name = "test_etcd" - cfg.Dir, _ = os.MkdirTemp("/tmp", "test_etcd") + cfg.Dir = t.TempDir() cfg.WalDir = "" cfg.Logger = "zap" cfg.LogOutputs = []string{"stdout"} @@ -202,9 +202,3 @@ func NewTestSingleConfig() *embed.Config { cfg.ClusterState = embed.ClusterStateFlagNew return cfg } - -// CleanConfig is used to clean the etcd data for the unit test purpose. -func CleanConfig(cfg *embed.Config) { - // Clean data directory - os.RemoveAll(cfg.Dir) -} diff --git a/pkg/etcdutil/etcdutil_test.go b/pkg/etcdutil/etcdutil_test.go index bbb8e595c32..942e66d3239 100644 --- a/pkg/etcdutil/etcdutil_test.go +++ b/pkg/etcdutil/etcdutil_test.go @@ -30,11 +30,10 @@ import ( func TestMemberHelpers(t *testing.T) { t.Parallel() re := require.New(t) - cfg1 := NewTestSingleConfig() + cfg1 := NewTestSingleConfig(t) etcd1, err := embed.StartEtcd(cfg1) defer func() { etcd1.Close() - CleanConfig(cfg1) }() re.NoError(err) @@ -55,7 +54,7 @@ func TestMemberHelpers(t *testing.T) { // Test AddEtcdMember // Make a new etcd config. 
- cfg2 := NewTestSingleConfig() + cfg2 := NewTestSingleConfig(t) cfg2.Name = "etcd2" cfg2.InitialCluster = cfg1.InitialCluster + fmt.Sprintf(",%s=%s", cfg2.Name, &cfg2.LPUrls[0]) cfg2.ClusterState = embed.ClusterStateFlagExisting @@ -68,7 +67,6 @@ func TestMemberHelpers(t *testing.T) { etcd2, err := embed.StartEtcd(cfg2) defer func() { etcd2.Close() - CleanConfig(cfg2) }() re.NoError(err) re.Equal(uint64(etcd2.Server.ID()), addResp.Member.ID) @@ -113,11 +111,10 @@ func TestMemberHelpers(t *testing.T) { func TestEtcdKVGet(t *testing.T) { t.Parallel() re := require.New(t) - cfg := NewTestSingleConfig() + cfg := NewTestSingleConfig(t) etcd, err := embed.StartEtcd(cfg) defer func() { etcd.Close() - CleanConfig(cfg) }() re.NoError(err) @@ -165,11 +162,10 @@ func TestEtcdKVGet(t *testing.T) { func TestEtcdKVPutWithTTL(t *testing.T) { t.Parallel() re := require.New(t) - cfg := NewTestSingleConfig() + cfg := NewTestSingleConfig(t) etcd, err := embed.StartEtcd(cfg) defer func() { etcd.Close() - CleanConfig(cfg) }() re.NoError(err) diff --git a/server/election/leadership_test.go b/server/election/leadership_test.go index 9a4b52f782e..4b3663a2ff6 100644 --- a/server/election/leadership_test.go +++ b/server/election/leadership_test.go @@ -29,11 +29,10 @@ const defaultLeaseTimeout = 1 func TestLeadership(t *testing.T) { re := require.New(t) - cfg := etcdutil.NewTestSingleConfig() + cfg := etcdutil.NewTestSingleConfig(t) etcd, err := embed.StartEtcd(cfg) defer func() { etcd.Close() - etcdutil.CleanConfig(cfg) }() re.NoError(err) diff --git a/server/election/lease_test.go b/server/election/lease_test.go index ef8c12be2e9..6298c22f0f2 100644 --- a/server/election/lease_test.go +++ b/server/election/lease_test.go @@ -27,11 +27,10 @@ import ( func TestLease(t *testing.T) { re := require.New(t) - cfg := etcdutil.NewTestSingleConfig() + cfg := etcdutil.NewTestSingleConfig(t) etcd, err := embed.StartEtcd(cfg) defer func() { etcd.Close() - etcdutil.CleanConfig(cfg) }() re.NoError(err) diff --git a/server/encryptionkm/key_manager_test.go b/server/encryptionkm/key_manager_test.go index 5e0d864942c..3ca8bb320d4 100644 --- a/server/encryptionkm/key_manager_test.go +++ b/server/encryptionkm/key_manager_test.go @@ -20,6 +20,7 @@ import ( "fmt" "net/url" "os" + "path/filepath" "sync/atomic" "testing" "time" @@ -48,10 +49,10 @@ func getTestDataKey() []byte { return key } -func newTestEtcd(re *require.Assertions) (client *clientv3.Client, cleanup func()) { +func newTestEtcd(t *testing.T, re *require.Assertions) (client *clientv3.Client) { cfg := embed.NewConfig() cfg.Name = "test_etcd" - cfg.Dir, _ = os.MkdirTemp("/tmp", "test_etcd") + cfg.Dir = t.TempDir() cfg.Logger = "zap" pu, err := url.Parse(tempurl.Alloc()) re.NoError(err) @@ -72,31 +73,25 @@ func newTestEtcd(re *require.Assertions) (client *clientv3.Client, cleanup func( }) re.NoError(err) - cleanup = func() { + t.Cleanup(func() { client.Close() server.Close() - os.RemoveAll(cfg.Dir) - } + }) - return client, cleanup + return client } -func newTestKeyFile(re *require.Assertions, key ...string) (keyFilePath string, cleanup func()) { +func newTestKeyFile(t *testing.T, re *require.Assertions, key ...string) (keyFilePath string) { testKey := testMasterKey for _, k := range key { testKey = k } - tempDir, err := os.MkdirTemp("/tmp", "test_key_file") - re.NoError(err) - keyFilePath = tempDir + "/key" - err = os.WriteFile(keyFilePath, []byte(testKey), 0600) - re.NoError(err) - cleanup = func() { - os.RemoveAll(tempDir) - } + keyFilePath = filepath.Join(t.TempDir(), 
"key") + err := os.WriteFile(keyFilePath, []byte(testKey), 0600) + re.NoError(err) - return keyFilePath, cleanup + return keyFilePath } func newTestLeader(re *require.Assertions, client *clientv3.Client) *election.Leadership { @@ -118,8 +113,7 @@ func checkMasterKeyMeta(re *require.Assertions, value []byte, meta *encryptionpb func TestNewKeyManagerBasic(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() + client := newTestEtcd(t, re) // Use default config. config := &encryption.Config{} err := config.Adjust() @@ -141,10 +135,8 @@ func TestNewKeyManagerBasic(t *testing.T) { func TestNewKeyManagerWithCustomConfig(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) // Custom config rotatePeriod, err := time.ParseDuration("100h") re.NoError(err) @@ -181,10 +173,8 @@ func TestNewKeyManagerWithCustomConfig(t *testing.T) { func TestNewKeyManagerLoadKeys(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Use default config. config := &encryption.Config{} @@ -224,8 +214,7 @@ func TestNewKeyManagerLoadKeys(t *testing.T) { func TestGetCurrentKey(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() + client := newTestEtcd(t, re) // Use default config. config := &encryption.Config{} err := config.Adjust() @@ -268,10 +257,8 @@ func TestGetCurrentKey(t *testing.T) { func TestGetKey(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Store initial keys in etcd. masterKeyMeta := newMasterKey(keyFile) @@ -324,10 +311,8 @@ func TestGetKey(t *testing.T) { func TestLoadKeyEmpty(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Store initial keys in etcd. masterKeyMeta := newMasterKey(keyFile) @@ -362,10 +347,8 @@ func TestWatcher(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -440,8 +423,7 @@ func TestWatcher(t *testing.T) { func TestSetLeadershipWithEncryptionOff(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() + client := newTestEtcd(t, re) // Use default config. config := &encryption.Config{} err := config.Adjust() @@ -466,10 +448,8 @@ func TestSetLeadershipWithEncryptionEnabling(t *testing.T) { // Initialize. 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -521,10 +501,8 @@ func TestSetLeadershipWithEncryptionMethodChanged(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -599,10 +577,8 @@ func TestSetLeadershipWithCurrentKeyExposed(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -672,10 +648,8 @@ func TestSetLeadershipWithCurrentKeyExpired(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -749,12 +723,9 @@ func TestSetLeadershipWithMasterKeyChanged(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() - keyFile2, cleanupKeyFile2 := newTestKeyFile(re, testMasterKey2) - defer cleanupKeyFile2() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) + keyFile2 := newTestKeyFile(t, re, testMasterKey2) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -817,10 +788,8 @@ func TestSetLeadershipWithMasterKeyChanged(t *testing.T) { func TestSetLeadershipMasterKeyWithCiphertextKey(t *testing.T) { re := require.New(t) // Initialize. - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -897,10 +866,8 @@ func TestSetLeadershipWithEncryptionDisabling(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -955,10 +922,8 @@ func TestKeyRotation(t *testing.T) { // Initialize. 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() @@ -1053,10 +1018,8 @@ func TestKeyRotationConflict(t *testing.T) { // Initialize. ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, cleanupEtcd := newTestEtcd(re) - defer cleanupEtcd() - keyFile, cleanupKeyFile := newTestKeyFile(re) - defer cleanupKeyFile() + client := newTestEtcd(t, re) + keyFile := newTestKeyFile(t, re) leadership := newTestLeader(re, client) // Setup helper helper := defaultKeyManagerHelper() diff --git a/server/region_syncer/client_test.go b/server/region_syncer/client_test.go index b63deaae3e0..80185e86f94 100644 --- a/server/region_syncer/client_test.go +++ b/server/region_syncer/client_test.go @@ -16,7 +16,6 @@ package syncer import ( "context" - "os" "testing" "time" @@ -34,9 +33,7 @@ import ( // For issue https://github.com/tikv/pd/issues/3936 func TestLoadRegion(t *testing.T) { re := require.New(t) - tempDir, err := os.MkdirTemp(os.TempDir(), "region_syncer_load_region") - re.NoError(err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil) re.NoError(err) @@ -64,9 +61,7 @@ func TestLoadRegion(t *testing.T) { func TestErrorCode(t *testing.T) { re := require.New(t) - tempDir, err := os.MkdirTemp(os.TempDir(), "region_syncer_err") - re.NoError(err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() rs, err := storage.NewStorageWithLevelDBBackend(context.Background(), tempDir, nil) re.NoError(err) server := &mockServer{ diff --git a/server/storage/kv/kv_test.go b/server/storage/kv/kv_test.go index 88bac9b279f..ac2911036aa 100644 --- a/server/storage/kv/kv_test.go +++ b/server/storage/kv/kv_test.go @@ -17,7 +17,6 @@ package kv import ( "fmt" "net/url" - "os" "path" "sort" "strconv" @@ -31,8 +30,7 @@ import ( func TestEtcd(t *testing.T) { re := require.New(t) - cfg := newTestSingleConfig() - defer cleanConfig(cfg) + cfg := newTestSingleConfig(t) etcd, err := embed.StartEtcd(cfg) re.NoError(err) defer etcd.Close() @@ -51,9 +49,7 @@ func TestEtcd(t *testing.T) { func TestLevelDB(t *testing.T) { re := require.New(t) - dir, err := os.MkdirTemp("/tmp", "leveldb_kv") - re.NoError(err) - defer os.RemoveAll(dir) + dir := t.TempDir() kv, err := NewLevelDBKV(dir) re.NoError(err) @@ -121,10 +117,10 @@ func testRange(re *require.Assertions, kv Base) { } } -func newTestSingleConfig() *embed.Config { +func newTestSingleConfig(t *testing.T) *embed.Config { cfg := embed.NewConfig() cfg.Name = "test_etcd" - cfg.Dir, _ = os.MkdirTemp("/tmp", "test_etcd") + cfg.Dir = t.TempDir() cfg.WalDir = "" cfg.Logger = "zap" cfg.LogOutputs = []string{"stdout"} @@ -141,8 +137,3 @@ func newTestSingleConfig() *embed.Config { cfg.ClusterState = embed.ClusterStateFlagNew return cfg } - -func cleanConfig(cfg *embed.Config) { - // Clean data directory - os.RemoveAll(cfg.Dir) -} diff --git a/tests/client/client_tls_test.go b/tests/client/client_tls_test.go index 48a6fec3d2d..997abbf3f35 100644 --- a/tests/client/client_tls_test.go +++ b/tests/client/client_tls_test.go @@ -62,28 +62,22 @@ func TestTLSReloadAtomicReplace(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - 
tmpDir, err := os.MkdirTemp(os.TempDir(), "cert-tmp") - re.NoError(err) + tmpDir := t.TempDir() os.RemoveAll(tmpDir) - defer os.RemoveAll(tmpDir) - certsDir, err := os.MkdirTemp(os.TempDir(), "cert-to-load") - re.NoError(err) - defer os.RemoveAll(certsDir) + certsDir := t.TempDir() - certsDirExp, err := os.MkdirTemp(os.TempDir(), "cert-expired") - re.NoError(err) - defer os.RemoveAll(certsDirExp) + certsDirExp := t.TempDir() cloneFunc := func() transport.TLSInfo { tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) re.NoError(terr) - _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp) + _, err := copyTLSFiles(testTLSInfoExpired, certsDirExp) re.NoError(err) return tlsInfo } replaceFunc := func() { - err = os.Rename(certsDir, tmpDir) + err := os.Rename(certsDir, tmpDir) re.NoError(err) err = os.Rename(certsDirExp, certsDir) re.NoError(err) @@ -93,7 +87,7 @@ func TestTLSReloadAtomicReplace(t *testing.T) { // 'certsDirExp' does not exist } revertFunc := func() { - err = os.Rename(tmpDir, certsDirExp) + err := os.Rename(tmpDir, certsDirExp) re.NoError(err) err = os.Rename(certsDir, tmpDir) From e74e2771d5cec80c7fffc705e75a6ae54e8a5122 Mon Sep 17 00:00:00 2001 From: buffer <1045931706@qq.com> Date: Tue, 14 Jun 2022 11:44:33 +0800 Subject: [PATCH 16/35] config: the defualt value of `max-merge-region-keys` is related with `max-merge-region-size` (#5084) close tikv/pd#5083 Signed-off-by: bufferflies <1045931706@qq.com> Co-authored-by: Ti Chi Robot --- server/api/config.go | 8 ++++++-- server/api/config_test.go | 13 +++++++++++++ server/config/config.go | 13 +++++++++---- server/config/config_test.go | 7 ++++--- server/config/persist_options.go | 11 ++++++++++- tests/pdctl/config/config_test.go | 25 +++++++++++++++++++++++-- 6 files changed, 65 insertions(+), 12 deletions(-) diff --git a/server/api/config.go b/server/api/config.go index de87947b785..d4d90735289 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -54,7 +54,9 @@ func newConfHandler(svr *server.Server, rd *render.Render) *confHandler { // @Success 200 {object} config.Config // @Router /config [get] func (h *confHandler) GetConfig(w http.ResponseWriter, r *http.Request) { - h.rd.JSON(w, http.StatusOK, h.svr.GetConfig()) + cfg := h.svr.GetConfig() + cfg.Schedule.MaxMergeRegionKeys = cfg.Schedule.GetMaxMergeRegionKeys() + h.rd.JSON(w, http.StatusOK, cfg) } // @Tags config @@ -309,7 +311,9 @@ func mergeConfig(v interface{}, data []byte) (updated bool, found bool, err erro // @Success 200 {object} config.ScheduleConfig // @Router /config/schedule [get] func (h *confHandler) GetScheduleConfig(w http.ResponseWriter, r *http.Request) { - h.rd.JSON(w, http.StatusOK, h.svr.GetScheduleConfig()) + cfg := h.svr.GetScheduleConfig() + cfg.MaxMergeRegionKeys = cfg.GetMaxMergeRegionKeys() + h.rd.JSON(w, http.StatusOK, cfg) } // @Tags config diff --git a/server/api/config_test.go b/server/api/config_test.go index 271849ce223..7abfafd04a6 100644 --- a/server/api/config_test.go +++ b/server/api/config_test.go @@ -370,6 +370,19 @@ func (s *testConfigSuite) TestConfigTTL(c *C) { err = tu.CheckPostJSON(testDialClient, createTTLUrl(s.urlPrefix, 1), postData, tu.StatusNotOK(c), tu.StringEqual(c, "\"unsupported ttl config schedule.invalid-ttl-config\"\n")) c.Assert(err, IsNil) + + // only set max-merge-region-size + mergeConfig := map[string]interface{}{ + "schedule.max-merge-region-size": 999, + } + postData, err = json.Marshal(mergeConfig) + c.Assert(err, IsNil) + + err = tu.CheckPostJSON(testDialClient, createTTLUrl(s.urlPrefix, 1), 
postData, tu.StatusOK(c)) + c.Assert(err, IsNil) + c.Assert(s.svr.GetPersistOptions().GetMaxMergeRegionSize(), Equals, uint64(999)) + // max-merge-region-keys should keep consistence with max-merge-region-size. + c.Assert(s.svr.GetPersistOptions().GetMaxMergeRegionKeys(), Equals, uint64(999*10000)) } func (s *testConfigSuite) TestTTLConflict(c *C) { diff --git a/server/config/config.go b/server/config/config.go index df833594f74..98d3ddbe66d 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -785,7 +785,6 @@ const ( defaultMaxSnapshotCount = 64 defaultMaxPendingPeerCount = 64 defaultMaxMergeRegionSize = 20 - defaultMaxMergeRegionKeys = 200000 defaultSplitMergeInterval = 1 * time.Hour defaultPatrolRegionInterval = 10 * time.Millisecond defaultMaxStoreDownTime = 30 * time.Minute @@ -822,9 +821,6 @@ func (c *ScheduleConfig) adjust(meta *configMetaData, reloading bool) error { if !meta.IsDefined("max-merge-region-size") { adjustUint64(&c.MaxMergeRegionSize, defaultMaxMergeRegionSize) } - if !meta.IsDefined("max-merge-region-keys") { - adjustUint64(&c.MaxMergeRegionKeys, defaultMaxMergeRegionKeys) - } adjustDuration(&c.SplitMergeInterval, defaultSplitMergeInterval) adjustDuration(&c.PatrolRegionInterval, defaultPatrolRegionInterval) adjustDuration(&c.MaxStoreDownTime, defaultMaxStoreDownTime) @@ -910,6 +906,15 @@ func (c *ScheduleConfig) migrateConfigurationMap() map[string][2]*bool { } } +// GetMaxMergeRegionKeys returns the max merge keys. +// it should keep consistent with tikv: https://github.com/tikv/tikv/pull/12484 +func (c *ScheduleConfig) GetMaxMergeRegionKeys() uint64 { + if keys := c.MaxMergeRegionKeys; keys != 0 { + return keys + } + return c.MaxMergeRegionSize * 10000 +} + func (c *ScheduleConfig) parseDeprecatedFlag(meta *configMetaData, name string, old, new bool) (bool, error) { oldName, newName := "disable-"+name, "enable-"+name defineOld, defineNew := meta.IsDefined(oldName), meta.IsDefined(newName) diff --git a/server/config/config_test.go b/server/config/config_test.go index 032c0526739..fe44ee619ee 100644 --- a/server/config/config_test.go +++ b/server/config/config_test.go @@ -189,13 +189,14 @@ leader-schedule-limit = 0 re.Equal(defaultLeaderLease, cfg.LeaderLease) re.Equal(uint(20000000), cfg.MaxRequestBytes) // When defined, use values from config file. + re.Equal(0*10000, int(cfg.Schedule.GetMaxMergeRegionKeys())) re.Equal(uint64(0), cfg.Schedule.MaxMergeRegionSize) re.True(cfg.Schedule.EnableOneWayMerge) re.Equal(uint64(0), cfg.Schedule.LeaderScheduleLimit) // When undefined, use default values. 
re.True(cfg.PreVote) re.Equal("info", cfg.Log.Level) - re.Equal(uint64(defaultMaxMergeRegionKeys), cfg.Schedule.MaxMergeRegionKeys) + re.Equal(uint64(0), cfg.Schedule.MaxMergeRegionKeys) re.Equal("http://127.0.0.1:9090", cfg.PDServerCfg.MetricStorage) re.Equal(DefaultTSOUpdatePhysicalInterval, cfg.TSOUpdatePhysicalInterval.Duration) @@ -208,6 +209,7 @@ lease = 0 [schedule] type = "random-merge" +max-merge-region-keys = 400000 ` cfg = NewConfig() meta, err = toml.Decode(cfgData, &cfg) @@ -215,7 +217,7 @@ type = "random-merge" err = cfg.Adjust(&meta, false) re.NoError(err) re.Contains(cfg.WarningMsgs[0], "Config contains undefined item") - + re.Equal(40*10000, int(cfg.Schedule.GetMaxMergeRegionKeys())) // Check misspelled schedulers name cfgData = ` name = "" @@ -229,7 +231,6 @@ type = "random-merge-schedulers" re.NoError(err) err = cfg.Adjust(&meta, false) re.Error(err) - // Check correct schedulers name cfgData = ` name = "" diff --git a/server/config/persist_options.go b/server/config/persist_options.go index fe7203722c2..643e20a3087 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -244,8 +244,17 @@ func (o *PersistOptions) GetMaxMergeRegionSize() uint64 { } // GetMaxMergeRegionKeys returns the max number of keys. +// It returns size * 10000 if the key of max-merge-region-Keys doesn't exist. func (o *PersistOptions) GetMaxMergeRegionKeys() uint64 { - return o.getTTLUintOr(maxMergeRegionKeysKey, o.GetScheduleConfig().MaxMergeRegionKeys) + keys, exist, err := o.getTTLUint(maxMergeRegionKeysKey) + if exist && err == nil { + return keys + } + size, exist, err := o.getTTLUint(maxMergeRegionSizeKey) + if exist && err == nil { + return size * 10000 + } + return o.GetScheduleConfig().GetMaxMergeRegionKeys() } // GetSplitMergeInterval returns the interval between finishing split and starting to merge. diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index cb564699b53..311a0e7db99 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -97,7 +97,9 @@ func (s *configTestSuite) TestConfig(c *C) { scheduleConfig.EnableRemoveExtraReplica = false scheduleConfig.EnableLocationReplacement = false scheduleConfig.StoreLimitMode = "" - + c.Assert(scheduleConfig.MaxMergeRegionKeys, Equals, uint64(0)) + // The result of config show doesn't be 0. + scheduleConfig.MaxMergeRegionKeys = scheduleConfig.GetMaxMergeRegionKeys() c.Assert(&cfg.Schedule, DeepEquals, scheduleConfig) c.Assert(&cfg.Replication, DeepEquals, svr.GetReplicationConfig()) @@ -122,7 +124,26 @@ func (s *configTestSuite) TestConfig(c *C) { c.Assert(err, IsNil) scheduleCfg := config.ScheduleConfig{} c.Assert(json.Unmarshal(output, &scheduleCfg), IsNil) - c.Assert(&scheduleCfg, DeepEquals, svr.GetScheduleConfig()) + scheduleConfig = svr.GetScheduleConfig() + scheduleConfig.MaxMergeRegionKeys = scheduleConfig.GetMaxMergeRegionKeys() + c.Assert(&scheduleCfg, DeepEquals, scheduleConfig) + + c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionSize), Equals, 20) + c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionKeys), Equals, 0) + c.Assert(int(svr.GetScheduleConfig().GetMaxMergeRegionKeys()), Equals, 20*10000) + + // set max-merge-region-size to 40MB + args = []string{"-u", pdAddr, "config", "set", "max-merge-region-size", "40"} + _, err = pdctl.ExecuteCommand(cmd, args...) 
+ c.Assert(err, IsNil) + c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionSize), Equals, 40) + c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionKeys), Equals, 0) + c.Assert(int(svr.GetScheduleConfig().GetMaxMergeRegionKeys()), Equals, 40*10000) + args = []string{"-u", pdAddr, "config", "set", "max-merge-region-keys", "200000"} + _, err = pdctl.ExecuteCommand(cmd, args...) + c.Assert(err, IsNil) + c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionKeys), Equals, 20*10000) + c.Assert(int(svr.GetScheduleConfig().GetMaxMergeRegionKeys()), Equals, 20*10000) // config show replication args = []string{"-u", pdAddr, "config", "show", "replication"} From 74661fad2e90a312622cff4368bff5e45f24d632 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Tue, 14 Jun 2022 14:40:33 +0800 Subject: [PATCH 17/35] tools: migrate test framework to testify (#5149) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- .gitignore | 1 + tools/pd-analysis/analysis/parse_log_test.go | 43 ++++++++--------- .../analysis/transfer_counter_test.go | 35 +++++++------- tools/pd-ctl/pdctl/ctl_test.go | 19 +++----- .../simulator/simutil/key_test.go | 46 ++++++++----------- 5 files changed, 64 insertions(+), 80 deletions(-) diff --git a/.gitignore b/.gitignore index fbe6a8595a8..93e6189a687 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ package.list report.xml coverage.xml coverage +*.txt diff --git a/tools/pd-analysis/analysis/parse_log_test.go b/tools/pd-analysis/analysis/parse_log_test.go index 475e3ae7797..ffdcb2137c0 100644 --- a/tools/pd-analysis/analysis/parse_log_test.go +++ b/tools/pd-analysis/analysis/parse_log_test.go @@ -18,17 +18,9 @@ import ( "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testParseLog{}) - -type testParseLog struct{} - func transferCounterParseLog(operator, content string, expect []uint64) bool { r, _ := GetTransferCounter().CompileRegex(operator) results, _ := GetTransferCounter().parseLine(content, r) @@ -43,73 +35,76 @@ func transferCounterParseLog(operator, content string, expect []uint64) bool { return true } -func (t *testParseLog) TestTransferCounterParseLog(c *C) { +func TestTransferCounterParseLog(t *testing.T) { + re := require.New(t) { operator := "balance-leader" content := "[2019/09/05 04:15:52.404 +00:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=54252] [operator=\"\"balance-leader {transfer leader: store 4 to 6} (kind:leader,balance, region:54252(8243,398), createAt:2019-09-05 04:15:52.400290023 +0000 UTC m=+91268.739649520, startAt:2019-09-05 04:15:52.400489629 +0000 UTC m=+91268.739849120, currentStep:1, steps:[transfer leader from store 4 to store 6]) finished\"\"]" var expect = []uint64{54252, 4, 6} - c.Assert(transferCounterParseLog(operator, content, expect), IsTrue) + re.True(transferCounterParseLog(operator, content, expect)) } { operator := "balance-region" content := "[2019/09/03 17:42:07.898 +08:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=24622] [operator=\"\"balance-region {mv peer: store [6] to [1]} (kind:region,balance, region:24622(1,1), createAt:2019-09-03 17:42:06.602589701 +0800 CST m=+737.457773921, startAt:2019-09-03 17:42:06.602849306 +0800 CST m=+737.458033475, currentStep:3, steps:[add learner peer 64064 on store 1, promote learner peer 64064 on store 1 to voter, remove peer on store 6]) finished\"\"]\"" var expect = []uint64{24622, 6, 1} - 
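The conversion applied in this file, and in the rest of the series, follows one mechanical mapping. The summary below is illustrative only; the example test is not code from the repository:

	package example_test

	import (
		"strconv"
		"testing"

		"github.com/stretchr/testify/require"
	)

	// Before (pingcap/check)            ->  After (testify/require)
	//  func (s *suite) TestFoo(c *C)    ->  func TestFoo(t *testing.T) with re := require.New(t)
	//  c.Assert(err, IsNil)             ->  re.NoError(err)
	//  c.Assert(err, NotNil)            ->  re.Error(err)
	//  c.Assert(ok, IsTrue)             ->  re.True(ok)
	//  c.Assert(got, Equals, want)      ->  re.Equal(want, got)  // expected value first
	//  c.Assert(got, DeepEquals, want)  ->  re.Equal(want, got)
	//  c.Assert(s, HasLen, n)           ->  re.Len(s, n)
	func TestExample(t *testing.T) {
		re := require.New(t)
		got, err := strconv.Atoi("42")
		re.NoError(err)
		re.Equal(42, got)
	}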
c.Assert(transferCounterParseLog(operator, content, expect), IsTrue) + re.True(transferCounterParseLog(operator, content, expect)) } { operator := "transfer-hot-write-leader" content := "[2019/09/05 14:05:42.811 +08:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=94] [operator=\"\"transfer-hot-write-leader {transfer leader: store 2 to 1} (kind:leader,hot-region, region:94(1,1), createAt:2019-09-05 14:05:42.676394689 +0800 CST m=+14.955640307, startAt:2019-09-05 14:05:42.676589507 +0800 CST m=+14.955835051, currentStep:1, steps:[transfer leader from store 2 to store 1]) finished\"\"]" var expect = []uint64{94, 2, 1} - c.Assert(transferCounterParseLog(operator, content, expect), IsTrue) + re.True(transferCounterParseLog(operator, content, expect)) } { operator := "move-hot-write-region" content := "[2019/09/05 14:05:54.311 +08:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=98] [operator=\"\"move-hot-write-region {mv peer: store [2] to [10]} (kind:region,hot-region, region:98(1,1), createAt:2019-09-05 14:05:49.718201432 +0800 CST m=+21.997446945, startAt:2019-09-05 14:05:49.718336308 +0800 CST m=+21.997581822, currentStep:3, steps:[add learner peer 2048 on store 10, promote learner peer 2048 on store 10 to voter, remove peer on store 2]) finished\"\"]" var expect = []uint64{98, 2, 10} - c.Assert(transferCounterParseLog(operator, content, expect), IsTrue) + re.True(transferCounterParseLog(operator, content, expect)) } { operator := "transfer-hot-read-leader" content := "[2019/09/05 14:16:38.758 +08:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=85] [operator=\"\"transfer-hot-read-leader {transfer leader: store 1 to 5} (kind:leader,hot-region, region:85(1,1), createAt:2019-09-05 14:16:38.567463945 +0800 CST m=+29.117453011, startAt:2019-09-05 14:16:38.567603515 +0800 CST m=+29.117592496, currentStep:1, steps:[transfer leader from store 1 to store 5]) finished\"\"]" var expect = []uint64{85, 1, 5} - c.Assert(transferCounterParseLog(operator, content, expect), IsTrue) + re.True(transferCounterParseLog(operator, content, expect)) } { operator := "move-hot-read-region" content := "[2019/09/05 14:19:15.066 +08:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=389] [operator=\"\"move-hot-read-region {mv peer: store [5] to [4]} (kind:leader,region,hot-region, region:389(1,1), createAt:2019-09-05 14:19:13.576359364 +0800 CST m=+25.855737101, startAt:2019-09-05 14:19:13.576556556 +0800 CST m=+25.855934288, currentStep:4, steps:[add learner peer 2014 on store 4, promote learner peer 2014 on store 4 to voter, transfer leader from store 5 to store 3, remove peer on store 5]) finished\"\"]" var expect = []uint64{389, 5, 4} - c.Assert(transferCounterParseLog(operator, content, expect), IsTrue) + re.True(transferCounterParseLog(operator, content, expect)) } } -func (t *testParseLog) TestIsExpectTime(c *C) { +func TestIsExpectTime(t *testing.T) { + re := require.New(t) { testFunction := isExpectTime("2019/09/05 14:19:15", DefaultLayout, true) current, _ := time.Parse(DefaultLayout, "2019/09/05 14:19:14") - c.Assert(testFunction(current), IsTrue) + re.True(testFunction(current)) } { testFunction := isExpectTime("2019/09/05 14:19:15", DefaultLayout, false) current, _ := time.Parse(DefaultLayout, "2019/09/05 14:19:16") - c.Assert(testFunction(current), IsTrue) + re.True(testFunction(current)) } { testFunction := isExpectTime("", DefaultLayout, true) current, _ := time.Parse(DefaultLayout, "2019/09/05 14:19:14") - 
c.Assert(testFunction(current), IsTrue) + re.True(testFunction(current)) } { testFunction := isExpectTime("", DefaultLayout, false) current, _ := time.Parse(DefaultLayout, "2019/09/05 14:19:16") - c.Assert(testFunction(current), IsTrue) + re.True(testFunction(current)) } } -func (t *testParseLog) TestCurrentTime(c *C) { +func TestCurrentTime(t *testing.T) { + re := require.New(t) getCurrentTime := currentTime(DefaultLayout) content := "[2019/09/05 14:19:15.066 +08:00] [INFO] [operator_controller.go:119] [\"operator finish\"] [region-id=389] [operator=\"\"move-hot-read-region {mv peer: store 5 to 4} (kind:leader,region,hot-region, region:389(1,1), createAt:2019-09-05 14:19:13.576359364 +0800 CST m=+25.855737101, startAt:2019-09-05 14:19:13.576556556 +0800 CST m=+25.855934288, currentStep:4, steps:[add learner peer 2014 on store 4, promote learner peer 2014 on store 4 to voter, transfer leader from store 5 to store 3, remove peer on store 5]) finished\"\"]" current, err := getCurrentTime(content) - c.Assert(err, Equals, nil) + re.NoError(err) expect, _ := time.Parse(DefaultLayout, "2019/09/05 14:19:15") - c.Assert(current, Equals, expect) + re.Equal(expect, current) } diff --git a/tools/pd-analysis/analysis/transfer_counter_test.go b/tools/pd-analysis/analysis/transfer_counter_test.go index 796f0652345..092767cd49d 100644 --- a/tools/pd-analysis/analysis/transfer_counter_test.go +++ b/tools/pd-analysis/analysis/transfer_counter_test.go @@ -15,12 +15,10 @@ package analysis import ( - . "github.com/pingcap/check" -) - -var _ = Suite(&testTransferRegionCounter{}) + "testing" -type testTransferRegionCounter struct{} + "github.com/stretchr/testify/require" +) func addData(test [][]uint64) { for i, row := range test { @@ -33,7 +31,8 @@ func addData(test [][]uint64) { } } -func (t *testTransferRegionCounter) TestCounterRedundant(c *C) { +func TestCounterRedundant(t *testing.T) { + re := require.New(t) { test := [][]uint64{ {0, 0, 0, 0, 0, 0, 0}, @@ -44,12 +43,12 @@ func (t *testTransferRegionCounter) TestCounterRedundant(c *C) { {0, 5, 9, 0, 0, 0, 0}, {0, 0, 8, 0, 0, 0, 0}} GetTransferCounter().Init(6, 3000) - c.Assert(GetTransferCounter().Redundant, Equals, uint64(0)) - c.Assert(GetTransferCounter().Necessary, Equals, uint64(0)) + re.Equal(uint64(0), GetTransferCounter().Redundant) + re.Equal(uint64(0), GetTransferCounter().Necessary) addData(test) GetTransferCounter().Result() - c.Assert(GetTransferCounter().Redundant, Equals, uint64(64)) - c.Assert(GetTransferCounter().Necessary, Equals, uint64(5)) + re.Equal(uint64(64), GetTransferCounter().Redundant) + re.Equal(uint64(5), GetTransferCounter().Necessary) } { test := [][]uint64{ @@ -61,12 +60,12 @@ func (t *testTransferRegionCounter) TestCounterRedundant(c *C) { {0, 1, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0}} GetTransferCounter().Init(6, 3000) - c.Assert(GetTransferCounter().Redundant, Equals, uint64(0)) - c.Assert(GetTransferCounter().Necessary, Equals, uint64(0)) + re.Equal(uint64(0), GetTransferCounter().Redundant) + re.Equal(uint64(0), GetTransferCounter().Necessary) addData(test) GetTransferCounter().Result() - c.Assert(GetTransferCounter().Redundant, Equals, uint64(0)) - c.Assert(GetTransferCounter().Necessary, Equals, uint64(5)) + re.Equal(uint64(0), GetTransferCounter().Redundant) + re.Equal(uint64(5), GetTransferCounter().Necessary) } { test := [][]uint64{ @@ -80,12 +79,12 @@ func (t *testTransferRegionCounter) TestCounterRedundant(c *C) { {0, 0, 48, 0, 84, 1, 48, 0, 20}, {0, 61, 2, 57, 7, 122, 1, 21, 0}} GetTransferCounter().Init(8, 
3000) - c.Assert(GetTransferCounter().Redundant, Equals, uint64(0)) - c.Assert(GetTransferCounter().Necessary, Equals, uint64(0)) + re.Equal(uint64(0), GetTransferCounter().Redundant) + re.Equal(uint64(0), GetTransferCounter().Necessary) addData(test) GetTransferCounter().Result() - c.Assert(GetTransferCounter().Redundant, Equals, uint64(1778)) - c.Assert(GetTransferCounter().Necessary, Equals, uint64(938)) + re.Equal(uint64(1778), GetTransferCounter().Redundant) + re.Equal(uint64(938), GetTransferCounter().Necessary) GetTransferCounter().PrintResult() } } diff --git a/tools/pd-ctl/pdctl/ctl_test.go b/tools/pd-ctl/pdctl/ctl_test.go index 90369bab46c..6dc29058e34 100644 --- a/tools/pd-ctl/pdctl/ctl_test.go +++ b/tools/pd-ctl/pdctl/ctl_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/stretchr/testify/require" ) func newCommand(usage, short string) *cobra.Command { @@ -31,6 +32,7 @@ func newCommand(usage, short string) *cobra.Command { } func TestGenCompleter(t *testing.T) { + re := require.New(t) var subCommand = []string{"testa", "testb", "testc", "testdef"} rootCmd := &cobra.Command{ @@ -65,13 +67,12 @@ func TestGenCompleter(t *testing.T) { } } - if inPrefixArray == false { - t.Errorf("%s not in prefix array", cmd) - } + re.True(inPrefixArray) } } func TestReadStdin(t *testing.T) { + re := require.New(t) s := []struct { in io.Reader targets []string @@ -84,16 +85,10 @@ func TestReadStdin(t *testing.T) { }} for _, v := range s { in, err := ReadStdin(v.in) - if err != nil { - t.Errorf("ReadStdin err:%v", err) - } - if len(v.targets) != len(in) { - t.Errorf("ReadStdin = %v, want %s, nil", in, v.targets) - } + re.NoError(err) + re.Equal(len(v.targets), len(in)) for i, target := range v.targets { - if target != in[i] { - t.Errorf("ReadStdin = %v, want %s, nil", in, v.targets) - } + re.Equal(target, in[i]) } } } diff --git a/tools/pd-simulator/simulator/simutil/key_test.go b/tools/pd-simulator/simulator/simutil/key_test.go index d7821c475fc..6174ec35381 100644 --- a/tools/pd-simulator/simulator/simutil/key_test.go +++ b/tools/pd-simulator/simulator/simutil/key_test.go @@ -17,46 +17,40 @@ package simutil import ( "testing" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/codec" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testTableKeySuite{}) - -type testTableKeySuite struct{} - -func (t *testTableKeySuite) TestGenerateTableKeys(c *C) { +func TestGenerateTableKeys(t *testing.T) { + re := require.New(t) tableCount := 3 size := 10 keys := GenerateTableKeys(tableCount, size) - c.Assert(keys, HasLen, size) + re.Len(keys, size) for i := 1; i < len(keys); i++ { - c.Assert(keys[i-1], Less, keys[i]) + re.Less(keys[i-1], keys[i]) s := []byte(keys[i-1]) e := []byte(keys[i]) for j := 0; j < 1000; j++ { split, err := GenerateTiDBEncodedSplitKey(s, e) - c.Assert(err, IsNil) - c.Assert(s, Less, split) - c.Assert(split, Less, e) + re.NoError(err) + re.Less(string(s), string(split)) + re.Less(string(split), string(e)) e = split } } } -func (t *testTableKeySuite) TestGenerateSplitKey(c *C) { +func TestGenerateSplitKey(t *testing.T) { + re := require.New(t) s := []byte(codec.EncodeBytes([]byte("a"))) e := []byte(codec.EncodeBytes([]byte("ab"))) for i := 0; i <= 1000; i++ { cc, err := GenerateTiDBEncodedSplitKey(s, e) - c.Assert(err, IsNil) - c.Assert(s, Less, cc) - c.Assert(cc, Less, e) + re.NoError(err) + re.Less(string(s), string(cc)) + re.Less(string(cc), string(e)) e = cc } @@ -64,19 +58,19 @@ func (t *testTableKeySuite) TestGenerateSplitKey(c *C) { s = []byte("") e = []byte{116, 128, 0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 0, 0, 0, 0, 0, 248} splitKey, err := GenerateTiDBEncodedSplitKey(s, e) - c.Assert(err, IsNil) - c.Assert(s, Less, splitKey) - c.Assert(splitKey, Less, e) + re.NoError(err) + re.Less(string(s), string(splitKey)) + re.Less(string(splitKey), string(e)) // split equal key s = codec.EncodeBytes([]byte{116, 128, 0, 0, 0, 0, 0, 0, 1}) e = codec.EncodeBytes([]byte{116, 128, 0, 0, 0, 0, 0, 0, 1, 1}) for i := 0; i <= 1000; i++ { - c.Assert(s, Less, e) + re.Less(string(s), string(e)) splitKey, err = GenerateTiDBEncodedSplitKey(s, e) - c.Assert(err, IsNil) - c.Assert(s, Less, splitKey) - c.Assert(splitKey, Less, e) + re.NoError(err) + re.Less(string(s), string(splitKey)) + re.Less(string(splitKey), string(e)) e = splitKey } } From d263b8586123387d219bdccf9a1733632ab0d9c2 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Tue, 14 Jun 2022 16:04:33 +0800 Subject: [PATCH 18/35] tests: testify the pd-ctl tests (#5154) ref tikv/pd#4813 Testify the pd-ctl tests. 
Signed-off-by: JmPotato --- tests/pdctl/cluster/cluster_test.go | 47 +- tests/pdctl/completion/completion_test.go | 17 +- tests/pdctl/config/config_test.go | 529 +++++++++--------- tests/pdctl/global_test.go | 25 +- tests/pdctl/health/health_test.go | 28 +- tests/pdctl/helper.go | 66 ++- tests/pdctl/hot/hot_test.go | 173 +++--- tests/pdctl/label/label_test.go | 39 +- tests/pdctl/log/log_test.go | 78 ++- tests/pdctl/member/member_test.go | 67 +-- tests/pdctl/operator/operator_test.go | 115 ++-- tests/pdctl/region/region_test.go | 75 ++- tests/pdctl/scheduler/scheduler_test.go | 141 ++--- tests/pdctl/store/store_test.go | 314 ++++++----- tests/pdctl/tso/tso_test.go | 26 +- tests/pdctl/unsafe/unsafe_operation_test.go | 29 +- tests/server/api/api_test.go | 22 +- .../server/storage/hot_region_storage_test.go | 22 +- 18 files changed, 877 insertions(+), 936 deletions(-) diff --git a/tests/pdctl/cluster/cluster_test.go b/tests/pdctl/cluster/cluster_test.go index 4b8cceb3bc5..9d69f89dcef 100644 --- a/tests/pdctl/cluster/cluster_test.go +++ b/tests/pdctl/cluster/cluster_test.go @@ -21,32 +21,25 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" clusterpkg "github.com/tikv/pd/server/cluster" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&clusterTestSuite{}) - -type clusterTestSuite struct{} - -func (s *clusterTestSuite) TestClusterAndPing(c *C) { +func TestClusterAndPing(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() err = cluster.GetServer(cluster.GetLeader()).BootstrapCluster() - c.Assert(err, IsNil) + re.NoError(err) pdAddr := cluster.GetConfig().GetClientURL() i := strings.Index(pdAddr, "//") pdAddr = pdAddr[i+2:] @@ -56,42 +49,42 @@ func (s *clusterTestSuite) TestClusterAndPing(c *C) { // cluster args := []string{"-u", pdAddr, "cluster"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) ci := &metapb.Cluster{} - c.Assert(json.Unmarshal(output, ci), IsNil) - c.Assert(ci, DeepEquals, cluster.GetCluster()) + re.NoError(json.Unmarshal(output, ci)) + re.Equal(cluster.GetCluster(), ci) // cluster info args = []string{"-u", pdAddr, "cluster"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) ci = &metapb.Cluster{} - c.Assert(json.Unmarshal(output, ci), IsNil) - c.Assert(ci, DeepEquals, cluster.GetCluster()) + re.NoError(json.Unmarshal(output, ci)) + re.Equal(cluster.GetCluster(), ci) // cluster status args = []string{"-u", pdAddr, "cluster", "status"} output, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) cs := &clusterpkg.Status{} - c.Assert(json.Unmarshal(output, cs), IsNil) + re.NoError(json.Unmarshal(output, cs)) clusterStatus, err := cluster.GetClusterStatus() - c.Assert(err, IsNil) - c.Assert(clusterStatus.RaftBootstrapTime.Equal(cs.RaftBootstrapTime), IsTrue) + re.NoError(err) + re.True(clusterStatus.RaftBootstrapTime.Equal(cs.RaftBootstrapTime)) // ref: https://github.com/onsi/gomega/issues/264 clusterStatus.RaftBootstrapTime = time.Time{} cs.RaftBootstrapTime = time.Time{} - c.Assert(cs, DeepEquals, clusterStatus) + re.Equal(clusterStatus, cs) // ping args = []string{"-u", pdAddr, "ping"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(output, NotNil) + re.NoError(err) + re.NotNil(output) // does not exist args = []string{"-u", pdAddr, "--cacert=ca.pem", "cluster"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, ErrorMatches, ".*no such file or directory.*") + re.Contains(err.Error(), "no such file or directory") } diff --git a/tests/pdctl/completion/completion_test.go b/tests/pdctl/completion/completion_test.go index f7cc30bbe05..c64615df9e1 100644 --- a/tests/pdctl/completion/completion_test.go +++ b/tests/pdctl/completion/completion_test.go @@ -17,29 +17,22 @@ package completion_test import ( "testing" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&completionTestSuite{}) - -type completionTestSuite struct{} - -func (s *completionTestSuite) TestCompletion(c *C) { +func TestCompletion(t *testing.T) { + re := require.New(t) cmd := pdctlCmd.GetRootCmd() // completion command args := []string{"completion", "bash"} _, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) // completion command args = []string{"completion", "zsh"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) } diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 311a0e7db99..f5acd3fd3ff 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -15,18 +15,16 @@ package config_test import ( - "bytes" "context" "encoding/json" "os" "reflect" - "strings" "testing" "time" "github.com/coreos/go-semver/semver" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/typeutil" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/placement" @@ -35,35 +33,28 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&configTestSuite{}) - -type configTestSuite struct{} - type testItem struct { name string value interface{} read func(scheduleConfig *config.ScheduleConfig) interface{} } -func (t *testItem) judge(c *C, scheduleConfigs ...*config.ScheduleConfig) { +func (t *testItem) judge(re *require.Assertions, scheduleConfigs ...*config.ScheduleConfig) { value := t.value for _, scheduleConfig := range scheduleConfigs { - c.Assert(scheduleConfig, NotNil) - c.Assert(reflect.TypeOf(t.read(scheduleConfig)), Equals, reflect.TypeOf(value)) + re.NotNil(scheduleConfig) + re.IsType(value, t.read(scheduleConfig)) } } -func (s *configTestSuite) TestConfig(c *C) { +func TestConfig(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -73,17 +64,17 @@ func (s *configTestSuite) TestConfig(c *C) { State: metapb.StoreState_Up, } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() // config show args := []string{"-u", pdAddr, "config", "show"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) cfg := config.Config{} - c.Assert(json.Unmarshal(output, &cfg), IsNil) + re.NoError(json.Unmarshal(output, &cfg)) scheduleConfig := svr.GetScheduleConfig() // hidden config @@ -97,127 +88,127 @@ func (s *configTestSuite) TestConfig(c *C) { scheduleConfig.EnableRemoveExtraReplica = false scheduleConfig.EnableLocationReplacement = false scheduleConfig.StoreLimitMode = "" - c.Assert(scheduleConfig.MaxMergeRegionKeys, Equals, uint64(0)) + re.Equal(uint64(0), scheduleConfig.MaxMergeRegionKeys) // The result of config show doesn't be 0. scheduleConfig.MaxMergeRegionKeys = scheduleConfig.GetMaxMergeRegionKeys() - c.Assert(&cfg.Schedule, DeepEquals, scheduleConfig) - c.Assert(&cfg.Replication, DeepEquals, svr.GetReplicationConfig()) + re.Equal(scheduleConfig, &cfg.Schedule) + re.Equal(svr.GetReplicationConfig(), &cfg.Replication) // config set trace-region-flow args = []string{"-u", pdAddr, "config", "set", "trace-region-flow", "false"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(svr.GetPDServerConfig().TraceRegionFlow, IsFalse) + re.NoError(err) + re.False(svr.GetPDServerConfig().TraceRegionFlow) args = []string{"-u", pdAddr, "config", "set", "flow-round-by-digit", "10"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(svr.GetPDServerConfig().FlowRoundByDigit, Equals, 10) + re.NoError(err) + re.Equal(10, svr.GetPDServerConfig().FlowRoundByDigit) args = []string{"-u", pdAddr, "config", "set", "flow-round-by-digit", "-10"} _, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, NotNil) + re.Error(err) // config show schedule args = []string{"-u", pdAddr, "config", "show", "schedule"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) scheduleCfg := config.ScheduleConfig{} - c.Assert(json.Unmarshal(output, &scheduleCfg), IsNil) + re.NoError(json.Unmarshal(output, &scheduleCfg)) scheduleConfig = svr.GetScheduleConfig() scheduleConfig.MaxMergeRegionKeys = scheduleConfig.GetMaxMergeRegionKeys() - c.Assert(&scheduleCfg, DeepEquals, scheduleConfig) + re.Equal(scheduleConfig, &scheduleCfg) - c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionSize), Equals, 20) - c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionKeys), Equals, 0) - c.Assert(int(svr.GetScheduleConfig().GetMaxMergeRegionKeys()), Equals, 20*10000) + re.Equal(20, int(svr.GetScheduleConfig().MaxMergeRegionSize)) + re.Equal(0, int(svr.GetScheduleConfig().MaxMergeRegionKeys)) + re.Equal(20*10000, int(svr.GetScheduleConfig().GetMaxMergeRegionKeys())) // set max-merge-region-size to 40MB args = []string{"-u", pdAddr, "config", "set", "max-merge-region-size", "40"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionSize), Equals, 40) - c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionKeys), Equals, 0) - c.Assert(int(svr.GetScheduleConfig().GetMaxMergeRegionKeys()), Equals, 40*10000) + re.NoError(err) + re.Equal(40, int(svr.GetScheduleConfig().MaxMergeRegionSize)) + re.Equal(0, int(svr.GetScheduleConfig().MaxMergeRegionKeys)) + re.Equal(40*10000, int(svr.GetScheduleConfig().GetMaxMergeRegionKeys())) args = []string{"-u", pdAddr, "config", "set", "max-merge-region-keys", "200000"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(int(svr.GetScheduleConfig().MaxMergeRegionKeys), Equals, 20*10000) - c.Assert(int(svr.GetScheduleConfig().GetMaxMergeRegionKeys()), Equals, 20*10000) + re.NoError(err) + re.Equal(20*10000, int(svr.GetScheduleConfig().MaxMergeRegionKeys)) + re.Equal(20*10000, int(svr.GetScheduleConfig().GetMaxMergeRegionKeys())) // config show replication args = []string{"-u", pdAddr, "config", "show", "replication"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) replicationCfg := config.ReplicationConfig{} - c.Assert(json.Unmarshal(output, &replicationCfg), IsNil) - c.Assert(&replicationCfg, DeepEquals, svr.GetReplicationConfig()) + re.NoError(json.Unmarshal(output, &replicationCfg)) + re.Equal(svr.GetReplicationConfig(), &replicationCfg) // config show cluster-version args1 := []string{"-u", pdAddr, "config", "show", "cluster-version"} output, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) + re.NoError(err) clusterVersion := semver.Version{} - c.Assert(json.Unmarshal(output, &clusterVersion), IsNil) - c.Assert(clusterVersion, DeepEquals, svr.GetClusterVersion()) + re.NoError(json.Unmarshal(output, &clusterVersion)) + re.Equal(svr.GetClusterVersion(), clusterVersion) // config set cluster-version args2 := []string{"-u", pdAddr, "config", "set", "cluster-version", "2.1.0-rc.5"} _, err = pdctl.ExecuteCommand(cmd, args2...) - c.Assert(err, IsNil) - c.Assert(clusterVersion, Not(DeepEquals), svr.GetClusterVersion()) + re.NoError(err) + re.NotEqual(svr.GetClusterVersion(), clusterVersion) output, err = pdctl.ExecuteCommand(cmd, args1...) 
- c.Assert(err, IsNil) + re.NoError(err) clusterVersion = semver.Version{} - c.Assert(json.Unmarshal(output, &clusterVersion), IsNil) - c.Assert(clusterVersion, DeepEquals, svr.GetClusterVersion()) + re.NoError(json.Unmarshal(output, &clusterVersion)) + re.Equal(svr.GetClusterVersion(), clusterVersion) // config show label-property args1 = []string{"-u", pdAddr, "config", "show", "label-property"} output, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) + re.NoError(err) labelPropertyCfg := config.LabelPropertyConfig{} - c.Assert(json.Unmarshal(output, &labelPropertyCfg), IsNil) - c.Assert(labelPropertyCfg, DeepEquals, svr.GetLabelProperty()) + re.NoError(json.Unmarshal(output, &labelPropertyCfg)) + re.Equal(svr.GetLabelProperty(), labelPropertyCfg) // config set label-property args2 = []string{"-u", pdAddr, "config", "set", "label-property", "reject-leader", "zone", "cn"} _, err = pdctl.ExecuteCommand(cmd, args2...) - c.Assert(err, IsNil) - c.Assert(labelPropertyCfg, Not(DeepEquals), svr.GetLabelProperty()) + re.NoError(err) + re.NotEqual(svr.GetLabelProperty(), labelPropertyCfg) output, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) + re.NoError(err) labelPropertyCfg = config.LabelPropertyConfig{} - c.Assert(json.Unmarshal(output, &labelPropertyCfg), IsNil) - c.Assert(labelPropertyCfg, DeepEquals, svr.GetLabelProperty()) + re.NoError(json.Unmarshal(output, &labelPropertyCfg)) + re.Equal(svr.GetLabelProperty(), labelPropertyCfg) // config delete label-property args3 := []string{"-u", pdAddr, "config", "delete", "label-property", "reject-leader", "zone", "cn"} _, err = pdctl.ExecuteCommand(cmd, args3...) - c.Assert(err, IsNil) - c.Assert(labelPropertyCfg, Not(DeepEquals), svr.GetLabelProperty()) + re.NoError(err) + re.NotEqual(svr.GetLabelProperty(), labelPropertyCfg) output, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) + re.NoError(err) labelPropertyCfg = config.LabelPropertyConfig{} - c.Assert(json.Unmarshal(output, &labelPropertyCfg), IsNil) - c.Assert(labelPropertyCfg, DeepEquals, svr.GetLabelProperty()) + re.NoError(json.Unmarshal(output, &labelPropertyCfg)) + re.Equal(svr.GetLabelProperty(), labelPropertyCfg) // config set min-resolved-ts-persistence-interval args = []string{"-u", pdAddr, "config", "set", "min-resolved-ts-persistence-interval", "1s"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(svr.GetPDServerConfig().MinResolvedTSPersistenceInterval, Equals, typeutil.NewDuration(time.Second)) + re.NoError(err) + re.Equal(typeutil.NewDuration(time.Second), svr.GetPDServerConfig().MinResolvedTSPersistenceInterval) // config set max-store-preparing-time 10m args = []string{"-u", pdAddr, "config", "set", "max-store-preparing-time", "10m"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(svr.GetScheduleConfig().MaxStorePreparingTime, Equals, typeutil.NewDuration(10*time.Minute)) + re.NoError(err) + re.Equal(typeutil.NewDuration(10*time.Minute), svr.GetScheduleConfig().MaxStorePreparingTime) args = []string{"-u", pdAddr, "config", "set", "max-store-preparing-time", "0s"} _, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) - c.Assert(svr.GetScheduleConfig().MaxStorePreparingTime, Equals, typeutil.NewDuration(0)) + re.NoError(err) + re.Equal(typeutil.NewDuration(0), svr.GetScheduleConfig().MaxStorePreparingTime) // test config read and write testItems := []testItem{ @@ -242,43 +233,44 @@ func (s *configTestSuite) TestConfig(c *C) { // write args1 = []string{"-u", pdAddr, "config", "set", item.name, reflect.TypeOf(item.value).String()} _, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) + re.NoError(err) // read args2 = []string{"-u", pdAddr, "config", "show"} output, err = pdctl.ExecuteCommand(cmd, args2...) - c.Assert(err, IsNil) + re.NoError(err) cfg = config.Config{} - c.Assert(json.Unmarshal(output, &cfg), IsNil) + re.NoError(json.Unmarshal(output, &cfg)) // judge - item.judge(c, &cfg.Schedule, svr.GetScheduleConfig()) + item.judge(re, &cfg.Schedule, svr.GetScheduleConfig()) } // test error or deprecated config name args1 = []string{"-u", pdAddr, "config", "set", "foo-bar", "1"} output, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "not found"), IsTrue) + re.NoError(err) + re.Contains(string(output), "not found") args1 = []string{"-u", pdAddr, "config", "set", "disable-remove-down-replica", "true"} output, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "already been deprecated"), IsTrue) + re.NoError(err) + re.Contains(string(output), "already been deprecated") // set enable-placement-rules twice, make sure it does not return error. args1 = []string{"-u", pdAddr, "config", "set", "enable-placement-rules", "true"} _, err = pdctl.ExecuteCommand(cmd, args1...) - c.Assert(err, IsNil) + re.NoError(err) args1 = []string{"-u", pdAddr, "config", "set", "enable-placement-rules", "true"} _, err = pdctl.ExecuteCommand(cmd, args1...) 
- c.Assert(err, IsNil) + re.NoError(err) } -func (s *configTestSuite) TestPlacementRules(c *C) { +func TestPlacementRules(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -289,23 +281,22 @@ func (s *configTestSuite) TestPlacementRules(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") // test show var rules []placement.Rule output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "show") - c.Assert(err, IsNil) - err = json.Unmarshal(output, &rules) - c.Assert(err, IsNil) - c.Assert(rules, HasLen, 1) - c.Assert(rules[0].Key(), Equals, [2]string{"pd", "default"}) + re.NoError(err) + re.NoError(json.Unmarshal(output, &rules)) + re.Len(rules, 1) + re.Equal([2]string{"pd", "default"}, rules[0].Key()) f, _ := os.CreateTemp("/tmp", "pd_tests") fname := f.Name() @@ -313,11 +304,11 @@ func (s *configTestSuite) TestPlacementRules(c *C) { // test load _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, _ := os.ReadFile(fname) - c.Assert(json.Unmarshal(b, &rules), IsNil) - c.Assert(rules, HasLen, 1) - c.Assert(rules[0].Key(), Equals, [2]string{"pd", "default"}) + re.NoError(json.Unmarshal(b, &rules)) + re.Len(rules, 1) + re.Equal([2]string{"pd", "default"}, rules[0].Key()) // test save rules = append(rules, placement.Rule{ @@ -334,39 +325,38 @@ func (s *configTestSuite) TestPlacementRules(c *C) { b, _ = json.Marshal(rules) os.WriteFile(fname, b, 0600) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname) - c.Assert(err, IsNil) + re.NoError(err) // test show group var rules2 []placement.Rule output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "show", "--group=pd") - c.Assert(err, IsNil) - err = json.Unmarshal(output, &rules2) - c.Assert(err, IsNil) - c.Assert(rules2, HasLen, 2) - c.Assert(rules2[0].Key(), Equals, [2]string{"pd", "default"}) - c.Assert(rules2[1].Key(), Equals, [2]string{"pd", "test1"}) + re.NoError(err) + re.NoError(json.Unmarshal(output, &rules2)) + re.Len(rules2, 2) + re.Equal([2]string{"pd", "default"}, rules2[0].Key()) + re.Equal([2]string{"pd", "test1"}, rules2[1].Key()) // test delete rules[0].Count = 0 b, _ = json.Marshal(rules) os.WriteFile(fname, b, 0600) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname) - c.Assert(err, IsNil) + re.NoError(err) output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "show", "--group=pd") - c.Assert(err, IsNil) - err = json.Unmarshal(output, &rules) - c.Assert(err, IsNil) - c.Assert(rules, HasLen, 1) - c.Assert(rules[0].Key(), Equals, [2]string{"pd", 
"test1"}) + re.NoError(err) + re.NoError(json.Unmarshal(output, &rules)) + re.Len(rules, 1) + re.Equal([2]string{"pd", "test1"}, rules[0].Key()) } -func (s *configTestSuite) TestPlacementRuleGroups(c *C) { +func TestPlacementRuleGroups(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -377,60 +367,59 @@ func (s *configTestSuite) TestPlacementRuleGroups(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") // test show var group placement.RuleGroup output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "pd") - c.Assert(err, IsNil) - err = json.Unmarshal(output, &group) - c.Assert(err, IsNil) - c.Assert(group, DeepEquals, placement.RuleGroup{ID: "pd"}) + re.NoError(err) + re.NoError(json.Unmarshal(output, &group)) + re.Equal(placement.RuleGroup{ID: "pd"}, group) // test set output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "set", "pd", "42", "true") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "set", "group2", "100", "false") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") // show all var groups []placement.RuleGroup output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show") - c.Assert(err, IsNil) - err = json.Unmarshal(output, &groups) - c.Assert(err, IsNil) - c.Assert(groups, DeepEquals, []placement.RuleGroup{ + re.NoError(err) + re.NoError(json.Unmarshal(output, &groups)) + re.Equal([]placement.RuleGroup{ {ID: "pd", Index: 42, Override: true}, {ID: "group2", Index: 100, Override: false}, - }) + }, groups) // delete output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "delete", "group2") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") // show again output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "group2") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "404"), IsTrue) + re.NoError(err) + re.Contains(string(output), "404") } -func (s *configTestSuite) TestPlacementRuleBundle(c *C) { +func TestPlacementRuleBundle(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = 
cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -441,25 +430,24 @@ func (s *configTestSuite) TestPlacementRuleBundle(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") // test get var bundle placement.GroupBundle output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "get", "pd") - c.Assert(err, IsNil) - err = json.Unmarshal(output, &bundle) - c.Assert(err, IsNil) - c.Assert(bundle, DeepEquals, placement.GroupBundle{ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}) + re.NoError(err) + re.NoError(json.Unmarshal(output, &bundle)) + re.Equal(placement.GroupBundle{ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, bundle) f, err := os.CreateTemp("/tmp", "pd_tests") - c.Assert(err, IsNil) + re.NoError(err) fname := f.Name() f.Close() defer func() { @@ -469,106 +457,107 @@ func (s *configTestSuite) TestPlacementRuleBundle(c *C) { // test load var bundles []placement.GroupBundle _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, _ := os.ReadFile(fname) - c.Assert(json.Unmarshal(b, &bundles), IsNil) - c.Assert(bundles, HasLen, 1) - c.Assert(bundles[0], DeepEquals, placement.GroupBundle{ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}) + re.NoError(json.Unmarshal(b, &bundles)) + re.Len(bundles, 1) + re.Equal(placement.GroupBundle{ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, bundles[0]) // test set bundle.ID = "pe" bundle.Rules[0].GroupID = "pe" b, err = json.Marshal(bundle) - c.Assert(err, IsNil) - c.Assert(os.WriteFile(fname, b, 0600), IsNil) + re.NoError(err) + re.NoError(os.WriteFile(fname, b, 0600)) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) - c.Assert(err, IsNil) + re.NoError(err) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, _ = os.ReadFile(fname) - c.Assert(json.Unmarshal(b, &bundles), IsNil) - assertBundles(bundles, []placement.GroupBundle{ + re.NoError(json.Unmarshal(b, &bundles)) + assertBundles(re, bundles, []placement.GroupBundle{ {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, - }, c) + }) // test delete _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "pd") - c.Assert(err, IsNil) + re.NoError(err) 
_, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, _ = os.ReadFile(fname) - c.Assert(json.Unmarshal(b, &bundles), IsNil) - assertBundles(bundles, []placement.GroupBundle{ + re.NoError(json.Unmarshal(b, &bundles)) + assertBundles(re, bundles, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, - }, c) + }) // test delete regexp bundle.ID = "pf" bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}} b, err = json.Marshal(bundle) - c.Assert(err, IsNil) - c.Assert(os.WriteFile(fname, b, 0600), IsNil) + re.NoError(err) + re.NoError(os.WriteFile(fname, b, 0600)) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) - c.Assert(err, IsNil) + re.NoError(err) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "--regexp", ".*f") - c.Assert(err, IsNil) + re.NoError(err) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, _ = os.ReadFile(fname) - c.Assert(json.Unmarshal(b, &bundles), IsNil) - assertBundles(bundles, []placement.GroupBundle{ + re.NoError(json.Unmarshal(b, &bundles)) + assertBundles(re, bundles, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, - }, c) + }) // test save bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}} bundles = append(bundles, bundle) b, err = json.Marshal(bundles) - c.Assert(err, IsNil) - c.Assert(os.WriteFile(fname, b, 0600), IsNil) + re.NoError(err) + re.NoError(os.WriteFile(fname, b, 0600)) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname) - c.Assert(err, IsNil) + re.NoError(err) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, err = os.ReadFile(fname) - c.Assert(err, IsNil) - c.Assert(json.Unmarshal(b, &bundles), IsNil) - assertBundles(bundles, []placement.GroupBundle{ + re.NoError(err) + re.NoError(json.Unmarshal(b, &bundles)) + assertBundles(re, bundles, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}}, - }, c) + }) // partial update, so still one group is left, no error bundles = []placement.GroupBundle{{ID: "pe", Rules: []*placement.Rule{}}} b, err = json.Marshal(bundles) - c.Assert(err, IsNil) - c.Assert(os.WriteFile(fname, b, 0600), IsNil) + re.NoError(err) + re.NoError(os.WriteFile(fname, b, 0600)) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname, "--partial") - c.Assert(err, IsNil) + re.NoError(err) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname) - c.Assert(err, IsNil) + re.NoError(err) b, err = os.ReadFile(fname) - c.Assert(err, IsNil) - c.Assert(json.Unmarshal(b, &bundles), IsNil) - assertBundles(bundles, []placement.GroupBundle{ + re.NoError(err) 
+ re.NoError(json.Unmarshal(b, &bundles)) + assertBundles(re, bundles, []placement.GroupBundle{ {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}}, - }, c) + }) } -func (s *configTestSuite) TestReplicationMode(c *C) { +func TestReplicationMode(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -579,9 +568,9 @@ func (s *configTestSuite) TestReplicationMode(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() conf := config.ReplicationModeConfig{ @@ -593,42 +582,43 @@ func (s *configTestSuite) TestReplicationMode(c *C) { } check := func() { output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "show", "replication-mode") - c.Assert(err, IsNil) + re.NoError(err) var conf2 config.ReplicationModeConfig - json.Unmarshal(output, &conf2) - c.Assert(conf2, DeepEquals, conf) + re.NoError(json.Unmarshal(output, &conf2)) + re.Equal(conf, conf2) } check() _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "replication-mode", "dr-auto-sync") - c.Assert(err, IsNil) + re.NoError(err) conf.ReplicationMode = "dr-auto-sync" check() _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "replication-mode", "dr-auto-sync", "label-key", "foobar") - c.Assert(err, IsNil) + re.NoError(err) conf.DRAutoSync.LabelKey = "foobar" check() _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "replication-mode", "dr-auto-sync", "primary-replicas", "5") - c.Assert(err, IsNil) + re.NoError(err) conf.DRAutoSync.PrimaryReplicas = 5 check() _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "replication-mode", "dr-auto-sync", "wait-store-timeout", "10m") - c.Assert(err, IsNil) + re.NoError(err) conf.DRAutoSync.WaitStoreTimeout = typeutil.NewDuration(time.Minute * 10) check() } -func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { +func TestUpdateDefaultReplicaConfig(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -638,83 +628,77 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { State: metapb.StoreState_Up, } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() checkMaxReplicas := func(expect uint64) { args := []string{"-u", pdAddr, "config", "show", "replication"} output, err := pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) replicationCfg := config.ReplicationConfig{} - c.Assert(json.Unmarshal(output, &replicationCfg), IsNil) - c.Assert(replicationCfg.MaxReplicas, Equals, expect) + re.NoError(json.Unmarshal(output, &replicationCfg)) + re.Equal(expect, replicationCfg.MaxReplicas) } checkLocaltionLabels := func(expect int) { args := []string{"-u", pdAddr, "config", "show", "replication"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) replicationCfg := config.ReplicationConfig{} - c.Assert(json.Unmarshal(output, &replicationCfg), IsNil) - c.Assert(replicationCfg.LocationLabels, HasLen, expect) + re.NoError(json.Unmarshal(output, &replicationCfg)) + re.Len(replicationCfg.LocationLabels, expect) } checkRuleCount := func(expect int) { args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) rule := placement.Rule{} - c.Assert(json.Unmarshal(output, &rule), IsNil) - c.Assert(rule.Count, Equals, expect) + re.NoError(json.Unmarshal(output, &rule)) + re.Equal(expect, rule.Count) } checkRuleLocationLabels := func(expect int) { args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) rule := placement.Rule{} - c.Assert(json.Unmarshal(output, &rule), IsNil) - c.Assert(rule.LocationLabels, HasLen, expect) + re.NoError(json.Unmarshal(output, &rule)) + re.Len(rule.LocationLabels, expect) } // update successfully when placement rules is not enabled. output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "max-replicas", "2") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") checkMaxReplicas(2) output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "location-labels", "zone,host") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") checkLocaltionLabels(2) checkRuleLocationLabels(2) // update successfully when only one default rule exists. output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "max-replicas", "3") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") checkMaxReplicas(3) checkRuleCount(3) output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "location-labels", "host") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") checkLocaltionLabels(1) checkRuleLocationLabels(1) // update unsuccessfully when many rule exists. 
- f, _ := os.CreateTemp("/tmp", "pd_tests") - fname := f.Name() - f.Close() - defer func() { - os.RemoveAll(fname) - }() - + fname := t.TempDir() rules := []placement.Rule{ { GroupID: "pd", @@ -724,28 +708,29 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { }, } b, err := json.Marshal(rules) - c.Assert(err, IsNil) + re.NoError(err) os.WriteFile(fname, b, 0600) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname) - c.Assert(err, IsNil) + re.NoError(err) checkMaxReplicas(3) checkRuleCount(3) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "max-replicas", "4") - c.Assert(err, IsNil) + re.NoError(err) checkMaxReplicas(4) checkRuleCount(4) checkLocaltionLabels(1) checkRuleLocationLabels(1) } -func (s *configTestSuite) TestPDServerConfig(c *C) { +func TestPDServerConfig(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -756,52 +741,52 @@ func (s *configTestSuite) TestPDServerConfig(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) svr := leaderServer.GetServer() - pdctl.MustPutStore(c, svr, store) + pdctl.MustPutStore(re, svr, store) defer cluster.Destroy() output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "show", "server") - c.Assert(err, IsNil) + re.NoError(err) var conf config.PDServerConfig - json.Unmarshal(output, &conf) - - c.Assert(conf.UseRegionStorage, Equals, bool(true)) - c.Assert(conf.MaxResetTSGap.Duration, Equals, 24*time.Hour) - c.Assert(conf.KeyType, Equals, "table") - c.Assert(conf.RuntimeServices, DeepEquals, typeutil.StringSlice([]string{})) - c.Assert(conf.MetricStorage, Equals, "") - c.Assert(conf.DashboardAddress, Equals, "auto") - c.Assert(conf.FlowRoundByDigit, Equals, int(3)) + re.NoError(json.Unmarshal(output, &conf)) + + re.True(conf.UseRegionStorage) + re.Equal(24*time.Hour, conf.MaxResetTSGap.Duration) + re.Equal("table", conf.KeyType) + re.Equal(typeutil.StringSlice([]string{}), conf.RuntimeServices) + re.Equal("", conf.MetricStorage) + re.Equal("auto", conf.DashboardAddress) + re.Equal(int(3), conf.FlowRoundByDigit) } -func assertBundles(a, b []placement.GroupBundle, c *C) { - c.Assert(len(a), Equals, len(b)) +func assertBundles(re *require.Assertions, a, b []placement.GroupBundle) { + re.Equal(len(a), len(b)) for i := 0; i < len(a); i++ { - assertBundle(a[i], b[i], c) + assertBundle(re, a[i], b[i]) } } -func assertBundle(a, b placement.GroupBundle, c *C) { - c.Assert(a.ID, Equals, b.ID) - c.Assert(a.Index, Equals, b.Index) - c.Assert(a.Override, Equals, b.Override) - c.Assert(len(a.Rules), Equals, len(b.Rules)) +func assertBundle(re *require.Assertions, a, b placement.GroupBundle) { + re.Equal(a.ID, b.ID) + re.Equal(a.Index, b.Index) + re.Equal(a.Override, b.Override) + re.Equal(len(a.Rules), len(b.Rules)) for i := 0; i < len(a.Rules); i++ { - assertRule(a.Rules[i], b.Rules[i], c) + assertRule(re, a.Rules[i], b.Rules[i]) } } -func assertRule(a, b *placement.Rule, c *C) { - c.Assert(a.GroupID, Equals, b.GroupID) - c.Assert(a.ID, Equals, b.ID) - c.Assert(a.Index, Equals, b.Index) - c.Assert(a.Override, 
Equals, b.Override) - c.Assert(bytes.Equal(a.StartKey, b.StartKey), IsTrue) - c.Assert(bytes.Equal(a.EndKey, b.EndKey), IsTrue) - c.Assert(a.Role, Equals, b.Role) - c.Assert(a.Count, Equals, b.Count) - c.Assert(a.LabelConstraints, DeepEquals, b.LabelConstraints) - c.Assert(a.LocationLabels, DeepEquals, b.LocationLabels) - c.Assert(a.IsolationLevel, Equals, b.IsolationLevel) +func assertRule(re *require.Assertions, a, b *placement.Rule) { + re.Equal(a.GroupID, b.GroupID) + re.Equal(a.ID, b.ID) + re.Equal(a.Index, b.Index) + re.Equal(a.Override, b.Override) + re.Equal(a.StartKey, b.StartKey) + re.Equal(a.EndKey, b.EndKey) + re.Equal(a.Role, b.Role) + re.Equal(a.Count, b.Count) + re.Equal(a.LabelConstraints, b.LabelConstraints) + re.Equal(a.LocationLabels, b.LocationLabels) + re.Equal(a.IsolationLevel, b.IsolationLevel) } diff --git a/tests/pdctl/global_test.go b/tests/pdctl/global_test.go index de165eea600..c182c739403 100644 --- a/tests/pdctl/global_test.go +++ b/tests/pdctl/global_test.go @@ -20,8 +20,8 @@ import ( "net/http" "testing" - . "github.com/pingcap/check" "github.com/pingcap/log" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/apiutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server" @@ -29,15 +29,8 @@ import ( "go.uber.org/zap" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&globalTestSuite{}) - -type globalTestSuite struct{} - -func (s *globalTestSuite) TestSendAndGetComponent(c *C) { +func TestSendAndGetComponent(t *testing.T) { + re := require.New(t) handler := func(ctx context.Context, s *server.Server) (http.Handler, server.ServiceGroup, error) { mux := http.NewServeMux() mux.HandleFunc("/pd/api/v1/health", func(w http.ResponseWriter, r *http.Request) { @@ -46,7 +39,7 @@ func (s *globalTestSuite) TestSendAndGetComponent(c *C) { log.Info("header", zap.String("key", k)) } log.Info("component", zap.String("component", component)) - c.Assert(component, Equals, "pdctl") + re.Equal("pdctl", component) fmt.Fprint(w, component) }) info := server.ServiceGroup{ @@ -54,12 +47,12 @@ func (s *globalTestSuite) TestSendAndGetComponent(c *C) { } return mux, info, nil } - cfg := server.NewTestSingleConfig(checkerWithNilAssert(c)) + cfg := server.NewTestSingleConfig(checkerWithNilAssert(re)) ctx, cancel := context.WithCancel(context.Background()) svr, err := server.CreateServer(ctx, cfg, handler) - c.Assert(err, IsNil) + re.NoError(err) err = svr.Run() - c.Assert(err, IsNil) + re.NoError(err) pdAddr := svr.GetAddr() defer func() { cancel() @@ -70,6 +63,6 @@ func (s *globalTestSuite) TestSendAndGetComponent(c *C) { cmd := cmd.GetRootCmd() args := []string{"-u", pdAddr, "health"} output, err := ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(string(output), Equals, "pdctl\n") + re.NoError(err) + re.Equal("pdctl\n", string(output)) } diff --git a/tests/pdctl/health/health_test.go b/tests/pdctl/health/health_test.go index 06e287dcb36..bc808a36750 100644 --- a/tests/pdctl/health/health_test.go +++ b/tests/pdctl/health/health_test.go @@ -19,7 +19,7 @@ import ( "encoding/json" "testing" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/cluster" "github.com/tikv/pd/tests" @@ -27,31 +27,24 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&healthTestSuite{}) - -type healthTestSuite struct{} - -func (s *healthTestSuite) TestHealth(c *C) { +func TestHealth(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() tc, err := tests.NewTestCluster(ctx, 3) - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) pdAddr := tc.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() defer tc.Destroy() client := tc.GetEtcdClient() members, err := cluster.GetMembers(client) - c.Assert(err, IsNil) + re.NoError(err) healthMembers := cluster.CheckHealth(tc.GetHTTPClient(), members) healths := []api.Health{} for _, member := range members { @@ -70,9 +63,8 @@ func (s *healthTestSuite) TestHealth(c *C) { // health command args := []string{"-u", pdAddr, "health"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) h := make([]api.Health, len(healths)) - c.Assert(json.Unmarshal(output, &h), IsNil) - c.Assert(err, IsNil) - c.Assert(h, DeepEquals, healths) + re.NoError(json.Unmarshal(output, &h)) + re.Equal(healths, h) } diff --git a/tests/pdctl/helper.go b/tests/pdctl/helper.go index b5160a83f30..c5aaf948aa2 100644 --- a/tests/pdctl/helper.go +++ b/tests/pdctl/helper.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/spf13/cobra" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/assertutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" @@ -43,9 +44,9 @@ func ExecuteCommand(root *cobra.Command, args ...string) (output []byte, err err } // CheckStoresInfo is used to check the test results. -// CheckStoresInfo will not check Store.State because this feild has been omitted pdctl output -func CheckStoresInfo(c *check.C, stores []*api.StoreInfo, want []*api.StoreInfo) { - c.Assert(len(stores), check.Equals, len(want)) +// CheckStoresInfo will not check Store.State because this field has been omitted pdctl output +func CheckStoresInfo(re *require.Assertions, stores []*api.StoreInfo, want []*api.StoreInfo) { + re.Equal(len(want), len(stores)) mapWant := make(map[uint64]*api.StoreInfo) for _, s := range want { if _, ok := mapWant[s.Store.Id]; !ok { @@ -60,24 +61,24 @@ func CheckStoresInfo(c *check.C, stores []*api.StoreInfo, want []*api.StoreInfo) obtained.NodeState, expected.NodeState = 0, 0 // Ignore lastHeartbeat obtained.LastHeartbeat, expected.LastHeartbeat = 0, 0 - c.Assert(obtained, check.DeepEquals, expected) + re.Equal(expected, obtained) obtainedStateName := s.Store.StateName expectedStateName := mapWant[obtained.Id].Store.StateName - c.Assert(obtainedStateName, check.Equals, expectedStateName) + re.Equal(expectedStateName, obtainedStateName) } } // CheckRegionInfo is used to check the test results. 
-func CheckRegionInfo(c *check.C, output *api.RegionInfo, expected *core.RegionInfo) { +func CheckRegionInfo(re *require.Assertions, output *api.RegionInfo, expected *core.RegionInfo) { region := api.NewRegionInfo(expected) output.Adjust() - c.Assert(output, check.DeepEquals, region) + re.Equal(region, output) } // CheckRegionsInfo is used to check the test results. -func CheckRegionsInfo(c *check.C, output *api.RegionsInfo, expected []*core.RegionInfo) { - c.Assert(output.Count, check.Equals, len(expected)) +func CheckRegionsInfo(re *require.Assertions, output *api.RegionsInfo, expected []*core.RegionInfo) { + re.Len(expected, output.Count) got := output.Regions sort.Slice(got, func(i, j int) bool { return got[i].ID < got[j].ID @@ -86,12 +87,26 @@ func CheckRegionsInfo(c *check.C, output *api.RegionsInfo, expected []*core.Regi return expected[i].GetID() < expected[j].GetID() }) for i, region := range expected { - CheckRegionInfo(c, &got[i], region) + CheckRegionInfo(re, &got[i], region) } } // MustPutStore is used for test purpose. -func MustPutStore(c *check.C, svr *server.Server, store *metapb.Store) { +func MustPutStore(re *require.Assertions, svr *server.Server, store *metapb.Store) { + store.Address = fmt.Sprintf("tikv%d", store.GetId()) + if len(store.Version) == 0 { + store.Version = versioninfo.MinSupportedVersion(versioninfo.Version2_0).String() + } + grpcServer := &server.GrpcServer{Server: svr} + _, err := grpcServer.PutStore(context.Background(), &pdpb.PutStoreRequest{ + Header: &pdpb.RequestHeader{ClusterId: svr.ClusterID()}, + Store: store, + }) + re.NoError(err) +} + +// MustPutStoreWithCheck is a temporary function for test purpose. +func MustPutStoreWithCheck(c *check.C, svr *server.Server, store *metapb.Store) { store.Address = fmt.Sprintf("tikv%d", store.GetId()) if len(store.Version) == 0 { store.Version = versioninfo.MinSupportedVersion(versioninfo.Version2_0).String() @@ -105,7 +120,26 @@ func MustPutStore(c *check.C, svr *server.Server, store *metapb.Store) { } // MustPutRegion is used for test purpose. -func MustPutRegion(c *check.C, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { +func MustPutRegion(re *require.Assertions, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { + leader := &metapb.Peer{ + Id: regionID, + StoreId: storeID, + } + metaRegion := &metapb.Region{ + Id: regionID, + StartKey: start, + EndKey: end, + Peers: []*metapb.Peer{leader}, + RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1}, + } + r := core.NewRegionInfo(metaRegion, leader, opts...) + err := cluster.HandleRegionHeartbeat(r) + re.NoError(err) + return r +} + +// MustPutRegionWithCheck is a temporary function for test purpose. 
+func MustPutRegionWithCheck(c *check.C, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { leader := &metapb.Peer{ Id: regionID, StoreId: storeID, @@ -123,10 +157,12 @@ func MustPutRegion(c *check.C, cluster *tests.TestCluster, regionID, storeID uin return r } -func checkerWithNilAssert(c *check.C) *assertutil.Checker { - checker := assertutil.NewChecker(c.FailNow) +func checkerWithNilAssert(re *require.Assertions) *assertutil.Checker { + checker := assertutil.NewChecker(func() { + re.FailNow("should be nil") + }) checker.IsNil = func(obtained interface{}) { - c.Assert(obtained, check.IsNil) + re.Nil(obtained) } return checker } diff --git a/tests/pdctl/hot/hot_test.go b/tests/pdctl/hot/hot_test.go index 06a657df7d7..74148b40955 100644 --- a/tests/pdctl/hot/hot_test.go +++ b/tests/pdctl/hot/hot_test.go @@ -22,9 +22,9 @@ import ( "time" "github.com/gogo/protobuf/proto" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" @@ -35,22 +35,15 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&hotTestSuite{}) - -type hotTestSuite struct{} - -func (s *hotTestSuite) TestHot(c *C) { +func TestHot(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -68,9 +61,9 @@ func (s *hotTestSuite) TestHot(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) - pdctl.MustPutStore(c, leaderServer.GetServer(), store1) - pdctl.MustPutStore(c, leaderServer.GetServer(), store2) + re.NoError(leaderServer.BootstrapCluster()) + pdctl.MustPutStore(re, leaderServer.GetServer(), store1) + pdctl.MustPutStore(re, leaderServer.GetServer(), store2) defer cluster.Destroy() // test hot store @@ -99,33 +92,33 @@ func (s *hotTestSuite) TestHot(c *C) { args := []string{"-u", pdAddr, "hot", "store"} output, err := pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) hotStores := api.HotStoreStats{} - c.Assert(json.Unmarshal(output, &hotStores), IsNil) - c.Assert(hotStores.BytesWriteStats[1], Equals, float64(bytesWritten)/statistics.StoreHeartBeatReportInterval) - c.Assert(hotStores.BytesReadStats[1], Equals, float64(bytesRead)/statistics.StoreHeartBeatReportInterval) - c.Assert(hotStores.KeysWriteStats[1], Equals, float64(keysWritten)/statistics.StoreHeartBeatReportInterval) - c.Assert(hotStores.KeysReadStats[1], Equals, float64(keysRead)/statistics.StoreHeartBeatReportInterval) - c.Assert(hotStores.BytesWriteStats[2], Equals, float64(bytesWritten)) - c.Assert(hotStores.KeysWriteStats[2], Equals, float64(keysWritten)) + re.NoError(json.Unmarshal(output, &hotStores)) + re.Equal(float64(bytesWritten)/statistics.StoreHeartBeatReportInterval, hotStores.BytesWriteStats[1]) + re.Equal(float64(bytesRead)/statistics.StoreHeartBeatReportInterval, hotStores.BytesReadStats[1]) + re.Equal(float64(keysWritten)/statistics.StoreHeartBeatReportInterval, hotStores.KeysWriteStats[1]) + re.Equal(float64(keysRead)/statistics.StoreHeartBeatReportInterval, hotStores.KeysReadStats[1]) + re.Equal(float64(bytesWritten), hotStores.BytesWriteStats[2]) + re.Equal(float64(keysWritten), hotStores.KeysWriteStats[2]) // test hot region args = []string{"-u", pdAddr, "config", "set", "hot-region-cache-hits-threshold", "0"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) hotStoreID := store1.Id count := 0 testHot := func(hotRegionID, hotStoreID uint64, hotType string) { args = []string{"-u", pdAddr, "hot", hotType} - output, e := pdctl.ExecuteCommand(cmd, args...) + output, err := pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) hotRegion := statistics.StoreHotPeersInfos{} - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegion), IsNil) - c.Assert(hotRegion.AsLeader, HasKey, hotStoreID) - c.Assert(hotRegion.AsLeader[hotStoreID].Count, Equals, count) + re.NoError(json.Unmarshal(output, &hotRegion)) + re.Contains(hotRegion.AsLeader, hotStoreID) + re.Equal(count, hotRegion.AsLeader[hotStoreID].Count) if count > 0 { - c.Assert(hotRegion.AsLeader[hotStoreID].Stats[count-1].RegionID, Equals, hotRegionID) + re.Equal(hotRegionID, hotRegion.AsLeader[hotStoreID].Stats[count-1].RegionID) } } @@ -159,7 +152,11 @@ func (s *hotTestSuite) TestHot(c *C) { } testHot(hotRegionID, hotStoreID, "read") case "write": - pdctl.MustPutRegion(c, cluster, hotRegionID, hotStoreID, []byte("c"), []byte("d"), core.SetWrittenBytes(1000000000*reportInterval), core.SetReportInterval(reportInterval)) + pdctl.MustPutRegion( + re, cluster, + hotRegionID, hotStoreID, + []byte("c"), []byte("d"), + core.SetWrittenBytes(1000000000*reportInterval), core.SetReportInterval(reportInterval)) time.Sleep(5000 * time.Millisecond) if reportInterval >= statistics.WriteReportInterval { count++ @@ -189,14 +186,15 @@ func (s *hotTestSuite) TestHot(c *C) { testCommand(reportIntervals, "read") } -func (s *hotTestSuite) TestHotWithStoreID(c *C) { +func TestHotWithStoreID(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, serverName string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 }) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() 
@@ -215,39 +213,40 @@ func (s *hotTestSuite) TestHotWithStoreID(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 3, 1, []byte("e"), []byte("f"), core.SetWrittenBytes(9000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f"), core.SetWrittenBytes(9000000000), core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) args := []string{"-u", pdAddr, "hot", "write", "1"} - output, e := pdctl.ExecuteCommand(cmd, args...) + output, err := pdctl.ExecuteCommand(cmd, args...) hotRegion := statistics.StoreHotPeersInfos{} - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegion), IsNil) - c.Assert(hotRegion.AsLeader, HasLen, 1) - c.Assert(hotRegion.AsLeader[1].Count, Equals, 2) - c.Assert(hotRegion.AsLeader[1].TotalBytesRate, Equals, float64(200000000)) + re.NoError(err) + re.NoError(json.Unmarshal(output, &hotRegion)) + re.Len(hotRegion.AsLeader, 1) + re.Equal(2, hotRegion.AsLeader[1].Count) + re.Equal(float64(200000000), hotRegion.AsLeader[1].TotalBytesRate) args = []string{"-u", pdAddr, "hot", "write", "1", "2"} - output, e = pdctl.ExecuteCommand(cmd, args...) + output, err = pdctl.ExecuteCommand(cmd, args...) 
+ re.NoError(err) hotRegion = statistics.StoreHotPeersInfos{} - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegion), IsNil) - c.Assert(hotRegion.AsLeader, HasLen, 2) - c.Assert(hotRegion.AsLeader[1].Count, Equals, 2) - c.Assert(hotRegion.AsLeader[2].Count, Equals, 1) - c.Assert(hotRegion.AsLeader[1].TotalBytesRate, Equals, float64(200000000)) - c.Assert(hotRegion.AsLeader[2].TotalBytesRate, Equals, float64(100000000)) + re.NoError(json.Unmarshal(output, &hotRegion)) + re.Len(hotRegion.AsLeader, 2) + re.Equal(2, hotRegion.AsLeader[1].Count) + re.Equal(1, hotRegion.AsLeader[2].Count) + re.Equal(float64(200000000), hotRegion.AsLeader[1].TotalBytesRate) + re.Equal(float64(100000000), hotRegion.AsLeader[2].TotalBytesRate) } -func (s *hotTestSuite) TestHistoryHotRegions(c *C) { +func TestHistoryHotRegions(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -258,9 +257,9 @@ func (s *hotTestSuite) TestHistoryHotRegions(c *C) { cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -284,16 +283,16 @@ func (s *hotTestSuite) TestHistoryHotRegions(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 3, 1, []byte("e"), []byte("f"), core.SetWrittenBytes(9000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 4, 3, []byte("g"), []byte("h"), core.SetWrittenBytes(9000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f"), core.SetWrittenBytes(9000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 4, 3, []byte("g"), []byte("h"), core.SetWrittenBytes(9000000000), core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) @@ -306,54 +305,54 @@ func (s *hotTestSuite) TestHistoryHotRegions(c *C) { "store_id", "1,4", "is_learner", "false", } - output, e := pdctl.ExecuteCommand(cmd, args...) + output, err := pdctl.ExecuteCommand(cmd, args...) 
hotRegions := storage.HistoryHotRegions{} - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegions), IsNil) + re.NoError(err) + re.NoError(json.Unmarshal(output, &hotRegions)) regions := hotRegions.HistoryHotRegion - c.Assert(len(regions), Equals, 1) - c.Assert(regions[0].RegionID, Equals, uint64(1)) - c.Assert(regions[0].StoreID, Equals, uint64(1)) - c.Assert(regions[0].HotRegionType, Equals, "write") + re.Len(regions, 1) + re.Equal(uint64(1), regions[0].RegionID) + re.Equal(uint64(1), regions[0].StoreID) + re.Equal("write", regions[0].HotRegionType) args = []string{"-u", pdAddr, "hot", "history", start, end, "hot_region_type", "write", "region_id", "1,2", "store_id", "1,2", } - output, e = pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegions), IsNil) + output, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.NoError(json.Unmarshal(output, &hotRegions)) regions = hotRegions.HistoryHotRegion - c.Assert(len(regions), Equals, 2) + re.Len(regions, 2) isSort := regions[0].UpdateTime > regions[1].UpdateTime || regions[0].RegionID < regions[1].RegionID - c.Assert(isSort, Equals, true) + re.True(isSort) args = []string{"-u", pdAddr, "hot", "history", start, end, "hot_region_type", "read", "is_leader", "false", "peer_id", "12", } - output, e = pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegions), IsNil) - c.Assert(len(hotRegions.HistoryHotRegion), Equals, 0) + output, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.NoError(json.Unmarshal(output, &hotRegions)) + re.Len(hotRegions.HistoryHotRegion, 0) args = []string{"-u", pdAddr, "hot", "history"} - output, e = pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegions), NotNil) + output, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Error(json.Unmarshal(output, &hotRegions)) args = []string{"-u", pdAddr, "hot", "history", start, end, "region_id", "dada", } - output, e = pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegions), NotNil) + output, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Error(json.Unmarshal(output, &hotRegions)) args = []string{"-u", pdAddr, "hot", "history", start, end, "region_ids", "12323", } - output, e = pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) - c.Assert(json.Unmarshal(output, &hotRegions), NotNil) + output, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Error(json.Unmarshal(output, &hotRegions)) } diff --git a/tests/pdctl/label/label_test.go b/tests/pdctl/label/label_test.go index 50a52413e82..ba31b1fb1d1 100644 --- a/tests/pdctl/label/label_test.go +++ b/tests/pdctl/label/label_test.go @@ -21,8 +21,8 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" @@ -30,21 +30,14 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&labelTestSuite{}) - -type labelTestSuite struct{} - -func (s *labelTestSuite) TestLabel(c *C) { +func TestLabel(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, serverName string) { cfg.Replication.StrictlyMatchLabel = false }) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -100,19 +93,19 @@ func (s *labelTestSuite) TestLabel(c *C) { }, } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store.Store.Store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store.Store.Store) } defer cluster.Destroy() // label command args := []string{"-u", pdAddr, "label"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) labels := make([]*metapb.StoreLabel, 0, len(stores)) - c.Assert(json.Unmarshal(output, &labels), IsNil) + re.NoError(json.Unmarshal(output, &labels)) got := make(map[string]struct{}) for _, l := range labels { if _, ok := got[strings.ToLower(l.Key+l.Value)]; !ok { @@ -129,21 +122,21 @@ func (s *labelTestSuite) TestLabel(c *C) { } } } - c.Assert(got, DeepEquals, expected) + re.Equal(expected, got) // label store command args = []string{"-u", pdAddr, "label", "store", "zone", "us-west"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storesInfo := new(api.StoresInfo) - c.Assert(json.Unmarshal(output, &storesInfo), IsNil) + re.NoError(json.Unmarshal(output, &storesInfo)) sss := []*api.StoreInfo{stores[0], stores[2]} - pdctl.CheckStoresInfo(c, storesInfo.Stores, sss) + pdctl.CheckStoresInfo(re, storesInfo.Stores, sss) // label isolation [label] args = []string{"-u", pdAddr, "label", "isolation"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "none"), IsTrue) - c.Assert(strings.Contains(string(output), "2"), IsTrue) + re.NoError(err) + re.Contains(string(output), "none") + re.Contains(string(output), "2") } diff --git a/tests/pdctl/log/log_test.go b/tests/pdctl/log/log_test.go index 6499b2694c7..7f2e4f20584 100644 --- a/tests/pdctl/log/log_test.go +++ b/tests/pdctl/log/log_test.go @@ -19,21 +19,16 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/server" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&logTestSuite{}) - type logTestSuite struct { + suite.Suite ctx context.Context cancel context.CancelFunc cluster *tests.TestCluster @@ -41,33 +36,36 @@ type logTestSuite struct { pdAddrs []string } -func (s *logTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) +func TestLogTestSuite(t *testing.T) { + suite.Run(t, new(logTestSuite)) +} + +func (suite *logTestSuite) SetupSuite() { + suite.ctx, suite.cancel = context.WithCancel(context.Background()) var err error - s.cluster, err = tests.NewTestCluster(s.ctx, 3) - c.Assert(err, IsNil) - err = s.cluster.RunInitialServers() - c.Assert(err, IsNil) - s.cluster.WaitLeader() - s.pdAddrs = s.cluster.GetConfig().GetClientURLs() + suite.cluster, err = tests.NewTestCluster(suite.ctx, 3) + suite.NoError(err) + suite.NoError(suite.cluster.RunInitialServers()) + suite.cluster.WaitLeader() + suite.pdAddrs = suite.cluster.GetConfig().GetClientURLs() store := &metapb.Store{ Id: 1, State: metapb.StoreState_Up, LastHeartbeat: time.Now().UnixNano(), } - leaderServer := s.cluster.GetServer(s.cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) - s.svr = leaderServer.GetServer() - pdctl.MustPutStore(c, s.svr, store) + leaderServer := suite.cluster.GetServer(suite.cluster.GetLeader()) + suite.NoError(leaderServer.BootstrapCluster()) + suite.svr = leaderServer.GetServer() + pdctl.MustPutStore(suite.Require(), suite.svr, store) } -func (s *logTestSuite) TearDownSuite(c *C) { - s.cluster.Destroy() - s.cancel() +func (suite *logTestSuite) TearDownSuite() { + suite.cancel() + suite.cluster.Destroy() } -func (s *logTestSuite) TestLog(c *C) { +func (suite *logTestSuite) TestLog() { cmd := pdctlCmd.GetRootCmd() var testCases = []struct { cmd []string @@ -75,35 +73,35 @@ func (s *logTestSuite) TestLog(c *C) { }{ // log [fatal|error|warn|info|debug] { - cmd: []string{"-u", s.pdAddrs[0], "log", "fatal"}, + cmd: []string{"-u", suite.pdAddrs[0], "log", "fatal"}, expect: "fatal", }, { - cmd: []string{"-u", s.pdAddrs[0], "log", "error"}, + cmd: []string{"-u", suite.pdAddrs[0], "log", "error"}, expect: "error", }, { - cmd: []string{"-u", s.pdAddrs[0], "log", "warn"}, + cmd: []string{"-u", suite.pdAddrs[0], "log", "warn"}, expect: "warn", }, { - cmd: []string{"-u", s.pdAddrs[0], "log", "info"}, + cmd: []string{"-u", suite.pdAddrs[0], "log", "info"}, expect: "info", }, { - cmd: []string{"-u", s.pdAddrs[0], "log", "debug"}, + cmd: []string{"-u", suite.pdAddrs[0], "log", "debug"}, expect: "debug", }, } for _, testCase := range testCases { _, err := pdctl.ExecuteCommand(cmd, testCase.cmd...) 
- c.Assert(err, IsNil) - c.Assert(s.svr.GetConfig().Log.Level, Equals, testCase.expect) + suite.NoError(err) + suite.Equal(testCase.expect, suite.svr.GetConfig().Log.Level) } } -func (s *logTestSuite) TestInstanceLog(c *C) { +func (suite *logTestSuite) TestInstanceLog() { cmd := pdctlCmd.GetRootCmd() var testCases = []struct { cmd []string @@ -112,29 +110,29 @@ func (s *logTestSuite) TestInstanceLog(c *C) { }{ // log [fatal|error|warn|info|debug] [address] { - cmd: []string{"-u", s.pdAddrs[0], "log", "debug", s.pdAddrs[0]}, - instance: s.pdAddrs[0], + cmd: []string{"-u", suite.pdAddrs[0], "log", "debug", suite.pdAddrs[0]}, + instance: suite.pdAddrs[0], expect: "debug", }, { - cmd: []string{"-u", s.pdAddrs[0], "log", "error", s.pdAddrs[1]}, - instance: s.pdAddrs[1], + cmd: []string{"-u", suite.pdAddrs[0], "log", "error", suite.pdAddrs[1]}, + instance: suite.pdAddrs[1], expect: "error", }, { - cmd: []string{"-u", s.pdAddrs[0], "log", "warn", s.pdAddrs[2]}, - instance: s.pdAddrs[2], + cmd: []string{"-u", suite.pdAddrs[0], "log", "warn", suite.pdAddrs[2]}, + instance: suite.pdAddrs[2], expect: "warn", }, } for _, testCase := range testCases { _, err := pdctl.ExecuteCommand(cmd, testCase.cmd...) - c.Assert(err, IsNil) - svrs := s.cluster.GetServers() + suite.NoError(err) + svrs := suite.cluster.GetServers() for _, svr := range svrs { if svr.GetAddr() == testCase.instance { - c.Assert(svr.GetConfig().Log.Level, Equals, testCase.expect) + suite.Equal(testCase.expect, svr.GetConfig().Log.Level) } } } diff --git a/tests/pdctl/member/member_test.go b/tests/pdctl/member/member_test.go index f85f2d946df..2c93a9c6c53 100644 --- a/tests/pdctl/member/member_test.go +++ b/tests/pdctl/member/member_test.go @@ -18,11 +18,10 @@ import ( "context" "encoding/json" "fmt" - "strings" "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/tests" @@ -30,26 +29,19 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&memberTestSuite{}) - -type memberTestSuite struct{} - -func (s *memberTestSuite) TestMember(c *C) { +func TestMember(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 3) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) pdAddr := cluster.GetConfig().GetClientURL() - c.Assert(err, IsNil) + re.NoError(err) cmd := pdctlCmd.GetRootCmd() svr := cluster.GetServer("pd2") id := svr.GetServerID() @@ -60,57 +52,56 @@ func (s *memberTestSuite) TestMember(c *C) { // member leader show args := []string{"-u", pdAddr, "member", "leader", "show"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) leader := pdpb.Member{} - c.Assert(json.Unmarshal(output, &leader), IsNil) - c.Assert(&leader, DeepEquals, svr.GetLeader()) + re.NoError(json.Unmarshal(output, &leader)) + re.Equal(svr.GetLeader(), &leader) // member leader transfer args = []string{"-u", pdAddr, "member", "leader", "transfer", "pd2"} _, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) - testutil.WaitUntil(c, func() bool { - return c.Check("pd2", Equals, svr.GetLeader().GetName()) + re.NoError(err) + testutil.Eventually(re, func() bool { + return svr.GetLeader().GetName() == "pd2" }) // member leader resign cluster.WaitLeader() args = []string{"-u", pdAddr, "member", "leader", "resign"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(strings.Contains(string(output), "Success"), IsTrue) - c.Assert(err, IsNil) - testutil.WaitUntil(c, func() bool { - return c.Check("pd2", Not(Equals), svr.GetLeader().GetName()) + re.Contains(string(output), "Success") + re.NoError(err) + testutil.Eventually(re, func() bool { + return svr.GetLeader().GetName() != "pd2" }) // member leader_priority cluster.WaitLeader() args = []string{"-u", pdAddr, "member", "leader_priority", name, "100"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) priority, err := svr.GetServer().GetMember().GetMemberLeaderPriority(id) - c.Assert(err, IsNil) - c.Assert(priority, Equals, 100) + re.NoError(err) + re.Equal(100, priority) // member delete name err = svr.Destroy() - c.Assert(err, IsNil) + re.NoError(err) members, err := etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 3) + re.NoError(err) + re.Len(members.Members, 3) args = []string{"-u", pdAddr, "member", "delete", "name", name} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) members, err = etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 2) + re.NoError(err) + re.Len(members.Members, 2) // member delete id args = []string{"-u", pdAddr, "member", "delete", "id", fmt.Sprint(id)} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) members, err = etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 2) - c.Succeed() + re.NoError(err) + re.Len(members.Members, 2) } diff --git a/tests/pdctl/operator/operator_test.go b/tests/pdctl/operator/operator_test.go index 73ae2687c80..b8433520381 100644 --- a/tests/pdctl/operator/operator_test.go +++ b/tests/pdctl/operator/operator_test.go @@ -21,8 +21,8 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/tests" @@ -30,31 +30,26 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&operatorTestSuite{}) - -type operatorTestSuite struct{} - -func (s *operatorTestSuite) TestOperator(c *C) { +func TestOperator(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() var err error - var t time.Time - t = t.Add(time.Hour) + var start time.Time + start = start.Add(time.Hour) cluster, err := tests.NewTestCluster(ctx, 1, // TODO: enable placementrules func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 2 conf.Replication.EnablePlacementRules = false }, - func(conf *config.Config, serverName string) { conf.Schedule.MaxStoreDownTime.Duration = time.Since(t) }, + func(conf *config.Config, serverName string) { + conf.Schedule.MaxStoreDownTime.Duration = time.Since(start) + }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -83,16 +78,16 @@ func (s *operatorTestSuite) TestOperator(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetPeers([]*metapb.Peer{ + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetPeers([]*metapb.Peer{ {Id: 1, StoreId: 1}, {Id: 2, StoreId: 2}, })) - pdctl.MustPutRegion(c, cluster, 3, 2, []byte("b"), []byte("c"), core.SetPeers([]*metapb.Peer{ + pdctl.MustPutRegion(re, cluster, 3, 2, []byte("b"), []byte("c"), core.SetPeers([]*metapb.Peer{ {Id: 3, StoreId: 1}, {Id: 4, StoreId: 2}, })) @@ -170,78 +165,80 @@ func (s *operatorTestSuite) TestOperator(c *C) { } for _, testCase := range testCases { - _, e := pdctl.ExecuteCommand(cmd, testCase.cmd...) - c.Assert(e, IsNil) - output, e := pdctl.ExecuteCommand(cmd, testCase.show...) - c.Assert(e, IsNil) - c.Assert(strings.Contains(string(output), testCase.expect), IsTrue) - t := time.Now() - _, e = pdctl.ExecuteCommand(cmd, testCase.reset...) - c.Assert(e, IsNil) - historyCmd := []string{"-u", pdAddr, "operator", "history", strconv.FormatInt(t.Unix(), 10)} - records, e := pdctl.ExecuteCommand(cmd, historyCmd...) - c.Assert(e, IsNil) - c.Assert(strings.Contains(string(records), "admin"), IsTrue) + _, err := pdctl.ExecuteCommand(cmd, testCase.cmd...) + re.NoError(err) + output, err := pdctl.ExecuteCommand(cmd, testCase.show...) + re.NoError(err) + re.Contains(string(output), testCase.expect) + start := time.Now() + _, err = pdctl.ExecuteCommand(cmd, testCase.reset...) + re.NoError(err) + historyCmd := []string{"-u", pdAddr, "operator", "history", strconv.FormatInt(start.Unix(), 10)} + records, err := pdctl.ExecuteCommand(cmd, historyCmd...) + re.NoError(err) + re.Contains(string(records), "admin") } // operator add merge-region args := []string{"-u", pdAddr, "operator", "add", "merge-region", "1", "3"} _, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "operator", "show"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "merge region 1 into region 3"), IsTrue) + re.NoError(err) + re.Contains(string(output), "merge region 1 into region 3") args = []string{"-u", pdAddr, "operator", "remove", "1"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "operator", "remove", "3"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) _, err = pdctl.ExecuteCommand(cmd, "config", "set", "enable-placement-rules", "true") - c.Assert(err, IsNil) + re.NoError(err) output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-region", "1", "2", "3") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "not supported"), IsTrue) + re.NoError(err) + re.Contains(string(output), "not supported") output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-region", "1", "2", "follower", "3") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "not match"), IsTrue) + re.NoError(err) + re.Contains(string(output), "not match") output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-peer", "1", "2", "4") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "is unhealthy"), IsTrue) + re.NoError(err) + re.Contains(string(output), "is unhealthy") output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-region", "1", "2", "leader", "4", "follower") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "is unhealthy"), IsTrue) + re.NoError(err) + re.Contains(string(output), "is unhealthy") output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-region", "1", "2", "follower", "leader", "3", "follower") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "invalid"), IsTrue) + re.NoError(err) + re.Contains(string(output), "invalid") output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-region", "1", "leader", "2", "follower", "3") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "invalid"), IsTrue) + re.NoError(err) + re.Contains(string(output), "invalid") output, err = pdctl.ExecuteCommand(cmd, "operator", "add", "transfer-region", "1", "2", "leader", "3", "follower") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "operator", "remove", "1") - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Success!") _, err = pdctl.ExecuteCommand(cmd, "config", "set", "enable-placement-rules", "false") - c.Assert(err, IsNil) + re.NoError(err) // operator add scatter-region args = []string{"-u", pdAddr, "operator", "add", "scatter-region", "3"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "operator", "add", "scatter-region", "1"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "operator", "show", "region"} output, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "scatter-region"), IsTrue) + re.NoError(err) + re.Contains(string(output), "scatter-region") // test echo, as the scatter region result is random, both region 1 and region 3 can be the region to be scattered output1, _ := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "operator", "remove", "1") output2, _ := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "operator", "remove", "3") - c.Assert(strings.Contains(string(output1), "Success!") || strings.Contains(string(output2), "Success!"), IsTrue) + re.Condition(func() bool { + return strings.Contains(string(output1), "Success!") || strings.Contains(string(output2), "Success!") + }) } diff --git a/tests/pdctl/region/region_test.go b/tests/pdctl/region/region_test.go index dd83accea55..951433bd432 100644 --- a/tests/pdctl/region/region_test.go +++ b/tests/pdctl/region/region_test.go @@ -17,13 +17,12 @@ package region_test import ( "context" "encoding/json" - "strings" "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/core" "github.com/tikv/pd/tests" @@ -31,21 +30,14 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(®ionTestSuite{}) - -type regionTestSuite struct{} - -func (s *regionTestSuite) TestRegionKeyFormat(c *C) { +func TestRegionKeyFormat(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() url := cluster.GetConfig().GetClientURL() store := &metapb.Store{ @@ -54,22 +46,23 @@ func (s *regionTestSuite) TestRegionKeyFormat(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + re.NoError(leaderServer.BootstrapCluster()) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) cmd := pdctlCmd.GetRootCmd() - output, e := pdctl.ExecuteCommand(cmd, "-u", url, "region", "key", "--format=raw", " ") - c.Assert(e, IsNil) - c.Assert(strings.Contains(string(output), "unknown flag"), IsFalse) + output, err := pdctl.ExecuteCommand(cmd, "-u", url, "region", "key", "--format=raw", " ") + re.NoError(err) + re.NotContains(string(output), "unknown flag") } -func (s *regionTestSuite) TestRegion(c *C) { +func TestRegion(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -80,11 +73,11 @@ func (s *regionTestSuite) TestRegion(c *C) { LastHeartbeat: time.Now().UnixNano(), } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + re.NoError(leaderServer.BootstrapCluster()) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) downPeer := &metapb.Peer{Id: 8, StoreId: 3} - r1 := pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), + r1 := 
pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1), core.SetApproximateSize(1), core.SetApproximateKeys(100), core.SetPeers([]*metapb.Peer{ @@ -93,16 +86,16 @@ func (s *regionTestSuite) TestRegion(c *C) { {Id: 6, StoreId: 3}, {Id: 7, StoreId: 4}, })) - r2 := pdctl.MustPutRegion(c, cluster, 2, 1, []byte("b"), []byte("c"), + r2 := pdctl.MustPutRegion(re, cluster, 2, 1, []byte("b"), []byte("c"), core.SetWrittenBytes(2000), core.SetReadBytes(0), core.SetRegionConfVer(2), core.SetRegionVersion(3), core.SetApproximateSize(144), core.SetApproximateKeys(14400), ) - r3 := pdctl.MustPutRegion(c, cluster, 3, 1, []byte("c"), []byte("d"), + r3 := pdctl.MustPutRegion(re, cluster, 3, 1, []byte("c"), []byte("d"), core.SetWrittenBytes(500), core.SetReadBytes(800), core.SetRegionConfVer(3), core.SetRegionVersion(2), core.SetApproximateSize(30), core.SetApproximateKeys(3000), core.WithDownPeers([]*pdpb.PeerStats{{Peer: downPeer, DownSeconds: 3600}}), core.WithPendingPeers([]*metapb.Peer{downPeer}), core.WithLearners([]*metapb.Peer{{Id: 3, StoreId: 1}})) - r4 := pdctl.MustPutRegion(c, cluster, 4, 1, []byte("d"), []byte("e"), + r4 := pdctl.MustPutRegion(re, cluster, 4, 1, []byte("d"), []byte("e"), core.SetWrittenBytes(100), core.SetReadBytes(100), core.SetRegionConfVer(1), core.SetRegionVersion(1), core.SetApproximateSize(10), core.SetApproximateKeys(1000), ) @@ -173,11 +166,11 @@ func (s *regionTestSuite) TestRegion(c *C) { for _, testCase := range testRegionsCases { args := append([]string{"-u", pdAddr}, testCase.args...) - output, e := pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) + output, err := pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) regions := &api.RegionsInfo{} - c.Assert(json.Unmarshal(output, regions), IsNil) - pdctl.CheckRegionsInfo(c, regions, testCase.expect) + re.NoError(json.Unmarshal(output, regions)) + pdctl.CheckRegionsInfo(re, regions, testCase.expect) } var testRegionCases = []struct { @@ -196,22 +189,22 @@ func (s *regionTestSuite) TestRegion(c *C) { for _, testCase := range testRegionCases { args := append([]string{"-u", pdAddr}, testCase.args...) - output, e := pdctl.ExecuteCommand(cmd, args...) - c.Assert(e, IsNil) + output, err := pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) region := &api.RegionInfo{} - c.Assert(json.Unmarshal(output, region), IsNil) - pdctl.CheckRegionInfo(c, region, testCase.expect) + re.NoError(json.Unmarshal(output, region)) + pdctl.CheckRegionInfo(re, region, testCase.expect) } // Test region range-holes. - r5 := pdctl.MustPutRegion(c, cluster, 5, 1, []byte("x"), []byte("z")) - output, e := pdctl.ExecuteCommand(cmd, []string{"-u", pdAddr, "region", "range-holes"}...) - c.Assert(e, IsNil) + r5 := pdctl.MustPutRegion(re, cluster, 5, 1, []byte("x"), []byte("z")) + output, err := pdctl.ExecuteCommand(cmd, []string{"-u", pdAddr, "region", "range-holes"}...) 
+ re.NoError(err) rangeHoles := new([][]string) - c.Assert(json.Unmarshal(output, rangeHoles), IsNil) - c.Assert(*rangeHoles, DeepEquals, [][]string{ + re.NoError(json.Unmarshal(output, rangeHoles)) + re.Equal([][]string{ {"", core.HexRegionKeyStr(r1.GetStartKey())}, {core.HexRegionKeyStr(r4.GetEndKey()), core.HexRegionKeyStr(r5.GetStartKey())}, {core.HexRegionKeyStr(r5.GetEndKey()), ""}, - }) + }, *rangeHoles) } diff --git a/tests/pdctl/scheduler/scheduler_test.go b/tests/pdctl/scheduler/scheduler_test.go index 53ed808f410..3a3846603dd 100644 --- a/tests/pdctl/scheduler/scheduler_test.go +++ b/tests/pdctl/scheduler/scheduler_test.go @@ -17,12 +17,11 @@ package scheduler_test import ( "context" "encoding/json" - "strings" "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/versioninfo" "github.com/tikv/pd/tests" @@ -30,30 +29,14 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&schedulerTestSuite{}) - -type schedulerTestSuite struct { - context context.Context - cancel context.CancelFunc -} - -func (s *schedulerTestSuite) SetUpSuite(c *C) { - s.context, s.cancel = context.WithCancel(context.Background()) -} - -func (s *schedulerTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *schedulerTestSuite) TestScheduler(c *C) { - cluster, err := tests.NewTestCluster(s.context, 1) - c.Assert(err, IsNil) +func TestScheduler(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -83,18 +66,18 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { mustExec := func(args []string, v interface{}) string { output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) if v == nil { return string(output) } - c.Assert(json.Unmarshal(output, v), IsNil) + re.NoError(json.Unmarshal(output, v)) return "" } mustUsage := func(args []string) { output, err := pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Usage"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Usage") } checkSchedulerCommand := func(args []string, expected map[string]bool) { @@ -104,7 +87,7 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { var schedulers []string mustExec([]string{"-u", pdAddr, "scheduler", "show"}, &schedulers) for _, scheduler := range schedulers { - c.Assert(expected[scheduler], IsTrue) + re.True(expected[scheduler]) } } @@ -114,16 +97,16 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { } configInfo := make(map[string]interface{}) mustExec([]string{"-u", pdAddr, "scheduler", "config", schedulerName}, &configInfo) - c.Assert(expectedConfig, DeepEquals, configInfo) + re.Equal(expectedConfig, configInfo) } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b")) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b")) defer cluster.Destroy() time.Sleep(3 * time.Second) @@ -245,12 +228,12 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { }) var roles []string mustExec([]string{"-u", pdAddr, "scheduler", "config", "shuffle-region-scheduler", "show-roles"}, &roles) - c.Assert(roles, DeepEquals, []string{"leader", "follower", "learner"}) + re.Equal([]string{"leader", "follower", "learner"}, roles) mustExec([]string{"-u", pdAddr, "scheduler", "config", "shuffle-region-scheduler", "set-roles", "learner"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "shuffle-region-scheduler", "show-roles"}, &roles) - c.Assert(roles, DeepEquals, []string{"learner"}) + re.Equal([]string{"learner"}, roles) mustExec([]string{"-u", pdAddr, "scheduler", "config", "shuffle-region-scheduler"}, &roles) - c.Assert(roles, DeepEquals, []string{"learner"}) + re.Equal([]string{"learner"}, roles) // test grant hot region scheduler config checkSchedulerCommand([]string{"-u", pdAddr, "scheduler", "add", "grant-hot-region-scheduler", "1", "1,2,3"}, map[string]bool{ @@ -266,30 +249,30 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { "store-leader-id": float64(1), } mustExec([]string{"-u", pdAddr, "scheduler", "config", "grant-hot-region-scheduler"}, &conf3) - c.Assert(expected3, DeepEquals, conf3) + re.Equal(expected3, conf3) mustExec([]string{"-u", pdAddr, "scheduler", "config", "grant-hot-region-scheduler", "set", "2", "1,2,3"}, nil) expected3["store-leader-id"] = float64(2) mustExec([]string{"-u", pdAddr, "scheduler", "config", "grant-hot-region-scheduler"}, &conf3) - c.Assert(expected3, DeepEquals, conf3) + re.Equal(expected3, conf3) // test balance region config echo := mustExec([]string{"-u", pdAddr, "scheduler", "add", "balance-region-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "balance-region-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "balance-region-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsFalse) + re.NotContains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "add", "evict-leader-scheduler", "1"}, nil) - 
c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "evict-leader-scheduler-1"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "evict-leader-scheduler-1"}, nil) - c.Assert(strings.Contains(echo, "404"), IsTrue) + re.Contains(echo, "404") // test hot region config echo = mustExec([]string{"-u", pdAddr, "scheduler", "config", "evict-leader-scheduler"}, nil) - c.Assert(strings.Contains(echo, "[404] scheduler not found"), IsTrue) + re.Contains(echo, "[404] scheduler not found") expected1 := map[string]interface{}{ "min-hot-byte-rate": float64(100), "min-hot-key-rate": float64(10), @@ -312,55 +295,55 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { } var conf map[string]interface{} mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "list"}, &conf) - c.Assert(conf, DeepEquals, expected1) + re.Equal(expected1, conf) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "show"}, &conf) - c.Assert(conf, DeepEquals, expected1) + re.Equal(expected1, conf) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "src-tolerance-ratio", "1.02"}, nil) expected1["src-tolerance-ratio"] = 1.02 var conf1 map[string]interface{} mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "byte,key"}, nil) expected1["read-priorities"] = []interface{}{"byte", "key"} mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key,byte"}, nil) expected1["read-priorities"] = []interface{}{"key", "byte"} mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "foo,bar"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", ""}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key,key"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", 
"balance-hot-region-scheduler", "set", "read-priorities", "byte,byte"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key,key,byte"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) // write-priorities is divided into write-leader-priorities and write-peer-priorities mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "write-priorities", "key,byte"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "forbid-rw-type", "read"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) // test compatibility for _, store := range stores { version := versioninfo.HotScheduleWithQuery store.Version = versioninfo.MinSupportedVersion(version).String() - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) } conf["read-priorities"] = []interface{}{"query", "byte"} @@ -368,32 +351,32 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { // cannot set qps as write-peer-priorities mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "write-peer-priorities", "query,byte"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) // test remove and add mustExec([]string{"-u", pdAddr, "scheduler", "remove", "balance-hot-region-scheduler"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "add", "balance-hot-region-scheduler"}, nil) - c.Assert(conf1, DeepEquals, expected1) + re.Equal(expected1, conf1) // test balance leader config conf = make(map[string]interface{}) conf1 = make(map[string]interface{}) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-leader-scheduler", "show"}, &conf) - c.Assert(conf["batch"], Equals, 4.) + re.Equal(4., conf["batch"]) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-leader-scheduler", "set", "batch", "3"}, nil) mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-leader-scheduler"}, &conf1) - c.Assert(conf1["batch"], Equals, 3.) 
+ re.Equal(3., conf1["batch"]) echo = mustExec([]string{"-u", pdAddr, "scheduler", "add", "balance-leader-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsFalse) + re.NotContains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "balance-leader-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "balance-leader-scheduler"}, nil) - c.Assert(strings.Contains(echo, "404"), IsTrue) - c.Assert(strings.Contains(echo, "PD:scheduler:ErrSchedulerNotFound]scheduler not found"), IsTrue) + re.Contains(echo, "404") + re.Contains(echo, "PD:scheduler:ErrSchedulerNotFound]scheduler not found") echo = mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-leader-scheduler"}, nil) - c.Assert(strings.Contains(echo, "404"), IsTrue) - c.Assert(strings.Contains(echo, "scheduler not found"), IsTrue) + re.Contains(echo, "404") + re.Contains(echo, "scheduler not found") echo = mustExec([]string{"-u", pdAddr, "scheduler", "add", "balance-leader-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") // test show scheduler with paused and disabled status. checkSchedulerWithStatusCommand := func(args []string, status string, expected []string) { @@ -402,7 +385,7 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { } var schedulers []string mustExec([]string{"-u", pdAddr, "scheduler", "show", "--status", status}, &schedulers) - c.Assert(schedulers, DeepEquals, expected) + re.Equal(expected, schedulers) } mustUsage([]string{"-u", pdAddr, "scheduler", "pause", "balance-leader-scheduler"}) @@ -417,26 +400,26 @@ func (s *schedulerTestSuite) TestScheduler(c *C) { // set label scheduler to disabled manually. 
echo = mustExec([]string{"-u", pdAddr, "scheduler", "add", "label-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") cfg := leaderServer.GetServer().GetScheduleConfig() origin := cfg.Schedulers cfg.Schedulers = config.SchedulerConfigs{{Type: "label", Disable: true}} err = leaderServer.GetServer().SetScheduleConfig(*cfg) - c.Assert(err, IsNil) + re.NoError(err) checkSchedulerWithStatusCommand(nil, "disabled", []string{"label-scheduler"}) // reset Schedulers in ScheduleConfig cfg.Schedulers = origin err = leaderServer.GetServer().SetScheduleConfig(*cfg) - c.Assert(err, IsNil) + re.NoError(err) checkSchedulerWithStatusCommand(nil, "disabled", nil) // test split bucket scheduler echo = mustExec([]string{"-u", pdAddr, "scheduler", "config", "split-bucket-scheduler"}, nil) - c.Assert(strings.Contains(echo, "\"degree\": 3"), IsTrue) + re.Contains(echo, "\"degree\": 3") echo = mustExec([]string{"-u", pdAddr, "scheduler", "config", "split-bucket-scheduler", "set", "degree", "10"}, nil) - c.Assert(strings.Contains(echo, "Success"), IsTrue) + re.Contains(echo, "Success") echo = mustExec([]string{"-u", pdAddr, "scheduler", "config", "split-bucket-scheduler"}, nil) - c.Assert(strings.Contains(echo, "\"degree\": 10"), IsTrue) + re.Contains(echo, "\"degree\": 10") echo = mustExec([]string{"-u", pdAddr, "scheduler", "remove", "split-bucket-scheduler"}, nil) - c.Assert(strings.Contains(echo, "Success!"), IsTrue) + re.Contains(echo, "Success!") } diff --git a/tests/pdctl/store/store_test.go b/tests/pdctl/store/store_test.go index a43d70722e8..c2c9420d01a 100644 --- a/tests/pdctl/store/store_test.go +++ b/tests/pdctl/store/store_test.go @@ -21,8 +21,8 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/core/storelimit" @@ -32,21 +32,14 @@ import ( cmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&storeTestSuite{}) - -type storeTestSuite struct{} - -func (s *storeTestSuite) TestStore(c *C) { +func TestStore(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := cmd.GetRootCmd() @@ -88,180 +81,189 @@ func (s *storeTestSuite) TestStore(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store.Store.Store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store.Store.Store) } defer cluster.Destroy() // store command args := []string{"-u", pdAddr, "store"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storesInfo := new(api.StoresInfo) - c.Assert(json.Unmarshal(output, &storesInfo), IsNil) - pdctl.CheckStoresInfo(c, storesInfo.Stores, stores[:2]) + re.NoError(json.Unmarshal(output, &storesInfo)) + + pdctl.CheckStoresInfo(re, storesInfo.Stores, stores[:2]) // store --state= command args = []string{"-u", pdAddr, "store", "--state", "Up,Tombstone"} output, err = pdctl.ExecuteCommand(cmd, args...) 
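(All of the test-file hunks in this series apply the same mechanical mapping from pingcap/check to testify. As a reference point, a minimal sketch of that mapping; it is illustrative only and not part of any patch, and the package, test, and variable names are invented.)

package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAssertionMapping sketches the check-to-testify translation applied in these diffs:
// a plain Go test replaces the gocheck Suite, and re := require.New(t) replaces *check.C.
func TestAssertionMapping(t *testing.T) {
	re := require.New(t)
	var err error // stands in for a call result
	re.NoError(err)                                  // was: c.Assert(err, IsNil)
	re.Equal([]string{"a"}, []string{"a"})           // was: c.Assert(got, DeepEquals, expected); expected now comes first
	re.Contains("Success!", "Success")               // was: c.Assert(strings.Contains(echo, "Success!"), IsTrue)
	re.True(strings.Contains("Success!", "Success")) // equivalent, though Contains reads better
	re.Len([]int{1}, 1)                              // was: c.Assert(x, HasLen, 1)
}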
- c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "\"state\":"), Equals, false) + re.NoError(err) + re.Equal(false, strings.Contains(string(output), "\"state\":")) storesInfo = new(api.StoresInfo) - c.Assert(json.Unmarshal(output, &storesInfo), IsNil) - pdctl.CheckStoresInfo(c, storesInfo.Stores, stores) + re.NoError(json.Unmarshal(output, &storesInfo)) + + pdctl.CheckStoresInfo(re, storesInfo.Stores, stores) // store command args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storeInfo := new(api.StoreInfo) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) - pdctl.CheckStoresInfo(c, []*api.StoreInfo{storeInfo}, stores[:1]) + re.NoError(json.Unmarshal(output, &storeInfo)) + + pdctl.CheckStoresInfo(re, []*api.StoreInfo{storeInfo}, stores[:1]) // store label [ ]... [flags] command - c.Assert(storeInfo.Store.Labels, IsNil) + re.Nil(storeInfo.Store.Labels) + args = []string{"-u", pdAddr, "store", "label", "1", "zone", "cn"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) + re.NoError(err) + re.NoError(json.Unmarshal(output, &storeInfo)) + label := storeInfo.Store.Labels[0] - c.Assert(label.Key, Equals, "zone") - c.Assert(label.Value, Equals, "cn") + re.Equal("zone", label.Key) + re.Equal("cn", label.Value) // store label ... command args = []string{"-u", pdAddr, "store", "label", "1", "zone", "us", "language", "English"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) + re.NoError(err) + re.NoError(json.Unmarshal(output, &storeInfo)) + label0 := storeInfo.Store.Labels[0] - c.Assert(label0.Key, Equals, "zone") - c.Assert(label0.Value, Equals, "us") + re.Equal("zone", label0.Key) + re.Equal("us", label0.Value) label1 := storeInfo.Store.Labels[1] - c.Assert(label1.Key, Equals, "language") - c.Assert(label1.Value, Equals, "English") + re.Equal("language", label1.Key) + re.Equal("English", label1.Value) // store label ... -f command args = []string{"-u", pdAddr, "store", "label", "1", "zone", "uk", "-f"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) + re.NoError(err) + re.NoError(json.Unmarshal(output, &storeInfo)) + label0 = storeInfo.Store.Labels[0] - c.Assert(label0.Key, Equals, "zone") - c.Assert(label0.Value, Equals, "uk") - c.Assert(storeInfo.Store.Labels, HasLen, 1) + re.Equal("zone", label0.Key) + re.Equal("uk", label0.Value) + re.Len(storeInfo.Store.Labels, 1) // store weight command - c.Assert(storeInfo.Status.LeaderWeight, Equals, float64(1)) - c.Assert(storeInfo.Status.RegionWeight, Equals, float64(1)) + re.Equal(float64(1), storeInfo.Status.LeaderWeight) + re.Equal(float64(1), storeInfo.Status.RegionWeight) args = []string{"-u", pdAddr, "store", "weight", "1", "5", "10"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) - c.Assert(storeInfo.Status.LeaderWeight, Equals, float64(5)) - c.Assert(storeInfo.Status.RegionWeight, Equals, float64(10)) + re.NoError(err) + re.NoError(json.Unmarshal(output, &storeInfo)) + + re.Equal(float64(5), storeInfo.Status.LeaderWeight) + re.Equal(float64(10), storeInfo.Status.RegionWeight) // store limit args = []string{"-u", pdAddr, "store", "limit", "1", "10"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) limit := leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.AddPeer) - c.Assert(limit, Equals, float64(10)) + re.Equal(float64(10), limit) limit = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) - c.Assert(limit, Equals, float64(10)) + re.Equal(float64(10), limit) // store limit args = []string{"-u", pdAddr, "store", "limit", "1", "5", "remove-peer"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) limit = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) - c.Assert(limit, Equals, float64(5)) + re.Equal(float64(5), limit) limit = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.AddPeer) - c.Assert(limit, Equals, float64(10)) + re.Equal(float64(10), limit) // store limit all args = []string{"-u", pdAddr, "store", "limit", "all", "20"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) limit1 := leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.AddPeer) limit2 := leaderServer.GetRaftCluster().GetStoreLimitByType(2, storelimit.AddPeer) limit3 := leaderServer.GetRaftCluster().GetStoreLimitByType(3, storelimit.AddPeer) - c.Assert(limit1, Equals, float64(20)) - c.Assert(limit2, Equals, float64(20)) - c.Assert(limit3, Equals, float64(20)) + re.Equal(float64(20), limit1) + re.Equal(float64(20), limit2) + re.Equal(float64(20), limit3) limit1 = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) limit2 = leaderServer.GetRaftCluster().GetStoreLimitByType(2, storelimit.RemovePeer) limit3 = leaderServer.GetRaftCluster().GetStoreLimitByType(3, storelimit.RemovePeer) - c.Assert(limit1, Equals, float64(20)) - c.Assert(limit2, Equals, float64(20)) - c.Assert(limit3, Equals, float64(20)) + re.Equal(float64(20), limit1) + re.Equal(float64(20), limit2) + re.Equal(float64(20), limit3) + + re.NoError(leaderServer.Stop()) + re.NoError(leaderServer.Run()) - c.Assert(leaderServer.Stop(), IsNil) - c.Assert(leaderServer.Run(), IsNil) cluster.WaitLeader() storesLimit := leaderServer.GetPersistOptions().GetAllStoresLimit() - c.Assert(storesLimit[1].AddPeer, Equals, float64(20)) - c.Assert(storesLimit[1].RemovePeer, Equals, float64(20)) + re.Equal(float64(20), storesLimit[1].AddPeer) + re.Equal(float64(20), storesLimit[1].RemovePeer) // store limit all args = []string{"-u", pdAddr, "store", "limit", "all", "25", "remove-peer"} _, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) limit1 = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) limit3 = leaderServer.GetRaftCluster().GetStoreLimitByType(3, storelimit.RemovePeer) - c.Assert(limit1, Equals, float64(25)) - c.Assert(limit3, Equals, float64(25)) + re.Equal(float64(25), limit1) + re.Equal(float64(25), limit3) limit2 = leaderServer.GetRaftCluster().GetStoreLimitByType(2, storelimit.RemovePeer) - c.Assert(limit2, Equals, float64(25)) + re.Equal(float64(25), limit2) // store limit all args = []string{"-u", pdAddr, "store", "limit", "all", "zone", "uk", "20", "remove-peer"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) limit1 = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) - c.Assert(limit1, Equals, float64(20)) + re.Equal(float64(20), limit1) // store limit all 0 is invalid args = []string{"-u", pdAddr, "store", "limit", "all", "0"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "rate should be a number that > 0"), IsTrue) + re.NoError(err) + re.Contains(string(output), "rate should be a number that > 0") // store limit args = []string{"-u", pdAddr, "store", "limit"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) allAddPeerLimit := make(map[string]map[string]interface{}) json.Unmarshal(output, &allAddPeerLimit) - c.Assert(allAddPeerLimit["1"]["add-peer"].(float64), Equals, float64(20)) - c.Assert(allAddPeerLimit["3"]["add-peer"].(float64), Equals, float64(20)) + re.Equal(float64(20), allAddPeerLimit["1"]["add-peer"].(float64)) + re.Equal(float64(20), allAddPeerLimit["3"]["add-peer"].(float64)) _, ok := allAddPeerLimit["2"]["add-peer"] - c.Assert(ok, IsFalse) + re.False(ok) args = []string{"-u", pdAddr, "store", "limit", "remove-peer"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) allRemovePeerLimit := make(map[string]map[string]interface{}) json.Unmarshal(output, &allRemovePeerLimit) - c.Assert(allRemovePeerLimit["1"]["remove-peer"].(float64), Equals, float64(20)) - c.Assert(allRemovePeerLimit["3"]["remove-peer"].(float64), Equals, float64(25)) + re.Equal(float64(20), allRemovePeerLimit["1"]["remove-peer"].(float64)) + re.Equal(float64(25), allRemovePeerLimit["3"]["remove-peer"].(float64)) _, ok = allRemovePeerLimit["2"]["add-peer"] - c.Assert(ok, IsFalse) + re.False(ok) // put enough stores for replica. for id := 1000; id <= 1005; id++ { @@ -271,172 +273,179 @@ func (s *storeTestSuite) TestStore(c *C) { NodeState: metapb.NodeState_Serving, LastHeartbeat: time.Now().UnixNano(), } - pdctl.MustPutStore(c, leaderServer.GetServer(), store2) + pdctl.MustPutStore(re, leaderServer.GetServer(), store2) } // store delete command storeInfo.Store.State = metapb.StoreState(metapb.StoreState_value[storeInfo.Store.StateName]) - c.Assert(storeInfo.Store.State, Equals, metapb.StoreState_Up) + re.Equal(metapb.StoreState_Up, storeInfo.Store.State) args = []string{"-u", pdAddr, "store", "delete", "1"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) storeInfo = new(api.StoreInfo) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) + re.NoError(json.Unmarshal(output, &storeInfo)) + storeInfo.Store.State = metapb.StoreState(metapb.StoreState_value[storeInfo.Store.StateName]) - c.Assert(storeInfo.Store.State, Equals, metapb.StoreState_Offline) + re.Equal(metapb.StoreState_Offline, storeInfo.Store.State) // store check status args = []string{"-u", pdAddr, "store", "check", "Offline"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "\"id\": 1,"), IsTrue) + re.NoError(err) + re.Contains(string(output), "\"id\": 1,") args = []string{"-u", pdAddr, "store", "check", "Tombstone"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "\"id\": 2,"), IsTrue) + re.NoError(err) + re.Contains(string(output), "\"id\": 2,") args = []string{"-u", pdAddr, "store", "check", "Up"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "\"id\": 3,"), IsTrue) + re.NoError(err) + re.Contains(string(output), "\"id\": 3,") args = []string{"-u", pdAddr, "store", "check", "Invalid_State"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "Unknown state: Invalid_state"), IsTrue) + re.NoError(err) + re.Contains(string(output), "Unknown state: Invalid_state") // store cancel-delete command limit = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) - c.Assert(limit, Equals, storelimit.Unlimited) + re.Equal(storelimit.Unlimited, limit) args = []string{"-u", pdAddr, "store", "cancel-delete", "1"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "1"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storeInfo = new(api.StoreInfo) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) - c.Assert(storeInfo.Store.State, Equals, metapb.StoreState_Up) + re.NoError(json.Unmarshal(output, &storeInfo)) + + re.Equal(metapb.StoreState_Up, storeInfo.Store.State) limit = leaderServer.GetRaftCluster().GetStoreLimitByType(1, storelimit.RemovePeer) - c.Assert(limit, Equals, 20.0) + re.Equal(20.0, limit) // store delete addr
args = []string{"-u", pdAddr, "store", "delete", "addr", "tikv3"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(string(output), Equals, "Success!\n") - c.Assert(err, IsNil) + re.Equal("Success!\n", string(output)) + re.NoError(err) args = []string{"-u", pdAddr, "store", "3"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storeInfo = new(api.StoreInfo) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) + re.NoError(json.Unmarshal(output, &storeInfo)) + storeInfo.Store.State = metapb.StoreState(metapb.StoreState_value[storeInfo.Store.StateName]) - c.Assert(storeInfo.Store.State, Equals, metapb.StoreState_Offline) + re.Equal(metapb.StoreState_Offline, storeInfo.Store.State) // store cancel-delete addr
limit = leaderServer.GetRaftCluster().GetStoreLimitByType(3, storelimit.RemovePeer) - c.Assert(limit, Equals, storelimit.Unlimited) + re.Equal(storelimit.Unlimited, limit) args = []string{"-u", pdAddr, "store", "cancel-delete", "addr", "tikv3"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(string(output), Equals, "Success!\n") - c.Assert(err, IsNil) + re.Equal("Success!\n", string(output)) + re.NoError(err) args = []string{"-u", pdAddr, "store", "3"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storeInfo = new(api.StoreInfo) - c.Assert(json.Unmarshal(output, &storeInfo), IsNil) - c.Assert(storeInfo.Store.State, Equals, metapb.StoreState_Up) + re.NoError(json.Unmarshal(output, &storeInfo)) + + re.Equal(metapb.StoreState_Up, storeInfo.Store.State) limit = leaderServer.GetRaftCluster().GetStoreLimitByType(3, storelimit.RemovePeer) - c.Assert(limit, Equals, 25.0) + re.Equal(25.0, limit) // store remove-tombstone args = []string{"-u", pdAddr, "store", "check", "Tombstone"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storesInfo = new(api.StoresInfo) - c.Assert(json.Unmarshal(output, &storesInfo), IsNil) - c.Assert(storesInfo.Count, Equals, 1) + re.NoError(json.Unmarshal(output, &storesInfo)) + + re.Equal(1, storesInfo.Count) args = []string{"-u", pdAddr, "store", "remove-tombstone"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "check", "Tombstone"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) storesInfo = new(api.StoresInfo) - c.Assert(json.Unmarshal(output, &storesInfo), IsNil) - c.Assert(storesInfo.Count, Equals, 0) + re.NoError(json.Unmarshal(output, &storesInfo)) + + re.Equal(0, storesInfo.Count) // It should be called after stores remove-tombstone. args = []string{"-u", pdAddr, "stores", "show", "limit"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "PANIC"), IsFalse) + re.NoError(err) + re.NotContains(string(output), "PANIC") args = []string{"-u", pdAddr, "stores", "show", "limit", "remove-peer"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "PANIC"), IsFalse) + re.NoError(err) + re.NotContains(string(output), "PANIC") args = []string{"-u", pdAddr, "stores", "show", "limit", "add-peer"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "PANIC"), IsFalse) + re.NoError(err) + re.NotContains(string(output), "PANIC") // store limit-scene args = []string{"-u", pdAddr, "store", "limit-scene"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) scene := &storelimit.Scene{} err = json.Unmarshal(output, scene) - c.Assert(err, IsNil) - c.Assert(scene, DeepEquals, storelimit.DefaultScene(storelimit.AddPeer)) + re.NoError(err) + re.Equal(storelimit.DefaultScene(storelimit.AddPeer), scene) // store limit-scene args = []string{"-u", pdAddr, "store", "limit-scene", "idle", "200"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "limit-scene"} scene = &storelimit.Scene{} output, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) err = json.Unmarshal(output, scene) - c.Assert(err, IsNil) - c.Assert(scene.Idle, Equals, 200) + re.NoError(err) + re.Equal(200, scene.Idle) // store limit-scene args = []string{"-u", pdAddr, "store", "limit-scene", "idle", "100", "remove-peer"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "store", "limit-scene", "remove-peer"} scene = &storelimit.Scene{} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) err = json.Unmarshal(output, scene) - c.Assert(err, IsNil) - c.Assert(scene.Idle, Equals, 100) + re.NoError(err) + re.Equal(100, scene.Idle) // store limit all 201 is invalid for all args = []string{"-u", pdAddr, "store", "limit", "all", "201"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "rate should less than"), IsTrue) + re.NoError(err) + re.Contains(string(output), "rate should less than") // store limit all 201 is invalid for label args = []string{"-u", pdAddr, "store", "limit", "all", "engine", "key", "201", "add-peer"} output, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - c.Assert(strings.Contains(string(output), "rate should less than"), IsTrue) + re.NoError(err) + re.Contains(string(output), "rate should less than") } // https://github.com/tikv/pd/issues/5024 -func (s *storeTestSuite) TestTombstoneStore(c *C) { +func TestTombstoneStore(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pdAddr := cluster.GetConfig().GetClientURL() cmd := cmd.GetRootCmd() @@ -478,18 +487,19 @@ func (s *storeTestSuite) TestTombstoneStore(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store.Store.Store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store.Store.Store) } defer cluster.Destroy() - pdctl.MustPutRegion(c, cluster, 1, 2, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 2, 3, []byte("b"), []byte("c"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 2, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 3, []byte("b"), []byte("c"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) // store remove-tombstone args := []string{"-u", pdAddr, "store", "remove-tombstone"} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) message := string(output) - c.Assert(strings.Contains(message, "2") && strings.Contains(message, "3"), IsTrue) + re.Contains(message, "2") + re.Contains(message, "3") } diff --git a/tests/pdctl/tso/tso_test.go b/tests/pdctl/tso/tso_test.go index 1d2cdb77dc0..f6295424ddc 100644 --- a/tests/pdctl/tso/tso_test.go +++ b/tests/pdctl/tso/tso_test.go @@ -20,20 +20,13 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&tsoTestSuite{}) - -type tsoTestSuite struct{} - -func (s *tsoTestSuite) TestTSO(c *C) { +func TestTSO(t *testing.T) { + re := require.New(t) cmd := pdctlCmd.GetRootCmd() const ( @@ -45,13 +38,12 @@ func (s *tsoTestSuite) TestTSO(c *C) { ts := "395181938313123110" args := []string{"-u", "127.0.0.1", "tso", ts} output, err := pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) - t, e := strconv.ParseUint(ts, 10, 64) - c.Assert(e, IsNil) - c.Assert(err, IsNil) - logicalTime := t & logicalBits - physical := t >> physicalShiftBits + re.NoError(err) + tsTime, err := strconv.ParseUint(ts, 10, 64) + re.NoError(err) + logicalTime := tsTime & logicalBits + physical := tsTime >> physicalShiftBits physicalTime := time.Unix(int64(physical/1000), int64(physical%1000)*time.Millisecond.Nanoseconds()) str := fmt.Sprintln("system: ", physicalTime) + fmt.Sprintln("logic: ", logicalTime) - c.Assert(str, Equals, string(output)) + re.Equal(string(output), str) } diff --git a/tests/pdctl/unsafe/unsafe_operation_test.go b/tests/pdctl/unsafe/unsafe_operation_test.go index 4bbe2309dc3..1e4e3468225 100644 --- a/tests/pdctl/unsafe/unsafe_operation_test.go +++ b/tests/pdctl/unsafe/unsafe_operation_test.go @@ -18,47 +18,40 @@ import ( "context" "testing" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&unsafeOperationTestSuite{}) - -type unsafeOperationTestSuite struct{} - -func (s *unsafeOperationTestSuite) TestRemoveFailedStores(c *C) { +func TestRemoveFailedStores(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() err = cluster.GetServer(cluster.GetLeader()).BootstrapCluster() - c.Assert(err, IsNil) + re.NoError(err) pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() defer cluster.Destroy() args := []string{"-u", pdAddr, "unsafe", "remove-failed-stores", "1,2,3"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "unsafe", "remove-failed-stores", "1,2,3", "--timeout", "3600"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "unsafe", "remove-failed-stores", "1,2,3", "--timeout", "abc"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, Not(IsNil)) + re.Error(err) args = []string{"-u", pdAddr, "unsafe", "remove-failed-stores", "show"} _, err = pdctl.ExecuteCommand(cmd, args...) - c.Assert(err, IsNil) + re.NoError(err) args = []string{"-u", pdAddr, "unsafe", "remove-failed-stores", "history"} _, err = pdctl.ExecuteCommand(cmd, args...) 
- c.Assert(err, IsNil) + re.NoError(err) } diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index 9e6248f9cec..e462adae2ba 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -542,12 +542,12 @@ func (s *testProgressSuite) TestRemovingProgress(c *C) { } for _, store := range stores { - pdctl.MustPutStore(c, leader.GetServer(), store) + pdctl.MustPutStoreWithCheck(c, leader.GetServer(), store) } - pdctl.MustPutRegion(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) - pdctl.MustPutRegion(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(30)) - pdctl.MustPutRegion(c, cluster, 1002, 1, []byte("e"), []byte("f"), core.SetApproximateSize(50)) - pdctl.MustPutRegion(c, cluster, 1003, 2, []byte("g"), []byte("h"), core.SetApproximateSize(40)) + pdctl.MustPutRegionWithCheck(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) + pdctl.MustPutRegionWithCheck(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(30)) + pdctl.MustPutRegionWithCheck(c, cluster, 1002, 1, []byte("e"), []byte("f"), core.SetApproximateSize(50)) + pdctl.MustPutRegionWithCheck(c, cluster, 1003, 2, []byte("g"), []byte("h"), core.SetApproximateSize(40)) // no store removing output := sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusNotFound) @@ -569,8 +569,8 @@ func (s *testProgressSuite) TestRemovingProgress(c *C) { c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) // update size - pdctl.MustPutRegion(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) - pdctl.MustPutRegion(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) + pdctl.MustPutRegionWithCheck(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) + pdctl.MustPutRegionWithCheck(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) // is not prepared time.Sleep(2 * time.Second) @@ -675,10 +675,10 @@ func (s *testProgressSuite) TestPreparingProgress(c *C) { } for _, store := range stores { - pdctl.MustPutStore(c, leader.GetServer(), store) + pdctl.MustPutStoreWithCheck(c, leader.GetServer(), store) } for i := 0; i < 100; i++ { - pdctl.MustPutRegion(c, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("p%d", i)), []byte(fmt.Sprintf("%d", i+1)), core.SetApproximateSize(10)) + pdctl.MustPutRegionWithCheck(c, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("p%d", i)), []byte(fmt.Sprintf("%d", i+1)), core.SetApproximateSize(10)) } // no store preparing output := sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) @@ -705,8 +705,8 @@ func (s *testProgressSuite) TestPreparingProgress(c *C) { c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) // update size - pdctl.MustPutRegion(c, cluster, 1000, 4, []byte(fmt.Sprintf("%d", 1000)), []byte(fmt.Sprintf("%d", 1001)), core.SetApproximateSize(10)) - pdctl.MustPutRegion(c, cluster, 1001, 5, []byte(fmt.Sprintf("%d", 1001)), []byte(fmt.Sprintf("%d", 1002)), core.SetApproximateSize(40)) + pdctl.MustPutRegionWithCheck(c, cluster, 1000, 4, []byte(fmt.Sprintf("%d", 1000)), []byte(fmt.Sprintf("%d", 1001)), core.SetApproximateSize(10)) + pdctl.MustPutRegionWithCheck(c, cluster, 1001, 5, []byte(fmt.Sprintf("%d", 1001)), []byte(fmt.Sprintf("%d", 1002)), core.SetApproximateSize(40)) time.Sleep(2 * time.Second) output = sendRequest(c, 
leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) c.Assert(json.Unmarshal(output, &p), IsNil) diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index 5a11f8c23c4..662f128dd1b 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -69,14 +69,14 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { leaderServer := cluster.GetServer(cluster.GetLeader()) c.Assert(leaderServer.BootstrapCluster(), IsNil) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegion(c, cluster, 3, 1, []byte("e"), []byte("f")) - pdctl.MustPutRegion(c, cluster, 4, 2, []byte("g"), []byte("h")) + pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegionWithCheck(c, cluster, 3, 1, []byte("e"), []byte("f")) + pdctl.MustPutRegionWithCheck(c, cluster, 4, 2, []byte("g"), []byte("h")) storeStats := []*pdpb.StoreStats{ { StoreId: 1, @@ -172,11 +172,11 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C leaderServer := cluster.GetServer(cluster.GetLeader()) c.Assert(leaderServer.BootstrapCluster(), IsNil) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) @@ -196,7 +196,7 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C schedule.HotRegionsReservedDays = 0 leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) - pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) time.Sleep(10 * interval) endTime = time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() @@ -263,11 +263,11 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c leaderServer := cluster.GetServer(cluster.GetLeader()) 
c.Assert(leaderServer.BootstrapCluster(), IsNil) for _, store := range stores { - pdctl.MustPutStore(c, leaderServer.GetServer(), store) + pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) @@ -287,7 +287,7 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c schedule.HotRegionsWriteInterval.Duration = 20 * interval leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) - pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) time.Sleep(10 * interval) endTime = time.Now().UnixNano() / int64(time.Millisecond) // it cant get new hot region because wait time smaller than hot region write interval From 5d744d356a040a6bd21ce4eac0c0d5a586862b28 Mon Sep 17 00:00:00 2001 From: Shirly Date: Wed, 15 Jun 2022 15:52:33 +0800 Subject: [PATCH 19/35] server/grpc_service: make the lock for `UpdateServiceGCSafePoint` smaller (#5128) close tikv/pd#5019 Signed-off-by: shirly Co-authored-by: Ti Chi Robot --- server/gc/safepoint.go | 63 +++++++++++++++++--------- server/gc/safepoint_test.go | 88 ++++++++++++++++++++++++++++++++++++- server/grpc_service.go | 34 +++----------- server/server.go | 6 +-- 4 files changed, 135 insertions(+), 56 deletions(-) diff --git a/server/gc/safepoint.go b/server/gc/safepoint.go index 3cec08d8951..533ae338580 100644 --- a/server/gc/safepoint.go +++ b/server/gc/safepoint.go @@ -15,42 +15,35 @@ package gc import ( + "math" + "time" + "github.com/tikv/pd/pkg/syncutil" "github.com/tikv/pd/server/storage/endpoint" ) -// SafePointManager is the manager for safePoint of GC and services +// SafePointManager is the manager for safePoint of GC and services. type SafePointManager struct { - *gcSafePointManager - // TODO add ServiceSafepointManager -} - -// NewSafepointManager creates a SafePointManager of GC and services -func NewSafepointManager(store endpoint.GCSafePointStorage) *SafePointManager { - return &SafePointManager{ - newGCSafePointManager(store), - } + gcLock syncutil.Mutex + serviceGCLock syncutil.Mutex + store endpoint.GCSafePointStorage } -type gcSafePointManager struct { - syncutil.Mutex - store endpoint.GCSafePointStorage -} - -func newGCSafePointManager(store endpoint.GCSafePointStorage) *gcSafePointManager { - return &gcSafePointManager{store: store} +// NewSafePointManager creates a SafePointManager of GC and services. +func NewSafePointManager(store endpoint.GCSafePointStorage) *SafePointManager { + return &SafePointManager{store: store} } // LoadGCSafePoint loads current GC safe point from storage. 
-func (manager *gcSafePointManager) LoadGCSafePoint() (uint64, error) { +func (manager *SafePointManager) LoadGCSafePoint() (uint64, error) { return manager.store.LoadGCSafePoint() } // UpdateGCSafePoint updates the safepoint if it is greater than the previous one // it returns the old safepoint in the storage. -func (manager *gcSafePointManager) UpdateGCSafePoint(newSafePoint uint64) (oldSafePoint uint64, err error) { - manager.Lock() - defer manager.Unlock() +func (manager *SafePointManager) UpdateGCSafePoint(newSafePoint uint64) (oldSafePoint uint64, err error) { + manager.gcLock.Lock() + defer manager.gcLock.Unlock() // TODO: cache the safepoint in the storage. oldSafePoint, err = manager.store.LoadGCSafePoint() if err != nil { @@ -62,3 +55,31 @@ func (manager *gcSafePointManager) UpdateGCSafePoint(newSafePoint uint64) (oldSa err = manager.store.SaveGCSafePoint(newSafePoint) return } + +// UpdateServiceGCSafePoint update the safepoint for a specific service. +func (manager *SafePointManager) UpdateServiceGCSafePoint(serviceID string, newSafePoint uint64, ttl int64, now time.Time) (minServiceSafePoint *endpoint.ServiceSafePoint, updated bool, err error) { + manager.serviceGCLock.Lock() + defer manager.serviceGCLock.Unlock() + minServiceSafePoint, err = manager.store.LoadMinServiceGCSafePoint(now) + if err != nil || ttl <= 0 || newSafePoint < minServiceSafePoint.SafePoint { + return minServiceSafePoint, false, err + } + + ssp := &endpoint.ServiceSafePoint{ + ServiceID: serviceID, + ExpiredAt: now.Unix() + ttl, + SafePoint: newSafePoint, + } + if math.MaxInt64-now.Unix() <= ttl { + ssp.ExpiredAt = math.MaxInt64 + } + if err := manager.store.SaveServiceGCSafePoint(ssp); err != nil { + return nil, false, err + } + + // If the min safePoint is updated, load the next one. + if serviceID == minServiceSafePoint.ServiceID { + minServiceSafePoint, err = manager.store.LoadMinServiceGCSafePoint(now) + } + return minServiceSafePoint, true, err +} diff --git a/server/gc/safepoint_test.go b/server/gc/safepoint_test.go index 2af82ba7145..aebf8033dea 100644 --- a/server/gc/safepoint_test.go +++ b/server/gc/safepoint_test.go @@ -15,8 +15,10 @@ package gc import ( + "math" "sync" "testing" + "time" "github.com/stretchr/testify/require" "github.com/tikv/pd/server/storage/endpoint" @@ -28,7 +30,7 @@ func newGCStorage() endpoint.GCSafePointStorage { } func TestGCSafePointUpdateSequentially(t *testing.T) { - gcSafePointManager := newGCSafePointManager(newGCStorage()) + gcSafePointManager := NewSafePointManager(newGCStorage()) re := require.New(t) curSafePoint := uint64(0) // update gc safePoint with asc value. 
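(For context on the new API surface above: a rough sketch of how a caller is expected to drive SafePointManager.UpdateServiceGCSafePoint; the grpc_service.go hunk later in this patch does essentially this. The wrapper function and its error message are invented for illustration, only the manager calls and result semantics come from the diff.)

package example

import (
	"fmt"
	"time"

	"github.com/tikv/pd/server/gc"
	"github.com/tikv/pd/server/storage/endpoint"
)

// updateServiceSafePoint shows the intended call pattern: the manager now owns the
// load/compare/save sequence and its locking, so the caller only inspects the result.
func updateServiceSafePoint(store endpoint.GCSafePointStorage, serviceID string, safePoint uint64, ttl int64) error {
	manager := gc.NewSafePointManager(store)
	min, updated, err := manager.UpdateServiceGCSafePoint(serviceID, safePoint, ttl, time.Now())
	if err != nil {
		return err
	}
	if !updated {
		// Rejected: the TTL was not positive, or safePoint is below the current minimum
		// service safe point, which is still held by min.ServiceID.
		return fmt.Errorf("safe point %d rejected, current minimum is %d (service %q)",
			safePoint, min.SafePoint, min.ServiceID)
	}
	return nil
}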
@@ -57,7 +59,7 @@ func TestGCSafePointUpdateSequentially(t *testing.T) { } func TestGCSafePointUpdateCurrently(t *testing.T) { - gcSafePointManager := newGCSafePointManager(newGCStorage()) + gcSafePointManager := NewSafePointManager(newGCStorage()) maxSafePoint := uint64(1000) wg := sync.WaitGroup{} re := require.New(t) @@ -78,3 +80,85 @@ func TestGCSafePointUpdateCurrently(t *testing.T) { re.NoError(err) re.Equal(maxSafePoint, safePoint) } + +func TestServiceGCSafePointUpdate(t *testing.T) { + re := require.New(t) + manager := NewSafePointManager(newGCStorage()) + gcworkerServiceID := "gc_worker" + cdcServiceID := "cdc" + brServiceID := "br" + cdcServiceSafePoint := uint64(10) + gcWorkerSafePoint := uint64(8) + brSafePoint := uint64(15) + + wg := sync.WaitGroup{} + wg.Add(5) + // update the safepoint for cdc to 10 should success + go func() { + defer wg.Done() + min, updated, err := manager.UpdateServiceGCSafePoint(cdcServiceID, cdcServiceSafePoint, 10000, time.Now()) + re.NoError(err) + re.True(updated) + // the service will init the service safepoint to 0(<10 for cdc) for gc_worker. + re.Equal(gcworkerServiceID, min.ServiceID) + }() + + // update the safepoint for br to 15 should success + go func() { + defer wg.Done() + min, updated, err := manager.UpdateServiceGCSafePoint(brServiceID, brSafePoint, 10000, time.Now()) + re.NoError(err) + re.True(updated) + // the service will init the service safepoint to 0(<10 for cdc) for gc_worker. + re.Equal(gcworkerServiceID, min.ServiceID) + }() + + // update safepoint to 8 for gc_woker should be success + go func() { + defer wg.Done() + // update with valid ttl for gc_worker should be success. + min, updated, _ := manager.UpdateServiceGCSafePoint(gcworkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now()) + re.True(updated) + // the current min safepoint should be 8 for gc_worker(cdc 10) + re.Equal(gcWorkerSafePoint, min.SafePoint) + re.Equal(gcworkerServiceID, min.ServiceID) + }() + + go func() { + defer wg.Done() + // update safepoint of gc_worker's service with ttl not infinity should be failed. + _, updated, err := manager.UpdateServiceGCSafePoint(gcworkerServiceID, 10000, 10, time.Now()) + re.Error(err) + re.False(updated) + }() + + // update safepoint with negative ttl should be failed. + go func() { + defer wg.Done() + brTTL := int64(-100) + _, updated, err := manager.UpdateServiceGCSafePoint(brServiceID, uint64(10000), brTTL, time.Now()) + re.NoError(err) + re.False(updated) + }() + + wg.Wait() + // update safepoint to 15(>10 for cdc) for gc_worker + gcWorkerSafePoint = uint64(15) + min, updated, err := manager.UpdateServiceGCSafePoint(gcworkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now()) + re.NoError(err) + re.True(updated) + re.Equal(cdcServiceID, min.ServiceID) + re.Equal(cdcServiceSafePoint, min.SafePoint) + + // the value shouldn't be updated with current safepoint smaller than the min safepoint. 
+ brTTL := int64(100) + brSafePoint = min.SafePoint - 5 + min, updated, err = manager.UpdateServiceGCSafePoint(brServiceID, brSafePoint, brTTL, time.Now()) + re.NoError(err) + re.False(updated) + + brSafePoint = min.SafePoint + 10 + _, updated, err = manager.UpdateServiceGCSafePoint(brServiceID, brSafePoint, brTTL, time.Now()) + re.NoError(err) + re.True(updated) +} diff --git a/server/grpc_service.go b/server/grpc_service.go index 53b74ba517d..c02e51ed510 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "io" - "math" "strconv" "sync/atomic" "time" @@ -1358,8 +1357,6 @@ func (s *GrpcServer) UpdateGCSafePoint(ctx context.Context, request *pdpb.Update // UpdateServiceGCSafePoint update the safepoint for specific service func (s *GrpcServer) UpdateServiceGCSafePoint(ctx context.Context, request *pdpb.UpdateServiceGCSafePointRequest) (*pdpb.UpdateServiceGCSafePointResponse, error) { - s.serviceSafePointLock.Lock() - defer s.serviceSafePointLock.Unlock() fn := func(ctx context.Context, client *grpc.ClientConn) (interface{}, error) { return pdpb.NewPDClient(client).UpdateServiceGCSafePoint(ctx, request) } @@ -1385,36 +1382,17 @@ func (s *GrpcServer) UpdateServiceGCSafePoint(ctx context.Context, request *pdpb return nil, err } now, _ := tsoutil.ParseTimestamp(nowTSO) - min, err := storage.LoadMinServiceGCSafePoint(now) + serviceID := string(request.ServiceId) + min, updated, err := s.gcSafePointManager.UpdateServiceGCSafePoint(serviceID, request.GetSafePoint(), request.GetTTL(), now) if err != nil { return nil, err } - - if request.TTL > 0 && request.SafePoint >= min.SafePoint { - ssp := &endpoint.ServiceSafePoint{ - ServiceID: string(request.ServiceId), - ExpiredAt: now.Unix() + request.TTL, - SafePoint: request.SafePoint, - } - if math.MaxInt64-now.Unix() <= request.TTL { - ssp.ExpiredAt = math.MaxInt64 - } - if err := storage.SaveServiceGCSafePoint(ssp); err != nil { - return nil, err - } + if updated { log.Info("update service GC safe point", - zap.String("service-id", ssp.ServiceID), - zap.Int64("expire-at", ssp.ExpiredAt), - zap.Uint64("safepoint", ssp.SafePoint)) - // If the min safepoint is updated, load the next one - if string(request.ServiceId) == min.ServiceID { - min, err = storage.LoadMinServiceGCSafePoint(now) - if err != nil { - return nil, err - } - } + zap.String("service-id", serviceID), + zap.Int64("expire-at", now.Unix()+request.GetTTL()), + zap.Uint64("safepoint", request.GetSafePoint())) } - return &pdpb.UpdateServiceGCSafePointResponse{ Header: s.header(), ServiceId: []byte(min.ServiceID), diff --git a/server/server.go b/server/server.go index b618f97aeed..c7902692552 100644 --- a/server/server.go +++ b/server/server.go @@ -46,7 +46,6 @@ import ( "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/logutil" "github.com/tikv/pd/pkg/ratelimit" - "github.com/tikv/pd/pkg/syncutil" "github.com/tikv/pd/pkg/systimemon" "github.com/tikv/pd/pkg/typeutil" "github.com/tikv/pd/server/cluster" @@ -146,9 +145,6 @@ type Server struct { startCallbacks []func() closeCallbacks []func() - // serviceSafePointLock is a lock for UpdateServiceGCSafePoint - serviceSafePointLock syncutil.Mutex - // hot region history info storeage hotRegionStorage *storage.HotRegionStorage // Store as map[string]*grpc.ClientConn @@ -404,7 +400,7 @@ func (s *Server) startServer(ctx context.Context) error { } defaultStorage := storage.NewStorageWithEtcdBackend(s.client, s.rootPath) s.storage = storage.NewCoreStorage(defaultStorage, regionStorage) - 
s.gcSafePointManager = gc.NewSafepointManager(s.storage) + s.gcSafePointManager = gc.NewSafePointManager(s.storage) s.basicCluster = core.NewBasicCluster() s.cluster = cluster.NewRaftCluster(ctx, s.clusterID, syncer.NewRegionSyncer(s), s.client, s.httpClient) s.hbStreams = hbstream.NewHeartbeatStreams(ctx, s.clusterID, s.cluster) From b58a7d5ae2a18216617d4ff70b29753583c99f14 Mon Sep 17 00:00:00 2001 From: Shirly Date: Wed, 15 Jun 2022 16:20:34 +0800 Subject: [PATCH 20/35] replica_strategy:speed up and reduce the complexity of selectStore to O(n) (#5144) close tikv/pd#5143 Signed-off-by: shirly Co-authored-by: Ti Chi Robot --- server/schedule/checker/replica_strategy.go | 11 +++-- server/schedule/filter/candidates.go | 49 ++++++++++++++------- server/schedule/filter/candidates_test.go | 19 +++++--- 3 files changed, 51 insertions(+), 28 deletions(-) diff --git a/server/schedule/checker/replica_strategy.go b/server/schedule/checker/replica_strategy.go index 5a249c46a8b..6ccad30a32d 100644 --- a/server/schedule/checker/replica_strategy.go +++ b/server/schedule/checker/replica_strategy.go @@ -72,12 +72,12 @@ func (s *ReplicaStrategy) SelectStoreToAdd(coLocationStores []*core.StoreInfo, e strictStateFilter := &filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true} targetCandidate := filter.NewCandidates(s.cluster.GetStores()). FilterTarget(s.cluster.GetOpts(), filters...). - Sort(isolationComparer).Reverse().Top(isolationComparer). // greater isolation score is better - Sort(filter.RegionScoreComparer(s.cluster.GetOpts())) // less region score is better + KeepTheTopStores(isolationComparer, false) // greater isolation score is better if targetCandidate.Len() == 0 { return 0, false } - target := targetCandidate.FilterTarget(s.cluster.GetOpts(), strictStateFilter).PickFirst() // the filter does not ignore temp states + target := targetCandidate.FilterTarget(s.cluster.GetOpts(), strictStateFilter). + PickTheTopStore(filter.RegionScoreComparer(s.cluster.GetOpts()), true) // less region score is better if target == nil { return 0, true // filter by temporary states } @@ -124,9 +124,8 @@ func (s *ReplicaStrategy) SelectStoreToRemove(coLocationStores []*core.StoreInfo isolationComparer := filter.IsolationComparer(s.locationLabels, coLocationStores) source := filter.NewCandidates(coLocationStores). FilterSource(s.cluster.GetOpts(), &filter.StoreStateFilter{ActionScope: replicaCheckerName, MoveRegion: true}). - Sort(isolationComparer).Top(isolationComparer). - Sort(filter.RegionScoreComparer(s.cluster.GetOpts())).Reverse(). - PickFirst() + KeepTheTopStores(isolationComparer, true). + PickTheTopStore(filter.RegionScoreComparer(s.cluster.GetOpts()), false) if source == nil { log.Debug("no removable store", zap.Uint64("region-id", s.region.GetID())) return 0 diff --git a/server/schedule/filter/candidates.go b/server/schedule/filter/candidates.go index 969fec34d38..dcbe89710a8 100644 --- a/server/schedule/filter/candidates.go +++ b/server/schedule/filter/candidates.go @@ -51,32 +51,49 @@ func (c *StoreCandidates) Sort(less StoreComparer) *StoreCandidates { return c } -// Reverse reverses the candidate store list. -func (c *StoreCandidates) Reverse() *StoreCandidates { - for i := len(c.Stores)/2 - 1; i >= 0; i-- { - opp := len(c.Stores) - 1 - i - c.Stores[i], c.Stores[opp] = c.Stores[opp], c.Stores[i] - } - return c -} - // Shuffle reorders all candidates randomly. 
func (c *StoreCandidates) Shuffle() *StoreCandidates { rand.Shuffle(len(c.Stores), func(i, j int) { c.Stores[i], c.Stores[j] = c.Stores[j], c.Stores[i] }) return c } -// Top keeps all stores that have the same priority with the first store. -// The store list should be sorted before calling Top. -func (c *StoreCandidates) Top(less StoreComparer) *StoreCandidates { - var i int - for i < len(c.Stores) && less(c.Stores[0], c.Stores[i]) == 0 { - i++ +// KeepTheTopStores keeps the slice of the stores in the front order by asc. +func (c *StoreCandidates) KeepTheTopStores(cmp StoreComparer, asc bool) *StoreCandidates { + if len(c.Stores) <= 1 { + return c + } + topIdx := 0 + for idx := 1; idx < c.Len(); idx++ { + compare := cmp(c.Stores[topIdx], c.Stores[idx]) + if compare == 0 { + topIdx++ + } else if (compare > 0 && asc) || (!asc && compare < 0) { + topIdx = 0 + } else { + continue + } + c.Stores[idx], c.Stores[topIdx] = c.Stores[topIdx], c.Stores[idx] } - c.Stores = c.Stores[:i] + c.Stores = c.Stores[:topIdx+1] return c } +// PickTheTopStore returns the first store order by asc. +// It returns the min item when asc is true, returns the max item when asc is false. +func (c *StoreCandidates) PickTheTopStore(cmp StoreComparer, asc bool) *core.StoreInfo { + if len(c.Stores) == 0 { + return nil + } + topIdx := 0 + for idx := 1; idx < len(c.Stores); idx++ { + compare := cmp(c.Stores[topIdx], c.Stores[idx]) + if (compare > 0 && asc) || (!asc && compare < 0) { + topIdx = idx + } + } + return c.Stores[topIdx] +} + // PickFirst returns the first store in candidate list. func (c *StoreCandidates) PickFirst() *core.StoreInfo { if len(c.Stores) == 0 { diff --git a/server/schedule/filter/candidates_test.go b/server/schedule/filter/candidates_test.go index 5150bed9b66..bb86906b081 100644 --- a/server/schedule/filter/candidates_test.go +++ b/server/schedule/filter/candidates_test.go @@ -15,10 +15,10 @@ package filter import ( - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/stretchr/testify/require" "testing" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" ) @@ -71,13 +71,16 @@ func TestCandidates(t *testing.T) { re.Nil(store) cs = newTestCandidates(1, 3, 5, 7, 6, 2, 4) + minStore := cs.PickTheTopStore(idComparer, true) + re.Equal(uint64(1), minStore.GetID()) + maxStore := cs.PickTheTopStore(idComparer, false) + re.Equal(uint64(7), maxStore.GetID()) + cs.Sort(idComparer) check(re, cs, 1, 2, 3, 4, 5, 6, 7) store = cs.PickFirst() re.Equal(uint64(1), store.GetID()) - cs.Reverse() - check(re, cs, 7, 6, 5, 4, 3, 2, 1) - store = cs.PickFirst() + store = cs.PickTheTopStore(idComparer, false) re.Equal(uint64(7), store.GetID()) cs.Shuffle() cs.Sort(idComparer) @@ -87,8 +90,12 @@ func TestCandidates(t *testing.T) { re.Less(store.GetID(), uint64(8)) cs = newTestCandidates(10, 15, 23, 20, 33, 32, 31) - cs.Sort(idComparer).Reverse().Top(idComparer2) + cs.KeepTheTopStores(idComparer2, false) check(re, cs, 33, 32, 31) + + cs = newTestCandidates(10, 15, 23, 20, 33, 32, 31) + cs.KeepTheTopStores(idComparer2, true) + check(re, cs, 10, 15) } func newTestCandidates(ids ...uint64) *StoreCandidates { From c628ff94a9a3251f959d069f0d1ebbbec3d6e0a3 Mon Sep 17 00:00:00 2001 From: "Reg [bot]" <86050514+tidb-dashboard-bot@users.noreply.github.com> Date: Wed, 15 Jun 2022 16:32:34 +0800 Subject: [PATCH 21/35] Update TiDB Dashboard to v2022.06.13.1 [master] (#5147) ref tikv/pd#4257 Signed-off-by: tidb-dashboard-bot Co-authored-by: 
tidb-dashboard-bot Co-authored-by: ShuNing Co-authored-by: Ti Chi Robot --- go.mod | 2 +- go.sum | 4 ++-- tests/client/go.sum | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 5eeb2656499..0a662f0f16b 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/pingcap/kvproto v0.0.0-20220510035547-0e2f26c0a46a github.com/pingcap/log v0.0.0-20210906054005-afc726e70354 github.com/pingcap/sysutil v0.0.0-20211208032423-041a72e5860d - github.com/pingcap/tidb-dashboard v0.0.0-20220518164040-4d621864a9a0 + github.com/pingcap/tidb-dashboard v0.0.0-20220613053259-1b8920062bd3 github.com/prometheus/client_golang v1.1.0 github.com/prometheus/common v0.6.0 github.com/sasha-s/go-deadlock v0.2.0 diff --git a/go.sum b/go.sum index 50997691e2a..ad3415e8116 100644 --- a/go.sum +++ b/go.sum @@ -408,8 +408,8 @@ github.com/pingcap/log v0.0.0-20210906054005-afc726e70354 h1:SvWCbCPh1YeHd9yQLks github.com/pingcap/log v0.0.0-20210906054005-afc726e70354/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v0.0.0-20211208032423-041a72e5860d h1:k3/APKZjXOyJrFy8VyYwRlZhMelpD3qBLJNsw3bPl/g= github.com/pingcap/sysutil v0.0.0-20211208032423-041a72e5860d/go.mod h1:7j18ezaWTao2LHOyMlsc2Dg1vW+mDY9dEbPzVyOlaeM= -github.com/pingcap/tidb-dashboard v0.0.0-20220518164040-4d621864a9a0 h1:SNfoqt/qZ+tSnFcOIn6rvhmH06UGJ137Of+uK9q1oOk= -github.com/pingcap/tidb-dashboard v0.0.0-20220518164040-4d621864a9a0/go.mod h1:Hc2LXf5Vs+KwyegHd6osyZ2+LfaVSfWEwuR86SNg7tk= +github.com/pingcap/tidb-dashboard v0.0.0-20220613053259-1b8920062bd3 h1:chUUmmcfNVtfR1c7/qaoLLA2SgaP79LLVXoXV9F4lP8= +github.com/pingcap/tidb-dashboard v0.0.0-20220613053259-1b8920062bd3/go.mod h1:Hc2LXf5Vs+KwyegHd6osyZ2+LfaVSfWEwuR86SNg7tk= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/tests/client/go.sum b/tests/client/go.sum index 493b1cd39f4..b9d76d704a8 100644 --- a/tests/client/go.sum +++ b/tests/client/go.sum @@ -417,8 +417,8 @@ github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee h1:VO2t6IBpfvW34TdtD/G github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v0.0.0-20211208032423-041a72e5860d h1:k3/APKZjXOyJrFy8VyYwRlZhMelpD3qBLJNsw3bPl/g= github.com/pingcap/sysutil v0.0.0-20211208032423-041a72e5860d/go.mod h1:7j18ezaWTao2LHOyMlsc2Dg1vW+mDY9dEbPzVyOlaeM= -github.com/pingcap/tidb-dashboard v0.0.0-20220518164040-4d621864a9a0 h1:SNfoqt/qZ+tSnFcOIn6rvhmH06UGJ137Of+uK9q1oOk= -github.com/pingcap/tidb-dashboard v0.0.0-20220518164040-4d621864a9a0/go.mod h1:Hc2LXf5Vs+KwyegHd6osyZ2+LfaVSfWEwuR86SNg7tk= +github.com/pingcap/tidb-dashboard v0.0.0-20220613053259-1b8920062bd3 h1:chUUmmcfNVtfR1c7/qaoLLA2SgaP79LLVXoXV9F4lP8= +github.com/pingcap/tidb-dashboard v0.0.0-20220613053259-1b8920062bd3/go.mod h1:Hc2LXf5Vs+KwyegHd6osyZ2+LfaVSfWEwuR86SNg7tk= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= From 0da658cec9659c08e8dfefea54daac3de25c5331 Mon Sep 17 00:00:00 2001 From: Yongbo Jiang Date: Wed, 15 Jun 2022 17:22:34 +0800 Subject: [PATCH 22/35] api: add Rate-limit config update API (#4843) ref tikv/pd#4666, ref 
tikv/pd#4839 add Rate-limit config update API Signed-off-by: Cabinfever_B Co-authored-by: Ryan Leung Co-authored-by: Ti Chi Robot --- pkg/jsonutil/jsonutil.go | 49 ++++++++ pkg/jsonutil/jsonutil_test.go | 65 +++++++++++ server/api/config.go | 43 +------ server/api/router.go | 1 + server/api/service_middleware.go | 123 ++++++++++++++++---- server/api/service_middleware_test.go | 160 +++++++++++++++++++++++++- server/server.go | 40 +++++++ 7 files changed, 420 insertions(+), 61 deletions(-) create mode 100644 pkg/jsonutil/jsonutil.go create mode 100644 pkg/jsonutil/jsonutil_test.go diff --git a/pkg/jsonutil/jsonutil.go b/pkg/jsonutil/jsonutil.go new file mode 100644 index 00000000000..c5ae2f378da --- /dev/null +++ b/pkg/jsonutil/jsonutil.go @@ -0,0 +1,49 @@ +// Copyright 2022 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonutil + +import ( + "bytes" + "encoding/json" + + "github.com/tikv/pd/pkg/reflectutil" +) + +// AddKeyValue is used to add a key value pair into `old` +func AddKeyValue(old interface{}, key string, value interface{}) (updated bool, found bool, err error) { + data, err := json.Marshal(map[string]interface{}{key: value}) + if err != nil { + return false, false, err + } + return MergeJSONObject(old, data) +} + +// MergeJSONObject is used to merge a marshaled json object into v +func MergeJSONObject(v interface{}, data []byte) (updated bool, found bool, err error) { + old, _ := json.Marshal(v) + if err := json.Unmarshal(data, v); err != nil { + return false, false, err + } + new, _ := json.Marshal(v) + if !bytes.Equal(old, new) { + return true, true, nil + } + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return false, false, err + } + found = reflectutil.FindSameFieldByJSON(v, m) + return false, found, nil +} diff --git a/pkg/jsonutil/jsonutil_test.go b/pkg/jsonutil/jsonutil_test.go new file mode 100644 index 00000000000..a046fbaf70a --- /dev/null +++ b/pkg/jsonutil/jsonutil_test.go @@ -0,0 +1,65 @@ +// Copyright 2022 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type testJSONStructLevel1 struct { + Name string `json:"name"` + Sub1 testJSONStructLevel2 `json:"sub1"` + Sub2 testJSONStructLevel2 `json:"sub2"` +} + +type testJSONStructLevel2 struct { + SubName string `json:"sub-name"` +} + +func TestJSONUtil(t *testing.T) { + t.Parallel() + re := require.New(t) + father := &testJSONStructLevel1{ + Name: "father", + } + son1 := &testJSONStructLevel2{ + SubName: "son1", + } + update, found, err := AddKeyValue(&father, "sub1", &son1) + re.NoError(err) + re.True(update) + re.True(found) + + son2 := &testJSONStructLevel2{ + SubName: "son2", + } + + update, found, err = AddKeyValue(father, "sub2", &son2) + re.NoError(err) + re.True(update) + re.True(found) + + update, found, err = AddKeyValue(father, "sub3", &son2) + re.NoError(err) + re.False(update) + re.False(found) + + update, found, err = AddKeyValue(father, "sub2", &son2) + re.NoError(err) + re.False(update) + re.True(found) +} diff --git a/server/api/config.go b/server/api/config.go index d4d90735289..b33dd5c5a97 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -15,7 +15,6 @@ package api import ( - "bytes" "encoding/json" "fmt" "io" @@ -29,6 +28,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/tikv/pd/pkg/apiutil" + "github.com/tikv/pd/pkg/jsonutil" "github.com/tikv/pd/pkg/logutil" "github.com/tikv/pd/pkg/reflectutil" "github.com/tikv/pd/server" @@ -166,12 +166,7 @@ func (h *confHandler) updateConfig(cfg *config.Config, key string, value interfa } func (h *confHandler) updateSchedule(config *config.Config, key string, value interface{}) error { - data, err := json.Marshal(map[string]interface{}{key: value}) - if err != nil { - return err - } - - updated, found, err := mergeConfig(&config.Schedule, data) + updated, found, err := jsonutil.AddKeyValue(&config.Schedule, key, value) if err != nil { return err } @@ -187,12 +182,7 @@ func (h *confHandler) updateSchedule(config *config.Config, key string, value in } func (h *confHandler) updateReplication(config *config.Config, key string, value interface{}) error { - data, err := json.Marshal(map[string]interface{}{key: value}) - if err != nil { - return err - } - - updated, found, err := mergeConfig(&config.Replication, data) + updated, found, err := jsonutil.AddKeyValue(&config.Replication, key, value) if err != nil { return err } @@ -214,8 +204,7 @@ func (h *confHandler) updateReplicationModeConfig(config *config.Config, key []s if err != nil { return err } - - updated, found, err := mergeConfig(&config.ReplicationMode, data) + updated, found, err := jsonutil.MergeJSONObject(&config.ReplicationMode, data) if err != nil { return err } @@ -231,12 +220,7 @@ func (h *confHandler) updateReplicationModeConfig(config *config.Config, key []s } func (h *confHandler) updatePDServerConfig(config *config.Config, key string, value interface{}) error { - data, err := json.Marshal(map[string]interface{}{key: value}) - if err != nil { - return err - } - - updated, found, err := mergeConfig(&config.PDServerCfg, data) + updated, found, err := jsonutil.AddKeyValue(&config.PDServerCfg, key, value) if err != nil { return err } @@ -288,23 +272,6 @@ func getConfigMap(cfg map[string]interface{}, key []string, value interface{}) m return cfg } -func mergeConfig(v interface{}, data []byte) (updated bool, found bool, err error) { - old, _ := json.Marshal(v) - if err := json.Unmarshal(data, v); err != nil { - return false, false, err - } - new, _ 
:= json.Marshal(v) - if !bytes.Equal(old, new) { - return true, true, nil - } - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return false, false, err - } - found = reflectutil.FindSameFieldByJSON(v, m) - return false, found, nil -} - // @Tags config // @Summary Get schedule config. // @Produce json diff --git a/server/api/router.go b/server/api/router.go index e755341ebef..3e8061fd74e 100644 --- a/server/api/router.go +++ b/server/api/router.go @@ -285,6 +285,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { serviceMiddlewareHandler := newServiceMiddlewareHandler(svr, rd) registerFunc(apiRouter, "/service-middleware/config", serviceMiddlewareHandler.GetServiceMiddlewareConfig, setMethods("GET")) registerFunc(apiRouter, "/service-middleware/config", serviceMiddlewareHandler.SetServiceMiddlewareConfig, setMethods("POST"), setAuditBackend(localLog)) + registerFunc(apiRouter, "/service-middleware/config/rate-limit", serviceMiddlewareHandler.SetRatelimitConfig, setMethods("POST"), setAuditBackend(localLog)) logHandler := newLogHandler(svr, rd) registerFunc(apiRouter, "/admin/log", logHandler.SetLogLevel, setMethods("POST"), setAuditBackend(localLog)) diff --git a/server/api/service_middleware.go b/server/api/service_middleware.go index 0f41f8ae725..426399a1d6e 100644 --- a/server/api/service_middleware.go +++ b/server/api/service_middleware.go @@ -23,6 +23,9 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/tikv/pd/pkg/apiutil" + "github.com/tikv/pd/pkg/jsonutil" + "github.com/tikv/pd/pkg/ratelimit" "github.com/tikv/pd/pkg/reflectutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" @@ -107,18 +110,13 @@ func (h *serviceMiddlewareHandler) updateServiceMiddlewareConfig(cfg *config.Ser case "audit": return h.updateAudit(cfg, kp[len(kp)-1], value) case "rate-limit": - return h.updateRateLimit(cfg, kp[len(kp)-1], value) + return h.svr.UpdateRateLimit(&cfg.RateLimitConfig, kp[len(kp)-1], value) } return errors.Errorf("config prefix %s not found", kp[0]) } func (h *serviceMiddlewareHandler) updateAudit(config *config.ServiceMiddlewareConfig, key string, value interface{}) error { - data, err := json.Marshal(map[string]interface{}{key: value}) - if err != nil { - return err - } - - updated, found, err := mergeConfig(&config.AuditConfig, data) + updated, found, err := jsonutil.AddKeyValue(&config.AuditConfig, key, value) if err != nil { return err } @@ -133,23 +131,104 @@ func (h *serviceMiddlewareHandler) updateAudit(config *config.ServiceMiddlewareC return err } -func (h *serviceMiddlewareHandler) updateRateLimit(config *config.ServiceMiddlewareConfig, key string, value interface{}) error { - data, err := json.Marshal(map[string]interface{}{key: value}) - if err != nil { - return err +// @Tags service_middleware +// @Summary update ratelimit config +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string +// @Failure 400 {string} string "The input is invalid." 
+// @Failure 500 {string} string "config item not found" +// @Router /service-middleware/config/rate-limit [POST] +func (h *serviceMiddlewareHandler) SetRatelimitConfig(w http.ResponseWriter, r *http.Request) { + var input map[string]interface{} + if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { + return } - - updated, found, err := mergeConfig(&config.RateLimitConfig, data) - if err != nil { - return err + typeStr, ok := input["type"].(string) + if !ok { + h.rd.JSON(w, http.StatusBadRequest, "The type is empty.") + return } - - if !found { - return errors.Errorf("config item %s not found", key) + var serviceLabel string + switch typeStr { + case "label": + serviceLabel, ok = input["label"].(string) + if !ok || len(serviceLabel) == 0 { + h.rd.JSON(w, http.StatusBadRequest, "The label is empty.") + return + } + if len(h.svr.GetServiceLabels(serviceLabel)) == 0 { + h.rd.JSON(w, http.StatusBadRequest, "There is no label matched.") + return + } + case "path": + method, _ := input["method"].(string) + path, ok := input["path"].(string) + if !ok || len(path) == 0 { + h.rd.JSON(w, http.StatusBadRequest, "The path is empty.") + return + } + serviceLabel = h.svr.GetAPIAccessServiceLabel(apiutil.NewAccessPath(path, method)) + if len(serviceLabel) == 0 { + h.rd.JSON(w, http.StatusBadRequest, "There is no label matched.") + return + } + default: + h.rd.JSON(w, http.StatusBadRequest, "The type is invalid.") + return } - - if updated { - err = h.svr.SetRateLimitConfig(config.RateLimitConfig) + if h.svr.IsInRateLimitAllowList(serviceLabel) { + h.rd.JSON(w, http.StatusBadRequest, "This service is in allow list whose config can not be changed.") + return } - return err + cfg := h.svr.GetRateLimitConfig().LimiterConfig[serviceLabel] + // update concurrency limiter + concurrencyUpdatedFlag := "Concurrency limiter is not changed." + concurrencyFloat, okc := input["concurrency"].(float64) + if okc { + cfg.ConcurrencyLimit = uint64(concurrencyFloat) + } + // update qps rate limiter + qpsRateUpdatedFlag := "QPS rate limiter is not changed." + qps, okq := input["qps"].(float64) + if okq { + brust := 0 + if int(qps) > 1 { + brust = int(qps) + } else if qps > 0 { + brust = 1 + } + cfg.QPS = qps + cfg.QPSBurst = brust + } + if !okc && !okq { + h.rd.JSON(w, http.StatusOK, "No changed.") + } else { + status := h.svr.UpdateServiceRateLimiter(serviceLabel, ratelimit.UpdateDimensionConfig(&cfg)) + switch { + case status&ratelimit.QPSChanged != 0: + qpsRateUpdatedFlag = "QPS rate limiter is changed." + case status&ratelimit.QPSDeleted != 0: + qpsRateUpdatedFlag = "QPS rate limiter is deleted." + } + switch { + case status&ratelimit.ConcurrencyChanged != 0: + concurrencyUpdatedFlag = "Concurrency limiter is changed." + case status&ratelimit.ConcurrencyDeleted != 0: + concurrencyUpdatedFlag = "Concurrency limiter is deleted." 
+ } + err := h.svr.UpdateRateLimitConfig("limiter-config", serviceLabel, cfg) + if err != nil { + h.rd.JSON(w, http.StatusInternalServerError, err.Error()) + } else { + result := rateLimitResult{concurrencyUpdatedFlag, qpsRateUpdatedFlag, h.svr.GetServiceMiddlewareConfig().RateLimitConfig.LimiterConfig} + h.rd.JSON(w, http.StatusOK, result) + } + } +} + +type rateLimitResult struct { + ConcurrencyUpdatedFlag string `json:"concurrency"` + QPSRateUpdatedFlag string `json:"qps"` + LimiterConfig map[string]ratelimit.DimensionConfig `json:"limiter-config"` } diff --git a/server/api/service_middleware_test.go b/server/api/service_middleware_test.go index a1d4804650c..6ea0343f53b 100644 --- a/server/api/service_middleware_test.go +++ b/server/api/service_middleware_test.go @@ -57,7 +57,8 @@ func (s *testAuditMiddlewareSuite) TestConfigAuditSwitch(c *C) { c.Assert(sc.EnableAudit, Equals, false) ms := map[string]interface{}{ - "enable-audit": "true", + "enable-audit": "true", + "enable-rate-limit": "true", } postData, err := json.Marshal(ms) c.Assert(err, IsNil) @@ -65,8 +66,10 @@ func (s *testAuditMiddlewareSuite) TestConfigAuditSwitch(c *C) { sc = &config.ServiceMiddlewareConfig{} c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) c.Assert(sc.EnableAudit, Equals, true) + c.Assert(sc.EnableRateLimit, Equals, true) ms = map[string]interface{}{ "audit.enable-audit": "false", + "enable-rate-limit": "false", } postData, err = json.Marshal(ms) c.Assert(err, IsNil) @@ -74,6 +77,7 @@ func (s *testAuditMiddlewareSuite) TestConfigAuditSwitch(c *C) { sc = &config.ServiceMiddlewareConfig{} c.Assert(tu.ReadGetJSON(c, testDialClient, addr, sc), IsNil) c.Assert(sc.EnableAudit, Equals, false) + c.Assert(sc.EnableRateLimit, Equals, false) // test empty ms = map[string]interface{}{} @@ -124,6 +128,160 @@ func (s *testRateLimitConfigSuite) TearDownSuite(c *C) { s.cleanup() } +func (s *testRateLimitConfigSuite) TestUpdateRateLimitConfig(c *C) { + urlPrefix := fmt.Sprintf("%s%s/api/v1/service-middleware/config/rate-limit", s.svr.GetAddr(), apiPrefix) + + // test empty type + input := make(map[string]interface{}) + input["type"] = 123 + jsonBody, err := json.Marshal(input) + c.Assert(err, IsNil) + + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "\"The type is empty.\"\n")) + c.Assert(err, IsNil) + // test invalid type + input = make(map[string]interface{}) + input["type"] = "url" + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "\"The type is invalid.\"\n")) + c.Assert(err, IsNil) + + // test empty label + input = make(map[string]interface{}) + input["type"] = "label" + input["label"] = "" + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "\"The label is empty.\"\n")) + c.Assert(err, IsNil) + // test no label matched + input = make(map[string]interface{}) + input["type"] = "label" + input["label"] = "TestLabel" + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "\"There is no label matched.\"\n")) + c.Assert(err, IsNil) + + // test empty path + input = make(map[string]interface{}) + input["type"] = "path" + input["path"] = "" + jsonBody, err = 
json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "\"The path is empty.\"\n")) + c.Assert(err, IsNil) + + // test path but no label matched + input = make(map[string]interface{}) + input["type"] = "path" + input["path"] = "/pd/api/v1/test" + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.Status(c, http.StatusBadRequest), tu.StringEqual(c, "\"There is no label matched.\"\n")) + c.Assert(err, IsNil) + + // no change + input = make(map[string]interface{}) + input["type"] = "label" + input["label"] = "GetHealthStatus" + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringEqual(c, "\"No changed.\"\n")) + c.Assert(err, IsNil) + + // change concurrency + input = make(map[string]interface{}) + input["type"] = "path" + input["path"] = "/pd/api/v1/health" + input["method"] = "GET" + input["concurrency"] = 100 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringContain(c, "Concurrency limiter is changed.")) + c.Assert(err, IsNil) + input["concurrency"] = 0 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringContain(c, "Concurrency limiter is deleted.")) + c.Assert(err, IsNil) + + // change qps + input = make(map[string]interface{}) + input["type"] = "path" + input["path"] = "/pd/api/v1/health" + input["method"] = "GET" + input["qps"] = 100 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringContain(c, "QPS rate limiter is changed.")) + c.Assert(err, IsNil) + + input = make(map[string]interface{}) + input["type"] = "path" + input["path"] = "/pd/api/v1/health" + input["method"] = "GET" + input["qps"] = 0.3 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringContain(c, "QPS rate limiter is changed.")) + c.Assert(err, IsNil) + c.Assert(s.svr.GetRateLimitConfig().LimiterConfig["GetHealthStatus"].QPSBurst, Equals, 1) + + input["qps"] = -1 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringContain(c, "QPS rate limiter is deleted.")) + c.Assert(err, IsNil) + + // change both + input = make(map[string]interface{}) + input["type"] = "path" + input["path"] = "/pd/api/v1/debug/pprof/profile" + input["qps"] = 100 + input["concurrency"] = 100 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + result := rateLimitResult{} + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusOK(c), tu.StringContain(c, "Concurrency limiter is changed."), + tu.StringContain(c, "QPS rate limiter is changed."), + tu.ExtractJSON(c, &result), + ) + c.Assert(result.LimiterConfig["Profile"].QPS, Equals, 100.) 
+ c.Assert(result.LimiterConfig["Profile"].QPSBurst, Equals, 100) + c.Assert(result.LimiterConfig["Profile"].ConcurrencyLimit, Equals, uint64(100)) + c.Assert(err, IsNil) + + limiter := s.svr.GetServiceRateLimiter() + limiter.Update("SetRatelimitConfig", ratelimit.AddLabelAllowList()) + + // Allow list + input = make(map[string]interface{}) + input["type"] = "label" + input["label"] = "SetRatelimitConfig" + input["qps"] = 100 + input["concurrency"] = 100 + jsonBody, err = json.Marshal(input) + c.Assert(err, IsNil) + err = tu.CheckPostJSON(testDialClient, urlPrefix, jsonBody, + tu.StatusNotOK(c), tu.StringEqual(c, "\"This service is in allow list whose config can not be changed.\"\n")) + c.Assert(err, IsNil) +} + func (s *testRateLimitConfigSuite) TestConfigRateLimitSwitch(c *C) { addr := fmt.Sprintf("%s/service-middleware/config", s.urlPrefix) diff --git a/server/server.go b/server/server.go index c7902692552..871c2e60738 100644 --- a/server/server.go +++ b/server/server.go @@ -44,6 +44,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/grpcutil" + "github.com/tikv/pd/pkg/jsonutil" "github.com/tikv/pd/pkg/logutil" "github.com/tikv/pd/pkg/ratelimit" "github.com/tikv/pd/pkg/systimemon" @@ -255,6 +256,7 @@ func CreateServer(ctx context.Context, cfg *config.Config, serviceBuilders ...Ha audit.NewLocalLogBackend(true), audit.NewPrometheusHistogramBackend(serviceAuditHistogram, false), } + s.serviceRateLimiter = ratelimit.NewLimiter() s.serviceAuditBackendLabels = make(map[string]*audit.BackendLabels) s.serviceRateLimiter = ratelimit.NewLimiter() s.serviceLabels = make(map[string][]apiutil.AccessPath) @@ -978,6 +980,34 @@ func (s *Server) SetAuditConfig(cfg config.AuditConfig) error { return nil } +// UpdateRateLimitConfig is used to update rate-limit config which will reserve old limiter-config +func (s *Server) UpdateRateLimitConfig(key, label string, value ratelimit.DimensionConfig) error { + cfg := s.GetServiceMiddlewareConfig() + rateLimitCfg := make(map[string]ratelimit.DimensionConfig) + for label, item := range cfg.LimiterConfig { + rateLimitCfg[label] = item + } + rateLimitCfg[label] = value + return s.UpdateRateLimit(&cfg.RateLimitConfig, key, &rateLimitCfg) +} + +// UpdateRateLimit is used to update rate-limit config which will overwrite limiter-config +func (s *Server) UpdateRateLimit(cfg *config.RateLimitConfig, key string, value interface{}) error { + updated, found, err := jsonutil.AddKeyValue(cfg, key, value) + if err != nil { + return err + } + + if !found { + return errors.Errorf("config item %s not found", key) + } + + if updated { + err = s.SetRateLimitConfig(*cfg) + } + return err +} + // GetRateLimitConfig gets the rate limit config information. func (s *Server) GetRateLimitConfig() *config.RateLimitConfig { return s.serviceMiddlewarePersistOptions.GetRateLimitConfig().Clone() @@ -1221,6 +1251,16 @@ func (s *Server) GetServiceRateLimiter() *ratelimit.Limiter { return s.serviceRateLimiter } +// IsInRateLimitAllowList returns whethis given service label is in allow lost +func (s *Server) IsInRateLimitAllowList(serviceLabel string) bool { + return s.serviceRateLimiter.IsInAllowList(serviceLabel) +} + +// UpdateServiceRateLimiter is used to update RateLimiter +func (s *Server) UpdateServiceRateLimiter(serviceLabel string, opts ...ratelimit.Option) ratelimit.UpdateStatus { + return s.serviceRateLimiter.Update(serviceLabel, opts...) +} + // GetClusterStatus gets cluster status. 
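
// Usage sketch for the rate-limit endpoint wired up above (illustration only; the
// values are the ones exercised by the tests in this patch and the PD address is a
// placeholder):
//
//   curl -X POST 'http://<pd-address>/pd/api/v1/service-middleware/config/rate-limit' \
//     -d '{"type":"path","path":"/pd/api/v1/health","method":"GET","qps":100,"concurrency":100}'
//
// The same update can be keyed by service label instead of path, e.g.
// {"type":"label","label":"GetHealthStatus","qps":100}. Setting concurrency to 0 or
// qps to a negative value deletes the corresponding limiter, as the tests below show.
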
func (s *Server) GetClusterStatus() (*cluster.Status, error) { s.cluster.Lock() From ee302fcd827131afb06f5eec9973a779d254a7c8 Mon Sep 17 00:00:00 2001 From: LLThomas Date: Wed, 15 Jun 2022 21:08:33 +0800 Subject: [PATCH 23/35] *: fix some typos (#5165) ref tikv/pd#4820 Fix some typos. Signed-off-by: LLThomas --- server/server.go | 6 +++--- server/storage/hot_region_storage.go | 4 ++-- server/storage/hot_region_storage_test.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/server/server.go b/server/server.go index 871c2e60738..6e900580d66 100644 --- a/server/server.go +++ b/server/server.go @@ -1409,20 +1409,20 @@ func (s *Server) campaignLeader() { go s.member.KeepLeader(ctx) log.Info("campaign pd leader ok", zap.String("campaign-pd-leader-name", s.Name())) - alllocator, err := s.tsoAllocatorManager.GetAllocator(tso.GlobalDCLocation) + allocator, err := s.tsoAllocatorManager.GetAllocator(tso.GlobalDCLocation) if err != nil { log.Error("failed to get the global TSO allocator", errs.ZapError(err)) return } log.Info("initializing the global TSO allocator") - if err := alllocator.Initialize(0); err != nil { + if err := allocator.Initialize(0); err != nil { log.Error("failed to initialize the global TSO allocator", errs.ZapError(err)) return } defer func() { s.tsoAllocatorManager.ResetAllocatorGroup(tso.GlobalDCLocation) failpoint.Inject("updateAfterResetTSO", func() { - if err = alllocator.UpdateTSO(); err != nil { + if err = allocator.UpdateTSO(); err != nil { panic(err) } }) diff --git a/server/storage/hot_region_storage.go b/server/storage/hot_region_storage.go index 162b631ddb6..597a6fc83ce 100644 --- a/server/storage/hot_region_storage.go +++ b/server/storage/hot_region_storage.go @@ -144,13 +144,13 @@ func NewHotRegionsStorage( if err != nil { return nil, err } - hotRegionInfoCtx, hotRegionInfoCancle := context.WithCancel(ctx) + hotRegionInfoCtx, hotRegionInfoCancel := context.WithCancel(ctx) h := HotRegionStorage{ LevelDBKV: levelDB, ekm: ekm, batchHotInfo: make(map[string]*HistoryHotRegion), hotRegionInfoCtx: hotRegionInfoCtx, - hotRegionInfoCancel: hotRegionInfoCancle, + hotRegionInfoCancel: hotRegionInfoCancel, hotRegionStorageHandler: hotRegionStorageHandler, curReservedDays: hotRegionStorageHandler.GetHotRegionsReservedDays(), curInterval: hotRegionStorageHandler.GetHotRegionsWriteInterval(), diff --git a/server/storage/hot_region_storage_test.go b/server/storage/hot_region_storage_test.go index 29dc4140317..aa3b5b974b9 100644 --- a/server/storage/hot_region_storage_test.go +++ b/server/storage/hot_region_storage_test.go @@ -293,7 +293,7 @@ func newTestHotRegionStorage(pullInterval time.Duration, } packHotRegionInfo.pullInterval = pullInterval packHotRegionInfo.reservedDays = reservedDays - // delete data in between today and tomrrow + // delete data in between today and tomorrow hotRegionStorage, err = NewHotRegionsStorage(ctx, writePath, nil, packHotRegionInfo) if err != nil { From 2efa259d42faaf01efe66d3a694eed318c508ef8 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Thu, 16 Jun 2022 15:52:34 +0800 Subject: [PATCH 24/35] metrics: delete the metrics instead of setting them to 0 (#5162) close tikv/pd#5163 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/api/config.go | 1 - server/cluster/coordinator.go | 18 +++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/server/api/config.go b/server/api/config.go index b33dd5c5a97..7ed3a9de56c 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -371,7 +371,6 @@ 
func (h *confHandler) SetReplicationConfig(w http.ResponseWriter, r *http.Reques // @Summary Get label property config. // @Produce json // @Success 200 {object} config.LabelPropertyConfig -// @Failure 400 {string} string "The input is invalid." // @Router /config/label-property [get] func (h *confHandler) GetLabelPropertyConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetLabelProperty()) diff --git a/server/cluster/coordinator.go b/server/cluster/coordinator.go index b3f72a3be5f..85c80365c9a 100644 --- a/server/cluster/coordinator.go +++ b/server/cluster/coordinator.go @@ -581,10 +581,10 @@ func collectHotMetrics(cluster *RaftCluster, stores []*core.StoreInfo, typ stati hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_query_as_leader").Set(stat.TotalLoads[queryTyp]) hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "hot_"+kind+"_region_as_leader").Set(float64(stat.Count)) } else { - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_bytes_as_leader").Set(0) - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_keys_as_leader").Set(0) - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_query_as_leader").Set(0) - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "hot_"+kind+"_region_as_leader").Set(0) + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "total_"+kind+"_bytes_as_leader") + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "total_"+kind+"_keys_as_leader") + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "total_"+kind+"_query_as_leader") + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "hot_"+kind+"_region_as_leader") } stat, ok = status.AsPeer[storeID] @@ -594,10 +594,10 @@ func collectHotMetrics(cluster *RaftCluster, stores []*core.StoreInfo, typ stati hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_query_as_peer").Set(stat.TotalLoads[queryTyp]) hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "hot_"+kind+"_region_as_peer").Set(float64(stat.Count)) } else { - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_bytes_as_peer").Set(0) - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_keys_as_peer").Set(0) - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "total_"+kind+"_query_as_peer").Set(0) - hotSpotStatusGauge.WithLabelValues(storeAddress, storeLabel, "hot_"+kind+"_region_as_peer").Set(0) + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "total_"+kind+"_bytes_as_peer") + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "total_"+kind+"_keys_as_peer") + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "total_"+kind+"_query_as_peer") + hotSpotStatusGauge.DeleteLabelValues(storeAddress, storeLabel, "hot_"+kind+"_region_as_peer") } } } @@ -673,7 +673,7 @@ func (c *coordinator) removeScheduler(name string) error { } s.Stop() - schedulerStatusGauge.WithLabelValues(name, "allow").Set(0) + schedulerStatusGauge.DeleteLabelValues(name, "allow") delete(c.schedulers, name) return nil From e6fd11821f004001bf441fca46219ab1a86c37f3 Mon Sep 17 00:00:00 2001 From: Shirly Date: Thu, 16 Jun 2022 18:20:35 +0800 Subject: [PATCH 25/35] scheduler/balance_leader: fix data race in the function of clone for config (#5157) close tikv/pd#5156 Signed-off-by: shirly Co-authored-by: Ti Chi Robot --- server/schedulers/balance_leader.go | 4 
++- server/schedulers/balance_leader_test.go | 38 ++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 server/schedulers/balance_leader_test.go diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go index 1b1f1bd8a64..f5c9c667264 100644 --- a/server/schedulers/balance_leader.go +++ b/server/schedulers/balance_leader.go @@ -128,8 +128,10 @@ func (conf *balanceLeaderSchedulerConfig) validate() bool { func (conf *balanceLeaderSchedulerConfig) Clone() *balanceLeaderSchedulerConfig { conf.mu.RLock() defer conf.mu.RUnlock() + ranges := make([]core.KeyRange, len(conf.Ranges)) + copy(ranges, conf.Ranges) return &balanceLeaderSchedulerConfig{ - Ranges: conf.Ranges, + Ranges: ranges, Batch: conf.Batch, } } diff --git a/server/schedulers/balance_leader_test.go b/server/schedulers/balance_leader_test.go new file mode 100644 index 00000000000..a74709de640 --- /dev/null +++ b/server/schedulers/balance_leader_test.go @@ -0,0 +1,38 @@ +// Copyright 2022 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBalanceLeaderSchedulerConfigClone(t *testing.T) { + re := require.New(t) + keyRanges1, _ := getKeyRanges([]string{"a", "b", "c", "d"}) + conf := &balanceLeaderSchedulerConfig{ + Ranges: keyRanges1, + Batch: 10, + } + conf2 := conf.Clone() + re.Equal(conf.Batch, conf2.Batch) + re.Equal(conf.Ranges, conf2.Ranges) + + keyRanges2, _ := getKeyRanges([]string{"e", "f", "g", "h"}) + // update conf2 + conf2.Ranges[1] = keyRanges2[1] + re.NotEqual(conf.Ranges, conf2.Ranges) +} From 9acd56ad305f42a1cb6670a7c04a5095c37e3609 Mon Sep 17 00:00:00 2001 From: LLThomas Date: Fri, 17 Jun 2022 10:54:34 +0800 Subject: [PATCH 26/35] =?UTF-8?q?server/schedulers:=20fix=20potential=20da?= =?UTF-8?q?ta=20race=20in=20the=20function=20of=20clone=20for=20grantHotRe?= =?UTF-8?q?gionS=E2=80=A6=20(#5173)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ref tikv/pd#5170 As the title says. 
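
For illustration, this fix and the balance-leader one in the previous commit share the
same shape: while the read lock is held, copy every slice or map field into fresh
backing storage, so the returned config never aliases state that a concurrent writer
may later mutate. A minimal sketch, using a hypothetical scratchConfig type rather
than the real scheduler configs:

    package cloneguard // hypothetical package, sketch only

    import "sync"

    type scratchConfig struct {
        mu  sync.RWMutex
        IDs []uint64
    }

    // Clone copies IDs into a new slice under the read lock, so a caller holding
    // the returned copy cannot race with writers that later modify c.IDs.
    func (c *scratchConfig) Clone() *scratchConfig {
        c.mu.RLock()
        defer c.mu.RUnlock()
        ids := make([]uint64, len(c.IDs))
        copy(ids, c.IDs)
        return &scratchConfig{IDs: ids}
    }
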
Signed-off-by: LLThomas --- server/schedulers/grant_hot_region.go | 4 +++- server/schedulers/grant_leader.go | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/server/schedulers/grant_hot_region.go b/server/schedulers/grant_hot_region.go index e001371899e..4decd1b1340 100644 --- a/server/schedulers/grant_hot_region.go +++ b/server/schedulers/grant_hot_region.go @@ -123,8 +123,10 @@ func (conf *grantHotRegionSchedulerConfig) SetStoreLeaderID(id uint64) { func (conf *grantHotRegionSchedulerConfig) Clone() *grantHotRegionSchedulerConfig { conf.mu.RLock() defer conf.mu.RUnlock() + newStoreIDs := make([]uint64, len(conf.StoreIDs)) + copy(newStoreIDs, conf.StoreIDs) return &grantHotRegionSchedulerConfig{ - StoreIDs: conf.StoreIDs, + StoreIDs: newStoreIDs, StoreLeaderID: conf.StoreLeaderID, } } diff --git a/server/schedulers/grant_leader.go b/server/schedulers/grant_leader.go index 40dd5c8a073..845f2b100c7 100644 --- a/server/schedulers/grant_leader.go +++ b/server/schedulers/grant_leader.go @@ -102,8 +102,12 @@ func (conf *grantLeaderSchedulerConfig) BuildWithArgs(args []string) error { func (conf *grantLeaderSchedulerConfig) Clone() *grantLeaderSchedulerConfig { conf.mu.RLock() defer conf.mu.RUnlock() + newStoreIDWithRanges := make(map[uint64][]core.KeyRange) + for k, v := range conf.StoreIDWithRanges { + newStoreIDWithRanges[k] = v + } return &grantLeaderSchedulerConfig{ - StoreIDWithRanges: conf.StoreIDWithRanges, + StoreIDWithRanges: newStoreIDWithRanges, } } From cb23d6c48cf42d6543b220991b1a88a9ee64c580 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Fri, 17 Jun 2022 13:30:35 +0800 Subject: [PATCH 27/35] tests: testify the api and storage tests (#5166) ref tikv/pd#4813 Testify the api and storage tests. Signed-off-by: JmPotato Co-authored-by: Ti Chi Robot --- tests/pdctl/helper.go | 34 -- tests/server/api/api_test.go | 440 +++++++++--------- .../server/storage/hot_region_storage_test.go | 183 ++++---- 3 files changed, 307 insertions(+), 350 deletions(-) diff --git a/tests/pdctl/helper.go b/tests/pdctl/helper.go index c5aaf948aa2..775f0b40f15 100644 --- a/tests/pdctl/helper.go +++ b/tests/pdctl/helper.go @@ -21,7 +21,6 @@ import ( "sort" "github.com/gogo/protobuf/proto" - "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/spf13/cobra" @@ -105,20 +104,6 @@ func MustPutStore(re *require.Assertions, svr *server.Server, store *metapb.Stor re.NoError(err) } -// MustPutStoreWithCheck is a temporary function for test purpose. -func MustPutStoreWithCheck(c *check.C, svr *server.Server, store *metapb.Store) { - store.Address = fmt.Sprintf("tikv%d", store.GetId()) - if len(store.Version) == 0 { - store.Version = versioninfo.MinSupportedVersion(versioninfo.Version2_0).String() - } - grpcServer := &server.GrpcServer{Server: svr} - _, err := grpcServer.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: svr.ClusterID()}, - Store: store, - }) - c.Assert(err, check.IsNil) -} - // MustPutRegion is used for test purpose. func MustPutRegion(re *require.Assertions, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { leader := &metapb.Peer{ @@ -138,25 +123,6 @@ func MustPutRegion(re *require.Assertions, cluster *tests.TestCluster, regionID, return r } -// MustPutRegionWithCheck is a temporary function for test purpose. 
-func MustPutRegionWithCheck(c *check.C, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { - leader := &metapb.Peer{ - Id: regionID, - StoreId: storeID, - } - metaRegion := &metapb.Region{ - Id: regionID, - StartKey: start, - EndKey: end, - Peers: []*metapb.Peer{leader}, - RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1}, - } - r := core.NewRegionInfo(metaRegion, leader, opts...) - err := cluster.HandleRegionHeartbeat(r) - c.Assert(err, check.IsNil) - return r -} - func checkerWithNilAssert(re *require.Assertions) *assertutil.Checker { checker := assertutil.NewChecker(func() { re.FailNow("should be nil") diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index e462adae2ba..49f8026e2af 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -27,11 +27,12 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/apiutil/serverapi" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/pkg/typeutil" @@ -51,55 +52,47 @@ var dialClient = &http.Client{ }, } -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -var _ = Suite(&serverTestSuite{}) - -type serverTestSuite struct{} - -func (s *serverTestSuite) TestReconnect(c *C) { +func TestReconnect(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) // Make connections to followers. // Make sure they proxy requests to the leader. leader := cluster.WaitLeader() for name, s := range cluster.GetServers() { if name != leader { - res, e := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") - c.Assert(e, IsNil) + res, err := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") + re.NoError(err) res.Body.Close() - c.Assert(res.StatusCode, Equals, http.StatusOK) + re.Equal(http.StatusOK, res.StatusCode) } } // Close the leader and wait for a new one. err = cluster.GetServer(leader).Stop() - c.Assert(err, IsNil) + re.NoError(err) newLeader := cluster.WaitLeader() - c.Assert(newLeader, Not(HasLen), 0) + re.NotEmpty(newLeader) // Make sure they proxy requests to the new leader. for name, s := range cluster.GetServers() { if name != leader { - testutil.WaitUntil(c, func() bool { - res, e := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") - c.Assert(e, IsNil) + testutil.Eventually(re, func() bool { + res, err := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") + re.NoError(err) defer res.Body.Close() return res.StatusCode == http.StatusOK }) @@ -107,15 +100,14 @@ func (s *serverTestSuite) TestReconnect(c *C) { } // Close the new leader and then we have only one node. 
- err = cluster.GetServer(newLeader).Stop() - c.Assert(err, IsNil) + re.NoError(cluster.GetServer(newLeader).Stop()) // Request will fail with no leader. for name, s := range cluster.GetServers() { if name != leader && name != newLeader { - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { res, err := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") - c.Assert(err, IsNil) + re.NoError(err) defer res.Body.Close() return res.StatusCode == http.StatusServiceUnavailable }) @@ -123,77 +115,80 @@ func (s *serverTestSuite) TestReconnect(c *C) { } } -var _ = Suite(&testMiddlewareSuite{}) - -type testMiddlewareSuite struct { +type middlewareTestSuite struct { + suite.Suite cleanup func() cluster *tests.TestCluster } -func (s *testMiddlewareSuite) SetUpSuite(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/api/enableFailpointAPI", "return(true)"), IsNil) +func TestMiddlewareTestSuite(t *testing.T) { + suite.Run(t, new(middlewareTestSuite)) +} + +func (suite *middlewareTestSuite) SetupSuite() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/api/enableFailpointAPI", "return(true)")) ctx, cancel := context.WithCancel(context.Background()) - s.cleanup = cancel + suite.cleanup = cancel cluster, err := tests.NewTestCluster(ctx, 3) - c.Assert(err, IsNil) - c.Assert(cluster.RunInitialServers(), IsNil) - c.Assert(cluster.WaitLeader(), Not(HasLen), 0) - s.cluster = cluster + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) + suite.NotEmpty(cluster.WaitLeader()) + suite.cluster = cluster } -func (s *testMiddlewareSuite) TearDownSuite(c *C) { - c.Assert(failpoint.Disable("github.com/tikv/pd/server/api/enableFailpointAPI"), IsNil) - s.cleanup() - s.cluster.Destroy() +func (suite *middlewareTestSuite) TearDownSuite() { + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/api/enableFailpointAPI")) + suite.cleanup() + suite.cluster.Destroy() } -func (s *testMiddlewareSuite) TestRequestInfoMiddleware(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/api/addRequestInfoMiddleware", "return(true)"), IsNil) - leader := s.cluster.GetServer(s.cluster.GetLeader()) +func (suite *middlewareTestSuite) TestRequestInfoMiddleware() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/api/addRequestInfoMiddleware", "return(true)")) + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) input := map[string]interface{}{ "enable-audit": "true", } data, err := json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) labels := make(map[string]interface{}) labels["testkey"] = "testvalue" data, _ = json.Marshal(labels) resp, err = dialClient.Post(leader.GetAddr()+"/pd/api/v1/debug/pprof/profile?force=true", "application/json", bytes.NewBuffer(data)) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.NoError(err) + suite.Equal(http.StatusOK, resp.StatusCode) - c.Assert(resp.Header.Get("service-label"), Equals, "Profile") - c.Assert(resp.Header.Get("url-param"), Equals, "{\"force\":[\"true\"]}") - 
c.Assert(resp.Header.Get("body-param"), Equals, "{\"testkey\":\"testvalue\"}") - c.Assert(resp.Header.Get("method"), Equals, "HTTP/1.1/POST:/pd/api/v1/debug/pprof/profile") - c.Assert(resp.Header.Get("component"), Equals, "anonymous") - c.Assert(resp.Header.Get("ip"), Equals, "127.0.0.1") + suite.Equal("Profile", resp.Header.Get("service-label")) + suite.Equal("{\"force\":[\"true\"]}", resp.Header.Get("url-param")) + suite.Equal("{\"testkey\":\"testvalue\"}", resp.Header.Get("body-param")) + suite.Equal("HTTP/1.1/POST:/pd/api/v1/debug/pprof/profile", resp.Header.Get("method")) + suite.Equal("anonymous", resp.Header.Get("component")) + suite.Equal("127.0.0.1", resp.Header.Get("ip")) input = map[string]interface{}{ "enable-audit": "false", } data, err = json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, false) + suite.False(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) - header := mustRequestSuccess(c, leader.GetServer()) - c.Assert(header.Get("service-label"), Equals, "") + header := mustRequestSuccess(suite.Require(), leader.GetServer()) + suite.Equal("", header.Get("service-label")) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/api/addRequestInfoMiddleware"), IsNil) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/api/addRequestInfoMiddleware")) } func BenchmarkDoRequestWithServiceMiddleware(b *testing.B) { @@ -248,96 +243,96 @@ func doTestRequest(srv *tests.TestServer) { resp.Body.Close() } -func (s *testMiddlewareSuite) TestAuditPrometheusBackend(c *C) { - leader := s.cluster.GetServer(s.cluster.GetLeader()) +func (suite *middlewareTestSuite) TestAuditPrometheusBackend() { + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) input := map[string]interface{}{ "enable-audit": "true", } data, err := json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) timeUnix := time.Now().Unix() - 20 req, _ = http.NewRequest("GET", fmt.Sprintf("%s/pd/api/v1/trend?from=%d", leader.GetAddr(), timeUnix), nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("GET", leader.GetAddr()+"/metrics", nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) defer resp.Body.Close() content, _ := io.ReadAll(resp.Body) output := string(content) - c.Assert(strings.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 1"), Equals, true) + suite.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 1") // resign to test persist config oldLeaderName := leader.GetServer().Name() leader.GetServer().GetMember().ResignEtcdLeader(leader.GetServer().Context(), oldLeaderName, 
"") - mustWaitLeader(c, s.cluster.GetServers()) - leader = s.cluster.GetServer(s.cluster.GetLeader()) + suite.mustWaitLeader() + leader = suite.cluster.GetServer(suite.cluster.GetLeader()) timeUnix = time.Now().Unix() - 20 req, _ = http.NewRequest("GET", fmt.Sprintf("%s/pd/api/v1/trend?from=%d", leader.GetAddr(), timeUnix), nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("GET", leader.GetAddr()+"/metrics", nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) defer resp.Body.Close() content, _ = io.ReadAll(resp.Body) output = string(content) - c.Assert(strings.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 2"), Equals, true) + suite.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 2") input = map[string]interface{}{ "enable-audit": "false", } data, err = json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, false) + suite.False(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) } -func (s *testMiddlewareSuite) TestAuditLocalLogBackend(c *C) { +func (suite *middlewareTestSuite) TestAuditLocalLogBackend() { tempStdoutFile, _ := os.CreateTemp("/tmp", "pd_tests") cfg := &log.Config{} cfg.File.Filename = tempStdoutFile.Name() cfg.Level = "info" lg, p, _ := log.InitLogger(cfg) log.ReplaceGlobals(lg, p) - leader := s.cluster.GetServer(s.cluster.GetLeader()) + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) input := map[string]interface{}{ "enable-audit": "true", } data, err := json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) req, _ = http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() b, _ := os.ReadFile(tempStdoutFile.Name()) - c.Assert(strings.Contains(string(b), "Audit Log"), Equals, true) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.Contains(string(b), "Audit Log") + suite.NoError(err) + suite.Equal(http.StatusOK, resp.StatusCode) os.Remove(tempStdoutFile.Name()) } @@ -386,50 +381,54 @@ func BenchmarkDoRequestWithoutLocalLogAudit(b *testing.B) { cluster.Destroy() } -var _ = Suite(&testRedirectorSuite{}) - -type testRedirectorSuite struct { +type redirectorTestSuite struct { + suite.Suite cleanup func() cluster *tests.TestCluster } -func (s *testRedirectorSuite) SetUpSuite(c *C) { +func TestRedirectorTestSuite(t *testing.T) { + suite.Run(t, new(redirectorTestSuite)) +} + +func (suite *redirectorTestSuite) SetupSuite() { ctx, cancel := 
context.WithCancel(context.Background()) - s.cleanup = cancel + suite.cleanup = cancel cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) - c.Assert(err, IsNil) - c.Assert(cluster.RunInitialServers(), IsNil) - c.Assert(cluster.WaitLeader(), Not(HasLen), 0) - s.cluster = cluster + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) + suite.NotEmpty(cluster.WaitLeader(), 0) + suite.cluster = cluster } -func (s *testRedirectorSuite) TearDownSuite(c *C) { - s.cleanup() - s.cluster.Destroy() +func (suite *redirectorTestSuite) TearDownSuite() { + suite.cleanup() + suite.cluster.Destroy() } -func (s *testRedirectorSuite) TestRedirect(c *C) { - leader := s.cluster.GetServer(s.cluster.GetLeader()) - c.Assert(leader, NotNil) - header := mustRequestSuccess(c, leader.GetServer()) +func (suite *redirectorTestSuite) TestRedirect() { + re := suite.Require() + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) + suite.NotNil(leader) + header := mustRequestSuccess(re, leader.GetServer()) header.Del("Date") - for _, svr := range s.cluster.GetServers() { + for _, svr := range suite.cluster.GetServers() { if svr != leader { - h := mustRequestSuccess(c, svr.GetServer()) + h := mustRequestSuccess(re, svr.GetServer()) h.Del("Date") - c.Assert(header, DeepEquals, h) + suite.Equal(h, header) } } } -func (s *testRedirectorSuite) TestAllowFollowerHandle(c *C) { +func (suite *redirectorTestSuite) TestAllowFollowerHandle() { // Find a follower. var follower *server.Server - leader := s.cluster.GetServer(s.cluster.GetLeader()) - for _, svr := range s.cluster.GetServers() { + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) + for _, svr := range suite.cluster.GetServers() { if svr != leader { follower = svr.GetServer() break @@ -438,22 +437,22 @@ func (s *testRedirectorSuite) TestAllowFollowerHandle(c *C) { addr := follower.GetAddr() + "/pd/api/v1/version" request, err := http.NewRequest(http.MethodGet, addr, nil) - c.Assert(err, IsNil) + suite.NoError(err) request.Header.Add(serverapi.AllowFollowerHandle, "true") resp, err := dialClient.Do(request) - c.Assert(err, IsNil) - c.Assert(resp.Header.Get(serverapi.RedirectorHeader), Equals, "") + suite.NoError(err) + suite.Equal("", resp.Header.Get(serverapi.RedirectorHeader)) defer resp.Body.Close() - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) + suite.NoError(err) } -func (s *testRedirectorSuite) TestNotLeader(c *C) { +func (suite *redirectorTestSuite) TestNotLeader() { // Find a follower. var follower *server.Server - leader := s.cluster.GetServer(s.cluster.GetLeader()) - for _, svr := range s.cluster.GetServers() { + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) + for _, svr := range suite.cluster.GetServers() { if svr != leader { follower = svr.GetServer() break @@ -463,55 +462,52 @@ func (s *testRedirectorSuite) TestNotLeader(c *C) { addr := follower.GetAddr() + "/pd/api/v1/version" // Request to follower without redirectorHeader is OK. 
request, err := http.NewRequest(http.MethodGet, addr, nil) - c.Assert(err, IsNil) + suite.NoError(err) resp, err := dialClient.Do(request) - c.Assert(err, IsNil) + suite.NoError(err) defer resp.Body.Close() - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) + suite.NoError(err) // Request to follower with redirectorHeader will fail. request.RequestURI = "" request.Header.Set(serverapi.RedirectorHeader, "pd") resp1, err := dialClient.Do(request) - c.Assert(err, IsNil) + suite.NoError(err) defer resp1.Body.Close() - c.Assert(resp1.StatusCode, Not(Equals), http.StatusOK) + suite.NotEqual(http.StatusOK, resp1.StatusCode) _, err = io.ReadAll(resp1.Body) - c.Assert(err, IsNil) + suite.NoError(err) } -func mustRequestSuccess(c *C, s *server.Server) http.Header { +func mustRequestSuccess(re *require.Assertions, s *server.Server) http.Header { resp, err := dialClient.Get(s.GetAddr() + "/pd/api/v1/version") - c.Assert(err, IsNil) + re.NoError(err) defer resp.Body.Close() _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) + re.NoError(err) + re.Equal(http.StatusOK, resp.StatusCode) return resp.Header } -var _ = Suite(&testProgressSuite{}) - -type testProgressSuite struct{} - -func (s *testProgressSuite) TestRemovingProgress(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`), IsNil) +func TestRemovingProgress(t *testing.T) { + re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 1 }) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leader := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leader.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leader.GetAddr()) clusterID := leader.GetClusterID() req := &pdpb.BootstrapRequest{ Header: testutil.NewRequestHeader(clusterID), @@ -519,7 +515,7 @@ func (s *testProgressSuite) TestRemovingProgress(c *C) { Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}}, } _, err = grpcPDClient.Bootstrap(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) stores := []*metapb.Store{ { Id: 1, @@ -542,92 +538,93 @@ func (s *testProgressSuite) TestRemovingProgress(c *C) { } for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leader.GetServer(), store) + pdctl.MustPutStore(re, leader.GetServer(), store) } - pdctl.MustPutRegionWithCheck(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) - pdctl.MustPutRegionWithCheck(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(30)) - pdctl.MustPutRegionWithCheck(c, cluster, 1002, 1, []byte("e"), []byte("f"), core.SetApproximateSize(50)) - pdctl.MustPutRegionWithCheck(c, cluster, 1003, 2, []byte("g"), []byte("h"), core.SetApproximateSize(40)) + pdctl.MustPutRegion(re, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) + pdctl.MustPutRegion(re, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(30)) + pdctl.MustPutRegion(re, cluster, 
1002, 1, []byte("e"), []byte("f"), core.SetApproximateSize(50)) + pdctl.MustPutRegion(re, cluster, 1003, 2, []byte("g"), []byte("h"), core.SetApproximateSize(40)) // no store removing - output := sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the action"), IsTrue) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the given store ID"), IsTrue) + output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the given store ID") // remove store 1 and store 2 - _ = sendRequest(c, leader.GetAddr()+"/pd/api/v1/store/1", http.MethodDelete, http.StatusOK) - _ = sendRequest(c, leader.GetAddr()+"/pd/api/v1/store/2", http.MethodDelete, http.StatusOK) + _ = sendRequest(re, leader.GetAddr()+"/pd/api/v1/store/1", http.MethodDelete, http.StatusOK) + _ = sendRequest(re, leader.GetAddr()+"/pd/api/v1/store/2", http.MethodDelete, http.StatusOK) // size is not changed. - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) var p api.Progress - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") - c.Assert(p.Progress, Equals, 0.0) - c.Assert(p.CurrentSpeed, Equals, 0.0) - c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) // update size - pdctl.MustPutRegionWithCheck(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) - pdctl.MustPutRegionWithCheck(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) + pdctl.MustPutRegion(re, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) + pdctl.MustPutRegion(re, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) // is not prepared time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") - c.Assert(p.Progress, Equals, 0.0) - c.Assert(p.CurrentSpeed, Equals, 0.0) - c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) leader.GetRaftCluster().SetPrepared() time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") + output = sendRequest(re, 
leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) // store 1: (60-20)/(60+50) ~= 0.36 // store 2: (30-10)/(30+40) ~= 0.28 // average progress ~= (0.36+0.28)/2 = 0.32 - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.32") + re.Equal("0.32", fmt.Sprintf("%.2f", p.Progress)) // store 1: 40/10s = 4 // store 2: 20/10s = 2 // average speed = (2+4)/2 = 33 - c.Assert(p.CurrentSpeed, Equals, 3.0) + re.Equal(3.0, p.CurrentSpeed) // store 1: (20+50)/4 = 17.5s // store 2: (10+40)/2 = 25s // average time = (17.5+25)/2 = 21.25s - c.Assert(p.LeftSeconds, Equals, 21.25) + re.Equal(21.25, p.LeftSeconds) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) // store 2: (30-10)/(30+40) ~= 0.285 - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.29") + re.Equal("0.29", fmt.Sprintf("%.2f", p.Progress)) // store 2: 20/10s = 2 - c.Assert(p.CurrentSpeed, Equals, 2.0) + re.Equal(2.0, p.CurrentSpeed) // store 2: (10+40)/2 = 25s - c.Assert(p.LeftSeconds, Equals, 25.0) + re.Equal(25.0, p.LeftSeconds) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) } -func (s *testProgressSuite) TestPreparingProgress(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`), IsNil) +func TestPreparingProgress(t *testing.T) { + re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 1 }) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leader := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leader.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leader.GetAddr()) clusterID := leader.GetClusterID() req := &pdpb.BootstrapRequest{ Header: testutil.NewRequestHeader(clusterID), @@ -635,7 +632,7 @@ func (s *testProgressSuite) TestPreparingProgress(c *C) { Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}}, } _, err = grpcPDClient.Bootstrap(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) stores := []*metapb.Store{ { Id: 1, @@ -675,80 +672,79 @@ func (s *testProgressSuite) TestPreparingProgress(c *C) { } for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leader.GetServer(), store) + pdctl.MustPutStore(re, leader.GetServer(), store) } for i := 0; i < 100; i++ { - pdctl.MustPutRegionWithCheck(c, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("p%d", i)), []byte(fmt.Sprintf("%d", i+1)), core.SetApproximateSize(10)) + pdctl.MustPutRegion(re, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("p%d", i)), []byte(fmt.Sprintf("%d", i+1)), core.SetApproximateSize(10)) } // no 
store preparing - output := sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the action"), IsTrue) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the given store ID"), IsTrue) + output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the given store ID") // is not prepared time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the action"), IsTrue) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the given store ID"), IsTrue) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the given store ID") // size is not changed. leader.GetRaftCluster().SetPrepared() time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) var p api.Progress - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "preparing") - c.Assert(p.Progress, Equals, 0.0) - c.Assert(p.CurrentSpeed, Equals, 0.0) - c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("preparing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) // update size - pdctl.MustPutRegionWithCheck(c, cluster, 1000, 4, []byte(fmt.Sprintf("%d", 1000)), []byte(fmt.Sprintf("%d", 1001)), core.SetApproximateSize(10)) - pdctl.MustPutRegionWithCheck(c, cluster, 1001, 5, []byte(fmt.Sprintf("%d", 1001)), []byte(fmt.Sprintf("%d", 1002)), core.SetApproximateSize(40)) + pdctl.MustPutRegion(re, cluster, 1000, 4, []byte(fmt.Sprintf("%d", 1000)), []byte(fmt.Sprintf("%d", 1001)), core.SetApproximateSize(10)) + pdctl.MustPutRegion(re, cluster, 1001, 5, []byte(fmt.Sprintf("%d", 1001)), []byte(fmt.Sprintf("%d", 1002)), core.SetApproximateSize(40)) time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "preparing") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("preparing", p.Action) // store 4: 10/(210*0.9) ~= 0.05 // store 5: 40/(210*0.9) ~= 0.21 // average 
progress ~= (0.05+0.21)/2 = 0.13 - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.13") + re.Equal("0.13", fmt.Sprintf("%.2f", p.Progress)) // store 4: 10/10s = 1 // store 5: 40/10s = 4 // average speed = (1+4)/2 = 2.5 - c.Assert(p.CurrentSpeed, Equals, 2.5) + re.Equal(2.5, p.CurrentSpeed) // store 4: 179/1 ~= 179 // store 5: 149/4 ~= 37.25 // average time ~= (179+37.25)/2 = 108.125 - c.Assert(p.LeftSeconds, Equals, 108.125) - - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "preparing") - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.05") - c.Assert(p.CurrentSpeed, Equals, 1.0) - c.Assert(p.LeftSeconds, Equals, 179.0) - - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"), IsNil) + re.Equal(108.125, p.LeftSeconds) + + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("preparing", p.Action) + re.Equal("0.05", fmt.Sprintf("%.2f", p.Progress)) + re.Equal(1.0, p.CurrentSpeed) + re.Equal(179.0, p.LeftSeconds) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) } -func sendRequest(c *C, url string, method string, statusCode int) []byte { +func sendRequest(re *require.Assertions, url string, method string, statusCode int) []byte { req, _ := http.NewRequest(method, url, nil) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, statusCode) + re.NoError(err) + re.Equal(statusCode, resp.StatusCode) output, err := io.ReadAll(resp.Body) - c.Assert(err, IsNil) + re.NoError(err) resp.Body.Close() return output } -func mustWaitLeader(c *C, svrs map[string]*tests.TestServer) *server.Server { +func (suite *middlewareTestSuite) mustWaitLeader() *server.Server { var leader *server.Server - testutil.WaitUntil(c, func() bool { - for _, s := range svrs { + testutil.Eventually(suite.Require(), func() bool { + for _, s := range suite.cluster.GetServers() { if !s.GetServer().IsClosed() && s.GetServer().GetMember().IsLeader() { leader = s.GetServer() return true diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index 662f128dd1b..9432ceb0c77 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -19,9 +19,9 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/statistics" @@ -30,15 +30,8 @@ import ( "github.com/tikv/pd/tests/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&hotRegionHistorySuite{}) - -type hotRegionHistorySuite struct{} - -func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { +func TestHotRegionStorage(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -49,9 +42,9 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() stores := []*metapb.Store{ { @@ -67,16 +60,16 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegionWithCheck(c, cluster, 3, 1, []byte("e"), []byte("f")) - pdctl.MustPutRegionWithCheck(c, cluster, 4, 2, []byte("g"), []byte("h")) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f")) + pdctl.MustPutRegion(re, cluster, 4, 2, []byte("g"), []byte("h")) storeStats := []*pdpb.StoreStats{ { StoreId: 1, @@ -108,39 +101,40 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter := hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err := iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(2)) - c.Assert(next.StoreID, Equals, uint64(2)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(2), next.RegionID) + re.Equal(uint64(2), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, IsNil) - c.Assert(err, IsNil) + re.NoError(err) + 
re.Nil(next) iter = hotRegionStorage.NewIterator([]string{storage.ReadType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(3)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.ReadType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(3), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.ReadType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(4)) - c.Assert(next.StoreID, Equals, uint64(2)) - c.Assert(next.HotRegionType, Equals, storage.ReadType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(4), next.RegionID) + re.Equal(uint64(2), next.StoreID) + re.Equal(storage.ReadType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, IsNil) - c.Assert(err, IsNil) + re.NoError(err) + re.Nil(next) } -func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C) { +func TestHotRegionStorageReservedDayConfigChange(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) interval := 100 * time.Millisecond @@ -152,9 +146,9 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() stores := []*metapb.Store{ { @@ -170,46 +164,46 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter := hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err := iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) schedule := leaderServer.GetConfig().Schedule // set reserved day to zero,close hot region storage schedule.HotRegionsReservedDays = 0 leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) - pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), 
core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) time.Sleep(10 * interval) endTime = time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) // set reserved day to one,open hot region storage schedule.HotRegionsReservedDays = 1 leaderServer.GetServer().SetScheduleConfig(schedule) @@ -218,20 +212,21 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(2)) - c.Assert(next.StoreID, Equals, uint64(2)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(2), next.RegionID) + re.Equal(uint64(2), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) } -func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c *C) { +func TestHotRegionStorageWriteIntervalConfigChange(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) interval := 100 * time.Millisecond @@ -243,9 +238,9 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() stores := []*metapb.Store{ { @@ -261,45 +256,45 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), 
core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter := hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err := iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) schedule := leaderServer.GetConfig().Schedule // set the time to 20 times the interval schedule.HotRegionsWriteInterval.Duration = 20 * interval leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) - pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) time.Sleep(10 * interval) endTime = time.Now().UnixNano() / int64(time.Millisecond) // it cant get new hot region because wait time smaller than hot region write interval hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NoError(err) + re.NotNil(next) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) } From eb2ed76f35a8098a756e742c44196af8dd22b58c Mon Sep 17 00:00:00 2001 From: JmPotato Date: Fri, 17 Jun 2022 15:30:35 +0800 Subject: [PATCH 28/35] tests: testify the cluster tests (#5167) ref tikv/pd#4813 Testify the cluster tests. Signed-off-by: JmPotato Co-authored-by: Ti Chi Robot --- tests/server/cluster/cluster_test.go | 888 +++++++++++----------- tests/server/cluster/cluster_work_test.go | 83 +- 2 files changed, 490 insertions(+), 481 deletions(-) diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index f493ed21b00..5c3ea03827e 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -24,11 +24,11 @@ import ( "time" "github.com/coreos/go-semver/semver" - . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/replication_modepb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/dashboard" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/testutil" @@ -47,10 +47,6 @@ import ( "google.golang.org/grpc/status" ) -func Test(t *testing.T) { - TestingT(t) -} - const ( initEpochVersion uint64 = 1 initEpochConfVer uint64 = 1 @@ -59,73 +55,62 @@ const ( testStoreAddr = "127.0.0.1:0" ) -var _ = Suite(&clusterTestSuite{}) - -type clusterTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *clusterTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - // to prevent GetStorage - dashboard.SetCheckInterval(30 * time.Minute) -} - -func (s *clusterTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *clusterTestSuite) TestBootstrap(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestBootstrap(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() // IsBootstrapped returns false. req := newIsBootstrapRequest(clusterID) resp, err := grpcPDClient.IsBootstrapped(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp, NotNil) - c.Assert(resp.GetBootstrapped(), IsFalse) + re.NoError(err) + re.NotNil(resp) + re.False(resp.GetBootstrapped()) // Bootstrap the cluster. - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) // IsBootstrapped returns true. req = newIsBootstrapRequest(clusterID) resp, err = grpcPDClient.IsBootstrapped(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetBootstrapped(), IsTrue) + re.NoError(err) + re.True(resp.GetBootstrapped()) // check bootstrapped error. 
reqBoot := newBootstrapRequest(clusterID) respBoot, err := grpcPDClient.Bootstrap(context.Background(), reqBoot) - c.Assert(err, IsNil) - c.Assert(respBoot.GetHeader().GetError(), NotNil) - c.Assert(respBoot.GetHeader().GetError().GetType(), Equals, pdpb.ErrorType_ALREADY_BOOTSTRAPPED) + re.NoError(err) + re.NotNil(respBoot.GetHeader().GetError()) + re.Equal(pdpb.ErrorType_ALREADY_BOOTSTRAPPED, respBoot.GetHeader().GetError().GetType()) } -func (s *clusterTestSuite) TestDamagedRegion(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestDamagedRegion(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) rc := leaderServer.GetRaftCluster() region := &metapb.Region{ @@ -142,7 +127,7 @@ func (s *clusterTestSuite) TestDamagedRegion(c *C) { // To put region. regionInfo := core.NewRegionInfo(region, region.Peers[0], core.SetApproximateSize(30)) err = tc.HandleRegionHeartbeat(regionInfo) - c.Assert(err, IsNil) + re.NoError(err) stores := []*pdpb.PutStoreRequest{ { @@ -175,7 +160,7 @@ func (s *clusterTestSuite) TestDamagedRegion(c *C) { svr := &server.GrpcServer{Server: leaderServer.GetServer()} for _, store := range stores { _, err = svr.PutStore(context.Background(), store) - c.Assert(err, IsNil) + re.NoError(err) } // To validate remove peer op be added. @@ -183,50 +168,53 @@ func (s *clusterTestSuite) TestDamagedRegion(c *C) { Header: testutil.NewRequestHeader(clusterID), Stats: &pdpb.StoreStats{StoreId: 2, DamagedRegionsId: []uint64{10}}, } - c.Assert(rc.GetOperatorController().OperatorCount(operator.OpAdmin), Equals, uint64(0)) + re.Equal(uint64(0), rc.GetOperatorController().OperatorCount(operator.OpAdmin)) _, err1 := grpcPDClient.StoreHeartbeat(context.Background(), req1) - c.Assert(err1, IsNil) - c.Assert(rc.GetOperatorController().OperatorCount(operator.OpAdmin), Equals, uint64(1)) + re.NoError(err1) + re.Equal(uint64(1), rc.GetOperatorController().OperatorCount(operator.OpAdmin)) } -func (s *clusterTestSuite) TestGetPutConfig(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestGetPutConfig(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) // Get region. 
- region := getRegion(c, clusterID, grpcPDClient, []byte("abc")) - c.Assert(region.GetPeers(), HasLen, 1) + region := getRegion(re, clusterID, grpcPDClient, []byte("abc")) + re.Len(region.GetPeers(), 1) peer := region.GetPeers()[0] // Get region by id. - regionByID := getRegionByID(c, clusterID, grpcPDClient, region.GetId()) - c.Assert(region, DeepEquals, regionByID) + regionByID := getRegionByID(re, clusterID, grpcPDClient, region.GetId()) + re.Equal(regionByID, region) r := core.NewRegionInfo(region, region.Peers[0], core.SetApproximateSize(30)) err = tc.HandleRegionHeartbeat(r) - c.Assert(err, IsNil) + re.NoError(err) // Get store. storeID := peer.GetStoreId() - store := getStore(c, clusterID, grpcPDClient, storeID) + store := getStore(re, clusterID, grpcPDClient, storeID) // Update store. store.Address = "127.0.0.1:1" - testPutStore(c, clusterID, rc, grpcPDClient, store) + testPutStore(re, clusterID, rc, grpcPDClient, store) // Remove store. - testRemoveStore(c, clusterID, rc, grpcPDClient, store) + testRemoveStore(re, clusterID, rc, grpcPDClient, store) // Update cluster config. req := &pdpb.PutClusterConfigRequest{ @@ -237,73 +225,73 @@ func (s *clusterTestSuite) TestGetPutConfig(c *C) { }, } resp, err := grpcPDClient.PutClusterConfig(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp, NotNil) - meta := getClusterConfig(c, clusterID, grpcPDClient) - c.Assert(meta.GetMaxPeerCount(), Equals, uint32(5)) + re.NoError(err) + re.NotNil(resp) + meta := getClusterConfig(re, clusterID, grpcPDClient) + re.Equal(uint32(5), meta.GetMaxPeerCount()) } -func testPutStore(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store) { +func testPutStore(re *require.Assertions, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store) { // Update store. _, err := putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) - updatedStore := getStore(c, clusterID, grpcPDClient, store.GetId()) - c.Assert(updatedStore, DeepEquals, store) + re.NoError(err) + updatedStore := getStore(re, clusterID, grpcPDClient, store.GetId()) + re.Equal(store, updatedStore) // Update store again. _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) rc.GetAllocator().Alloc() id, err := rc.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) // Put new store with a duplicated address when old store is up will fail. _, err = putStore(grpcPDClient, clusterID, newMetaStore(id, store.GetAddress(), "2.1.0", metapb.StoreState_Up, getTestDeployPath(id))) - c.Assert(err, NotNil) + re.Error(err) id, err = rc.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) // Put new store with a duplicated address when old store is offline will fail. - resetStoreState(c, rc, store.GetId(), metapb.StoreState_Offline) + resetStoreState(re, rc, store.GetId(), metapb.StoreState_Offline) _, err = putStore(grpcPDClient, clusterID, newMetaStore(id, store.GetAddress(), "2.1.0", metapb.StoreState_Up, getTestDeployPath(id))) - c.Assert(err, NotNil) + re.Error(err) id, err = rc.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) // Put new store with a duplicated address when old store is tombstone is OK. 
- resetStoreState(c, rc, store.GetId(), metapb.StoreState_Tombstone) + resetStoreState(re, rc, store.GetId(), metapb.StoreState_Tombstone) rc.GetStore(store.GetId()) _, err = putStore(grpcPDClient, clusterID, newMetaStore(id, store.GetAddress(), "2.1.0", metapb.StoreState_Up, getTestDeployPath(id))) - c.Assert(err, IsNil) + re.NoError(err) id, err = rc.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) deployPath := getTestDeployPath(id) // Put a new store. _, err = putStore(grpcPDClient, clusterID, newMetaStore(id, testMetaStoreAddr, "2.1.0", metapb.StoreState_Up, deployPath)) - c.Assert(err, IsNil) + re.NoError(err) s := rc.GetStore(id).GetMeta() - c.Assert(s.DeployPath, Equals, deployPath) + re.Equal(deployPath, s.DeployPath) deployPath = fmt.Sprintf("move/test/store%d", id) _, err = putStore(grpcPDClient, clusterID, newMetaStore(id, testMetaStoreAddr, "2.1.0", metapb.StoreState_Up, deployPath)) - c.Assert(err, IsNil) + re.NoError(err) s = rc.GetStore(id).GetMeta() - c.Assert(s.DeployPath, Equals, deployPath) + re.Equal(deployPath, s.DeployPath) // Put an existed store with duplicated address with other old stores. - resetStoreState(c, rc, store.GetId(), metapb.StoreState_Up) + resetStoreState(re, rc, store.GetId(), metapb.StoreState_Up) _, err = putStore(grpcPDClient, clusterID, newMetaStore(store.GetId(), testMetaStoreAddr, "2.1.0", metapb.StoreState_Up, getTestDeployPath(store.GetId()))) - c.Assert(err, NotNil) + re.Error(err) } func getTestDeployPath(storeID uint64) string { return fmt.Sprintf("test/store%d", storeID) } -func resetStoreState(c *C, rc *cluster.RaftCluster, storeID uint64, state metapb.StoreState) { +func resetStoreState(re *require.Assertions, rc *cluster.RaftCluster, storeID uint64, state metapb.StoreState) { store := rc.GetStore(storeID) - c.Assert(store, NotNil) + re.NotNil(store) newStore := store.Clone(core.OfflineStore(false)) if state == metapb.StoreState_Up { newStore = newStore.Clone(core.UpStore()) @@ -319,7 +307,7 @@ func resetStoreState(c *C, rc *cluster.RaftCluster, storeID uint64, state metapb } } -func testStateAndLimit(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store, beforeState metapb.StoreState, run func(*cluster.RaftCluster) error, expectStates ...metapb.StoreState) { +func testStateAndLimit(re *require.Assertions, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store, beforeState metapb.StoreState, run func(*cluster.RaftCluster) error, expectStates ...metapb.StoreState) { // prepare storeID := store.GetId() oc := rc.GetOperatorController() @@ -330,68 +318,68 @@ func testStateAndLimit(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDCl op = operator.NewTestOperator(2, &metapb.RegionEpoch{}, operator.OpRegion, operator.RemovePeer{FromStore: storeID}) oc.AddOperator(op) - resetStoreState(c, rc, store.GetId(), beforeState) + resetStoreState(re, rc, store.GetId(), beforeState) _, isOKBefore := rc.GetAllStoresLimit()[storeID] // run err := run(rc) // judge _, isOKAfter := rc.GetAllStoresLimit()[storeID] if len(expectStates) != 0 { - c.Assert(err, IsNil) + re.NoError(err) expectState := expectStates[0] - c.Assert(getStore(c, clusterID, grpcPDClient, storeID).GetState(), Equals, expectState) + re.Equal(expectState, getStore(re, clusterID, grpcPDClient, storeID).GetState()) if expectState == metapb.StoreState_Offline { - c.Assert(isOKAfter, IsTrue) + re.True(isOKAfter) } else if expectState == metapb.StoreState_Tombstone { - c.Assert(isOKAfter, IsFalse) 
+ re.False(isOKAfter) } } else { - c.Assert(err, NotNil) - c.Assert(isOKBefore, Equals, isOKAfter) + re.Error(err) + re.Equal(isOKAfter, isOKBefore) } } -func testRemoveStore(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store) { +func testRemoveStore(re *require.Assertions, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store) { rc.GetOpts().SetMaxReplicas(2) defer rc.GetOpts().SetMaxReplicas(3) { beforeState := metapb.StoreState_Up // When store is up // Case 1: RemoveStore should be OK; - testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { + testStateAndLimit(re, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { return cluster.RemoveStore(store.GetId(), false) }, metapb.StoreState_Offline) // Case 2: RemoveStore with physically destroyed should be OK; - testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { + testStateAndLimit(re, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { return cluster.RemoveStore(store.GetId(), true) }, metapb.StoreState_Offline) } { beforeState := metapb.StoreState_Offline // When store is offline // Case 1: RemoveStore should be OK; - testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { + testStateAndLimit(re, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { return cluster.RemoveStore(store.GetId(), false) }, metapb.StoreState_Offline) // Case 2: remove store with physically destroyed should be success - testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { + testStateAndLimit(re, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { return cluster.RemoveStore(store.GetId(), true) }, metapb.StoreState_Offline) } { beforeState := metapb.StoreState_Tombstone // When store is tombstone // Case 1: RemoveStore should should fail; - testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { + testStateAndLimit(re, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { return cluster.RemoveStore(store.GetId(), false) }) // Case 2: RemoveStore with physically destroyed should fail; - testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { + testStateAndLimit(re, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error { return cluster.RemoveStore(store.GetId(), true) }) } { // Put after removed should return tombstone error. resp, err := putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) - c.Assert(resp.GetHeader().GetError().GetType(), Equals, pdpb.ErrorType_STORE_TOMBSTONE) + re.NoError(err) + re.Equal(pdpb.ErrorType_STORE_TOMBSTONE, resp.GetHeader().GetError().GetType()) } { // Update after removed should return tombstone error. 
@@ -400,182 +388,196 @@ func testRemoveStore(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClie Stats: &pdpb.StoreStats{StoreId: store.GetId()}, } resp, err := grpcPDClient.StoreHeartbeat(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetHeader().GetError().GetType(), Equals, pdpb.ErrorType_STORE_TOMBSTONE) + re.NoError(err) + re.Equal(pdpb.ErrorType_STORE_TOMBSTONE, resp.GetHeader().GetError().GetType()) } } // Make sure PD will not panic if it start and stop again and again. -func (s *clusterTestSuite) TestRaftClusterRestart(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestRaftClusterRestart(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.Stop() err = rc.Start(leaderServer.GetServer()) - c.Assert(err, IsNil) + re.NoError(err) rc = leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.Stop() } // Make sure PD will not deadlock if it start and stop again and again. -func (s *clusterTestSuite) TestRaftClusterMultipleRestart(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestRaftClusterMultipleRestart(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) // add an offline store storeID, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) store := newMetaStore(storeID, "127.0.0.1:4", "2.1.0", metapb.StoreState_Offline, getTestDeployPath(storeID)) rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) err = rc.PutStore(store) - c.Assert(err, IsNil) - c.Assert(tc, NotNil) + re.NoError(err) + re.NotNil(tc) // let the job run at small interval - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) for i := 0; i < 100; i++ { err = rc.Start(leaderServer.GetServer()) - c.Assert(err, IsNil) + re.NoError(err) time.Sleep(time.Millisecond) rc = leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.Stop() } - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) } func newMetaStore(storeID uint64, addr, version string, state 
metapb.StoreState, deployPath string) *metapb.Store { return &metapb.Store{Id: storeID, Address: addr, Version: version, State: state, DeployPath: deployPath} } -func (s *clusterTestSuite) TestGetPDMembers(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestGetPDMembers(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() req := &pdpb.GetMembersRequest{Header: testutil.NewRequestHeader(clusterID)} resp, err := grpcPDClient.GetMembers(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) // A more strict test can be found at api/member_test.go - c.Assert(resp.GetMembers(), Not(HasLen), 0) + re.NotEmpty(resp.GetMembers()) } -func (s *clusterTestSuite) TestNotLeader(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 2) +func TestNotLeader(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 2) defer tc.Destroy() - c.Assert(err, IsNil) - c.Assert(tc.RunInitialServers(), IsNil) - + re.NoError(err) + re.NoError(tc.RunInitialServers()) tc.WaitLeader() followerServer := tc.GetServer(tc.GetFollower()) - grpcPDClient := testutil.MustNewGrpcClient(c, followerServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, followerServer.GetAddr()) clusterID := followerServer.GetClusterID() req := &pdpb.AllocIDRequest{Header: testutil.NewRequestHeader(clusterID)} resp, err := grpcPDClient.AllocID(context.Background(), req) - c.Assert(resp, IsNil) + re.Nil(resp) grpcStatus, ok := status.FromError(err) - c.Assert(ok, IsTrue) - c.Assert(grpcStatus.Code(), Equals, codes.Unavailable) - c.Assert(grpcStatus.Message(), Equals, "not leader") + re.True(ok) + re.Equal(codes.Unavailable, grpcStatus.Code()) + re.Equal("not leader", grpcStatus.Message()) } -func (s *clusterTestSuite) TestStoreVersionChange(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestStoreVersionChange(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) svr := leaderServer.GetServer() svr.SetClusterVersion("2.0.0") storeID, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) store := newMetaStore(storeID, "127.0.0.1:4", "2.1.0", metapb.StoreState_Up, getTestDeployPath(storeID)) var wg sync.WaitGroup - c.Assert(failpoint.Enable("github.com/tikv/pd/server/versionChangeConcurrency", `return(true)`), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/versionChangeConcurrency", `return(true)`)) 
wg.Add(1) go func() { defer wg.Done() _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) }() time.Sleep(100 * time.Millisecond) svr.SetClusterVersion("1.0.0") wg.Wait() v, err := semver.NewVersion("1.0.0") - c.Assert(err, IsNil) - c.Assert(svr.GetClusterVersion(), Equals, *v) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/versionChangeConcurrency"), IsNil) + re.NoError(err) + re.Equal(*v, svr.GetClusterVersion()) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/versionChangeConcurrency")) } -func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestConcurrentHandleRegion(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dashboard.SetCheckInterval(30 * time.Minute) + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) - + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) - + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1", "127.0.1.1:2"} rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.SetStorage(storage.NewStorageWithMemoryBackend()) stores := make([]*metapb.Store, 0, len(storeAddrs)) id := leaderServer.GetAllocator() for _, addr := range storeAddrs { storeID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) store := newMetaStore(storeID, addr, "2.1.0", metapb.StoreState_Up, getTestDeployPath(storeID)) stores = append(stores, store) _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup // register store and bind stream for i, store := range stores { @@ -589,13 +591,13 @@ func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) { } grpcServer := &server.GrpcServer{Server: leaderServer.GetServer()} _, err := grpcServer.StoreHeartbeat(context.TODO(), req) - c.Assert(err, IsNil) + re.NoError(err) stream, err := grpcPDClient.RegionHeartbeat(ctx) - c.Assert(err, IsNil) + re.NoError(err) peerID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) regionID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) peer := &metapb.Peer{Id: peerID, StoreId: store.GetId()} regionReq := &pdpb.RegionHeartbeatRequest{ Header: testutil.NewRequestHeader(clusterID), @@ -606,7 +608,7 @@ func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) { Leader: peer, } err = stream.Send(regionReq) - c.Assert(err, IsNil) + re.NoError(err) // make sure the first store can receive one response if i == 0 { wg.Add(1) @@ -614,7 +616,7 @@ func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) { go func(isReceiver bool) { if isReceiver { _, err := stream.Recv() - c.Assert(err, IsNil) + re.NoError(err) wg.Done() } for { @@ -631,9 +633,9 @@ func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) { concurrent := 1000 for i := 0; i < concurrent; i++ { peerID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) regionID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) region := &metapb.Region{ Id: regionID, StartKey: 
[]byte(fmt.Sprintf("%5d", i)), @@ -654,33 +656,36 @@ func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) { go func() { defer wg.Done() err := rc.HandleRegionHeartbeat(core.NewRegionInfo(region, region.Peers[0])) - c.Assert(err, IsNil) + re.NoError(err) }() } wg.Wait() } -func (s *clusterTestSuite) TestSetScheduleOpt(c *C) { +func TestSetScheduleOpt(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // TODO: enable placementrules - tc, err := tests.NewTestCluster(s.ctx, 1, func(cfg *config.Config, svr string) { cfg.Replication.EnablePlacementRules = false }) + tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, svr string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) cfg := config.NewConfig() cfg.Schedule.TolerantSizeRatio = 5 err = cfg.Adjust(nil, false) - c.Assert(err, IsNil) + re.NoError(err) opt := config.NewPersistOptions(cfg) - c.Assert(err, IsNil) + re.NoError(err) svr := leaderServer.GetServer() scheduleCfg := opt.GetScheduleConfig() @@ -693,68 +698,63 @@ func (s *clusterTestSuite) TestSetScheduleOpt(c *C) { scheduleCfg.MaxSnapshotCount = 10 pdServerCfg.UseRegionStorage = true typ, labelKey, labelValue := "testTyp", "testKey", "testValue" - - c.Assert(svr.SetScheduleConfig(*scheduleCfg), IsNil) - c.Assert(svr.SetPDServerConfig(*pdServerCfg), IsNil) - c.Assert(svr.SetLabelProperty(typ, labelKey, labelValue), IsNil) - c.Assert(svr.SetReplicationConfig(*replicationCfg), IsNil) - - c.Assert(persistOptions.GetMaxReplicas(), Equals, 5) - c.Assert(persistOptions.GetMaxSnapshotCount(), Equals, uint64(10)) - c.Assert(persistOptions.IsUseRegionStorage(), IsTrue) - c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Key, Equals, "testKey") - c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Value, Equals, "testValue") - - c.Assert(svr.DeleteLabelProperty(typ, labelKey, labelValue), IsNil) - - c.Assert(persistOptions.GetLabelPropertyConfig()[typ], HasLen, 0) + re.NoError(svr.SetScheduleConfig(*scheduleCfg)) + re.NoError(svr.SetPDServerConfig(*pdServerCfg)) + re.NoError(svr.SetLabelProperty(typ, labelKey, labelValue)) + re.NoError(svr.SetReplicationConfig(*replicationCfg)) + re.Equal(5, persistOptions.GetMaxReplicas()) + re.Equal(uint64(10), persistOptions.GetMaxSnapshotCount()) + re.True(persistOptions.IsUseRegionStorage()) + re.Equal("testKey", persistOptions.GetLabelPropertyConfig()[typ][0].Key) + re.Equal("testValue", persistOptions.GetLabelPropertyConfig()[typ][0].Value) + re.NoError(svr.DeleteLabelProperty(typ, labelKey, labelValue)) + re.Len(persistOptions.GetLabelPropertyConfig()[typ], 0) // PUT GET failed - c.Assert(failpoint.Enable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed", `return(true)`), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed", `return(true)`)) replicationCfg.MaxReplicas = 7 scheduleCfg.MaxSnapshotCount = 20 pdServerCfg.UseRegionStorage = false - - c.Assert(svr.SetScheduleConfig(*scheduleCfg), NotNil) - 
c.Assert(svr.SetReplicationConfig(*replicationCfg), NotNil) - c.Assert(svr.SetPDServerConfig(*pdServerCfg), NotNil) - c.Assert(svr.SetLabelProperty(typ, labelKey, labelValue), NotNil) - - c.Assert(persistOptions.GetMaxReplicas(), Equals, 5) - c.Assert(persistOptions.GetMaxSnapshotCount(), Equals, uint64(10)) - c.Assert(persistOptions.GetPDServerConfig().UseRegionStorage, IsTrue) - c.Assert(persistOptions.GetLabelPropertyConfig()[typ], HasLen, 0) + re.Error(svr.SetScheduleConfig(*scheduleCfg)) + re.Error(svr.SetReplicationConfig(*replicationCfg)) + re.Error(svr.SetPDServerConfig(*pdServerCfg)) + re.Error(svr.SetLabelProperty(typ, labelKey, labelValue)) + re.Equal(5, persistOptions.GetMaxReplicas()) + re.Equal(uint64(10), persistOptions.GetMaxSnapshotCount()) + re.True(persistOptions.GetPDServerConfig().UseRegionStorage) + re.Len(persistOptions.GetLabelPropertyConfig()[typ], 0) // DELETE failed - c.Assert(failpoint.Disable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed"), IsNil) - c.Assert(svr.SetReplicationConfig(*replicationCfg), IsNil) - - c.Assert(failpoint.Enable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed", `return(true)`), IsNil) - c.Assert(svr.DeleteLabelProperty(typ, labelKey, labelValue), NotNil) - - c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Key, Equals, "testKey") - c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Value, Equals, "testValue") - c.Assert(failpoint.Disable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed")) + re.NoError(svr.SetReplicationConfig(*replicationCfg)) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed", `return(true)`)) + re.Error(svr.DeleteLabelProperty(typ, labelKey, labelValue)) + re.Equal("testKey", persistOptions.GetLabelPropertyConfig()[typ][0].Key) + re.Equal("testValue", persistOptions.GetLabelPropertyConfig()[typ][0].Value) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/storage/kv/etcdSaveFailed")) } -func (s *clusterTestSuite) TestLoadClusterInfo(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestLoadClusterInfo(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) svr := leaderServer.GetServer() - rc := cluster.NewRaftCluster(s.ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) + rc := cluster.NewRaftCluster(ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) // Cluster is not bootstrapped. rc.InitCluster(svr.GetAllocator(), svr.GetPersistOptions(), svr.GetStorage(), svr.GetBasicCluster()) raftCluster, err := rc.LoadClusterInfo() - c.Assert(err, IsNil) - c.Assert(raftCluster, IsNil) + re.NoError(err) + re.Nil(raftCluster) storage := rc.GetStorage() basicCluster := rc.GetBasicCluster() @@ -762,7 +762,7 @@ func (s *clusterTestSuite) TestLoadClusterInfo(c *C) { // Save meta, stores and regions. 
n := 10 meta := &metapb.Cluster{Id: 123} - c.Assert(storage.SaveMeta(meta), IsNil) + re.NoError(storage.SaveMeta(meta)) stores := make([]*metapb.Store, 0, n) for i := 0; i < n; i++ { store := &metapb.Store{Id: uint64(i)} @@ -770,7 +770,7 @@ func (s *clusterTestSuite) TestLoadClusterInfo(c *C) { } for _, store := range stores { - c.Assert(storage.SaveStore(store), IsNil) + re.NoError(storage.SaveStore(store)) } regions := make([]*metapb.Region, 0, n) @@ -785,25 +785,25 @@ func (s *clusterTestSuite) TestLoadClusterInfo(c *C) { } for _, region := range regions { - c.Assert(storage.SaveRegion(region), IsNil) + re.NoError(storage.SaveRegion(region)) } - c.Assert(storage.Flush(), IsNil) + re.NoError(storage.Flush()) - raftCluster = cluster.NewRaftCluster(s.ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) + raftCluster = cluster.NewRaftCluster(ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) raftCluster.InitCluster(mockid.NewIDAllocator(), opt, storage, basicCluster) raftCluster, err = raftCluster.LoadClusterInfo() - c.Assert(err, IsNil) - c.Assert(raftCluster, NotNil) + re.NoError(err) + re.NotNil(raftCluster) // Check meta, stores, and regions. - c.Assert(raftCluster.GetMetaCluster(), DeepEquals, meta) - c.Assert(raftCluster.GetStoreCount(), Equals, n) + re.Equal(meta, raftCluster.GetMetaCluster()) + re.Equal(n, raftCluster.GetStoreCount()) for _, store := range raftCluster.GetMetaStores() { - c.Assert(store, DeepEquals, stores[store.GetId()]) + re.Equal(stores[store.GetId()], store) } - c.Assert(raftCluster.GetRegionCount(), Equals, n) + re.Equal(n, raftCluster.GetRegionCount()) for _, region := range raftCluster.GetMetaRegions() { - c.Assert(region, DeepEquals, regions[region.GetId()]) + re.Equal(regions[region.GetId()], region) } m := 20 @@ -819,23 +819,26 @@ func (s *clusterTestSuite) TestLoadClusterInfo(c *C) { } for _, region := range regions { - c.Assert(storage.SaveRegion(region), IsNil) + re.NoError(storage.SaveRegion(region)) } - raftCluster.GetStorage().LoadRegionsOnce(s.ctx, raftCluster.GetBasicCluster().PutRegion) - c.Assert(raftCluster.GetRegionCount(), Equals, n) + raftCluster.GetStorage().LoadRegionsOnce(ctx, raftCluster.GetBasicCluster().PutRegion) + re.Equal(n, raftCluster.GetRegionCount()) } -func (s *clusterTestSuite) TestTiFlashWithPlacementRules(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1, func(cfg *config.Config, name string) { cfg.Replication.EnablePlacementRules = false }) +func TestTiFlashWithPlacementRules(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, name string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) tiflashStore := &metapb.Store{ Id: 11, @@ -846,163 +849,155 @@ func (s *clusterTestSuite) TestTiFlashWithPlacementRules(c *C) { // cannot put TiFlash node without placement rules _, err = putStore(grpcPDClient, clusterID, tiflashStore) - c.Assert(err, NotNil) + 
re.Error(err) rep := leaderServer.GetConfig().Replication rep.EnablePlacementRules = true svr := leaderServer.GetServer() err = svr.SetReplicationConfig(rep) - c.Assert(err, IsNil) + re.NoError(err) _, err = putStore(grpcPDClient, clusterID, tiflashStore) - c.Assert(err, IsNil) + re.NoError(err) // test TiFlash store limit expect := map[uint64]config.StoreLimitConfig{11: {AddPeer: 30, RemovePeer: 30}} - c.Assert(svr.GetScheduleConfig().StoreLimit, DeepEquals, expect) + re.Equal(expect, svr.GetScheduleConfig().StoreLimit) // cannot disable placement rules with TiFlash nodes rep.EnablePlacementRules = false err = svr.SetReplicationConfig(rep) - c.Assert(err, NotNil) + re.Error(err) err = svr.GetRaftCluster().BuryStore(11, true) - c.Assert(err, IsNil) + re.NoError(err) err = svr.SetReplicationConfig(rep) - c.Assert(err, IsNil) - c.Assert(len(svr.GetScheduleConfig().StoreLimit), Equals, 0) + re.NoError(err) + re.Equal(0, len(svr.GetScheduleConfig().StoreLimit)) } -func (s *clusterTestSuite) TestReplicationModeStatus(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1, func(conf *config.Config, serverName string) { +func TestReplicationModeStatus(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.ReplicationMode.ReplicationMode = "dr-auto-sync" }) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() req := newBootstrapRequest(clusterID) res, err := grpcPDClient.Bootstrap(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(res.GetReplicationStatus().GetMode(), Equals, replication_modepb.ReplicationMode_DR_AUTO_SYNC) // check status in bootstrap response + re.NoError(err) + re.Equal(replication_modepb.ReplicationMode_DR_AUTO_SYNC, res.GetReplicationStatus().GetMode()) // check status in bootstrap response store := &metapb.Store{Id: 11, Address: "127.0.0.1:1", Version: "v4.1.0"} putRes, err := putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) - c.Assert(putRes.GetReplicationStatus().GetMode(), Equals, replication_modepb.ReplicationMode_DR_AUTO_SYNC) // check status in putStore response + re.NoError(err) + re.Equal(replication_modepb.ReplicationMode_DR_AUTO_SYNC, putRes.GetReplicationStatus().GetMode()) // check status in putStore response hbReq := &pdpb.StoreHeartbeatRequest{ Header: testutil.NewRequestHeader(clusterID), Stats: &pdpb.StoreStats{StoreId: store.GetId()}, } hbRes, err := grpcPDClient.StoreHeartbeat(context.Background(), hbReq) - c.Assert(err, IsNil) - c.Assert(hbRes.GetReplicationStatus().GetMode(), Equals, replication_modepb.ReplicationMode_DR_AUTO_SYNC) // check status in store heartbeat response + re.NoError(err) + re.Equal(replication_modepb.ReplicationMode_DR_AUTO_SYNC, hbRes.GetReplicationStatus().GetMode()) // check status in store heartbeat response } func newIsBootstrapRequest(clusterID uint64) *pdpb.IsBootstrappedRequest { - req := &pdpb.IsBootstrappedRequest{ + return &pdpb.IsBootstrappedRequest{ Header: testutil.NewRequestHeader(clusterID), } - - return req } func newBootstrapRequest(clusterID uint64) *pdpb.BootstrapRequest { - req := &pdpb.BootstrapRequest{ + return 
&pdpb.BootstrapRequest{ Header: testutil.NewRequestHeader(clusterID), Store: &metapb.Store{Id: 1, Address: testStoreAddr}, Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}}, } - - return req } // helper function to check and bootstrap. -func bootstrapCluster(c *C, clusterID uint64, grpcPDClient pdpb.PDClient) { +func bootstrapCluster(re *require.Assertions, clusterID uint64, grpcPDClient pdpb.PDClient) { req := newBootstrapRequest(clusterID) _, err := grpcPDClient.Bootstrap(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) } func putStore(grpcPDClient pdpb.PDClient, clusterID uint64, store *metapb.Store) (*pdpb.PutStoreResponse, error) { - req := &pdpb.PutStoreRequest{ + return grpcPDClient.PutStore(context.Background(), &pdpb.PutStoreRequest{ Header: testutil.NewRequestHeader(clusterID), Store: store, - } - resp, err := grpcPDClient.PutStore(context.Background(), req) - return resp, err + }) } -func getStore(c *C, clusterID uint64, grpcPDClient pdpb.PDClient, storeID uint64) *metapb.Store { - req := &pdpb.GetStoreRequest{ +func getStore(re *require.Assertions, clusterID uint64, grpcPDClient pdpb.PDClient, storeID uint64) *metapb.Store { + resp, err := grpcPDClient.GetStore(context.Background(), &pdpb.GetStoreRequest{ Header: testutil.NewRequestHeader(clusterID), StoreId: storeID, - } - resp, err := grpcPDClient.GetStore(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetStore().GetId(), Equals, storeID) - + }) + re.NoError(err) + re.Equal(storeID, resp.GetStore().GetId()) return resp.GetStore() } -func getRegion(c *C, clusterID uint64, grpcPDClient pdpb.PDClient, regionKey []byte) *metapb.Region { - req := &pdpb.GetRegionRequest{ +func getRegion(re *require.Assertions, clusterID uint64, grpcPDClient pdpb.PDClient, regionKey []byte) *metapb.Region { + resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ Header: testutil.NewRequestHeader(clusterID), RegionKey: regionKey, - } - - resp, err := grpcPDClient.GetRegion(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetRegion(), NotNil) - + }) + re.NoError(err) + re.NotNil(resp.GetRegion()) return resp.GetRegion() } -func getRegionByID(c *C, clusterID uint64, grpcPDClient pdpb.PDClient, regionID uint64) *metapb.Region { - req := &pdpb.GetRegionByIDRequest{ +func getRegionByID(re *require.Assertions, clusterID uint64, grpcPDClient pdpb.PDClient, regionID uint64) *metapb.Region { + resp, err := grpcPDClient.GetRegionByID(context.Background(), &pdpb.GetRegionByIDRequest{ Header: testutil.NewRequestHeader(clusterID), RegionId: regionID, - } - - resp, err := grpcPDClient.GetRegionByID(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetRegion(), NotNil) - + }) + re.NoError(err) + re.NotNil(resp.GetRegion()) return resp.GetRegion() } -func getClusterConfig(c *C, clusterID uint64, grpcPDClient pdpb.PDClient) *metapb.Cluster { - req := &pdpb.GetClusterConfigRequest{Header: testutil.NewRequestHeader(clusterID)} - - resp, err := grpcPDClient.GetClusterConfig(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetCluster(), NotNil) - +func getClusterConfig(re *require.Assertions, clusterID uint64, grpcPDClient pdpb.PDClient) *metapb.Cluster { + resp, err := grpcPDClient.GetClusterConfig(context.Background(), &pdpb.GetClusterConfigRequest{ + Header: testutil.NewRequestHeader(clusterID), + }) + re.NoError(err) + re.NotNil(resp.GetCluster()) return resp.GetCluster() } -func (s *clusterTestSuite) 
TestOfflineStoreLimit(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestOfflineStoreLimit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dashboard.SetCheckInterval(30 * time.Minute) + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1"} rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.SetStorage(storage.NewStorageWithMemoryBackend()) id := leaderServer.GetAllocator() for _, addr := range storeAddrs { storeID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) store := newMetaStore(storeID, addr, "4.0.0", metapb.StoreState_Up, getTestDeployPath(storeID)) _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) } for i := uint64(1); i <= 2; i++ { r := &metapb.Region{ @@ -1018,7 +1013,7 @@ func (s *clusterTestSuite) TestOfflineStoreLimit(c *C) { region := core.NewRegionInfo(r, r.Peers[0], core.SetApproximateSize(10)) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) } oc := rc.GetOperatorController() @@ -1027,22 +1022,22 @@ func (s *clusterTestSuite) TestOfflineStoreLimit(c *C) { // only can add 5 remove peer operators on store 1 for i := uint64(1); i <= 5; i++ { op := operator.NewTestOperator(1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1}) - c.Assert(oc.AddOperator(op), IsTrue) - c.Assert(oc.RemoveOperator(op), IsTrue) + re.True(oc.AddOperator(op)) + re.True(oc.RemoveOperator(op)) } op := operator.NewTestOperator(1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1}) - c.Assert(oc.AddOperator(op), IsFalse) - c.Assert(oc.RemoveOperator(op), IsFalse) + re.False(oc.AddOperator(op)) + re.False(oc.RemoveOperator(op)) // only can add 5 remove peer operators on store 2 for i := uint64(1); i <= 5; i++ { op := operator.NewTestOperator(2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2}) - c.Assert(oc.AddOperator(op), IsTrue) - c.Assert(oc.RemoveOperator(op), IsTrue) + re.True(oc.AddOperator(op)) + re.True(oc.RemoveOperator(op)) } op = operator.NewTestOperator(2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2}) - c.Assert(oc.AddOperator(op), IsFalse) - c.Assert(oc.RemoveOperator(op), IsFalse) + re.False(oc.AddOperator(op)) + re.False(oc.RemoveOperator(op)) // reset all store limit opt.SetAllStoresLimit(storelimit.RemovePeer, 2) @@ -1050,12 +1045,12 @@ func (s *clusterTestSuite) TestOfflineStoreLimit(c *C) { // only can add 5 remove peer operators on store 2 for i := uint64(1); i <= 5; i++ { op := operator.NewTestOperator(2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2}) - c.Assert(oc.AddOperator(op), IsTrue) - c.Assert(oc.RemoveOperator(op), IsTrue) + re.True(oc.AddOperator(op)) + re.True(oc.RemoveOperator(op)) } op = operator.NewTestOperator(2, 
&metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2}) - c.Assert(oc.AddOperator(op), IsFalse) - c.Assert(oc.RemoveOperator(op), IsFalse) + re.False(oc.AddOperator(op)) + re.False(oc.RemoveOperator(op)) // offline store 1 rc.SetStoreLimit(1, storelimit.RemovePeer, storelimit.Unlimited) @@ -1064,28 +1059,32 @@ func (s *clusterTestSuite) TestOfflineStoreLimit(c *C) { // can add unlimited remove peer operators on store 1 for i := uint64(1); i <= 30; i++ { op := operator.NewTestOperator(1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1}) - c.Assert(oc.AddOperator(op), IsTrue) - c.Assert(oc.RemoveOperator(op), IsTrue) + re.True(oc.AddOperator(op)) + re.True(oc.RemoveOperator(op)) } } -func (s *clusterTestSuite) TestUpgradeStoreLimit(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) +func TestUpgradeStoreLimit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dashboard.SetCheckInterval(30 * time.Minute) + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.SetStorage(storage.NewStorageWithMemoryBackend()) store := newMetaStore(1, "127.0.1.1:0", "4.0.0", metapb.StoreState_Up, "test/store1") _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) r := &metapb.Region{ Id: 1, RegionEpoch: &metapb.RegionEpoch{ @@ -1099,58 +1098,60 @@ func (s *clusterTestSuite) TestUpgradeStoreLimit(c *C) { region := core.NewRegionInfo(r, r.Peers[0], core.SetApproximateSize(10)) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) // restart PD // Here we use an empty storelimit to simulate the upgrade progress. 
opt := rc.GetOpts() scheduleCfg := opt.GetScheduleConfig().Clone() scheduleCfg.StoreLimit = map[uint64]config.StoreLimitConfig{} - c.Assert(leaderServer.GetServer().SetScheduleConfig(*scheduleCfg), IsNil) + re.NoError(leaderServer.GetServer().SetScheduleConfig(*scheduleCfg)) err = leaderServer.Stop() - c.Assert(err, IsNil) + re.NoError(err) err = leaderServer.Run() - c.Assert(err, IsNil) + re.NoError(err) oc := rc.GetOperatorController() // only can add 5 remove peer operators on store 1 for i := uint64(1); i <= 5; i++ { op := operator.NewTestOperator(1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1}) - c.Assert(oc.AddOperator(op), IsTrue) - c.Assert(oc.RemoveOperator(op), IsTrue) + re.True(oc.AddOperator(op)) + re.True(oc.RemoveOperator(op)) } op := operator.NewTestOperator(1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1}) - c.Assert(oc.AddOperator(op), IsFalse) - c.Assert(oc.RemoveOperator(op), IsFalse) + re.False(oc.AddOperator(op)) + re.False(oc.RemoveOperator(op)) } -func (s *clusterTestSuite) TestStaleTermHeartbeat(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) +func TestStaleTermHeartbeat(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dashboard.SetCheckInterval(30 * time.Minute) + tc, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) defer tc.Destroy() - err = tc.RunInitialServers() - c.Assert(err, IsNil) - + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1", "127.0.1.1:2"} rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) rc.SetStorage(storage.NewStorageWithMemoryBackend()) peers := make([]*metapb.Peer, 0, len(storeAddrs)) id := leaderServer.GetAllocator() for _, addr := range storeAddrs { storeID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) peerID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) store := newMetaStore(storeID, addr, "3.0.0", metapb.StoreState_Up, getTestDeployPath(storeID)) _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) peers = append(peers, &metapb.Peer{ Id: peerID, StoreId: storeID, @@ -1176,45 +1177,45 @@ func (s *clusterTestSuite) TestStaleTermHeartbeat(c *C) { region := core.RegionFromHeartbeat(regionReq) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) // Transfer leader regionReq.Term = 6 regionReq.Leader = peers[1] region = core.RegionFromHeartbeat(regionReq) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) // issue #3379 regionReq.KeysWritten = uint64(18446744073709551615) // -1 regionReq.BytesWritten = uint64(18446744073709550602) // -1024 region = core.RegionFromHeartbeat(regionReq) - c.Assert(region.GetKeysWritten(), Equals, uint64(0)) - c.Assert(region.GetBytesWritten(), Equals, uint64(0)) + re.Equal(uint64(0), region.GetKeysWritten()) + re.Equal(uint64(0), region.GetBytesWritten()) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) // Stale heartbeat, update check should fail regionReq.Term = 5 
regionReq.Leader = peers[0] region = core.RegionFromHeartbeat(regionReq) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, NotNil) + re.Error(err) // Allow regions that are created by unsafe recover to send a heartbeat, even though they // are considered "stale" because their conf ver and version are both equal to 1. regionReq.Region.RegionEpoch.ConfVer = 1 region = core.RegionFromHeartbeat(regionReq) err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) } -func (s *clusterTestSuite) putRegionWithLeader(c *C, rc *cluster.RaftCluster, id id.Allocator, storeID uint64) { +func putRegionWithLeader(re *require.Assertions, rc *cluster.RaftCluster, id id.Allocator, storeID uint64) { for i := 0; i < 3; i++ { regionID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) peerID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) region := &metapb.Region{ Id: regionID, Peers: []*metapb.Peer{{Id: peerID, StoreId: storeID}}, @@ -1223,43 +1224,46 @@ func (s *clusterTestSuite) putRegionWithLeader(c *C, rc *cluster.RaftCluster, id } rc.HandleRegionHeartbeat(core.NewRegionInfo(region, region.Peers[0])) } - c.Assert(rc.GetStore(storeID).GetLeaderCount(), Equals, 3) + re.Equal(3, rc.GetStore(storeID).GetLeaderCount()) } -func (s *clusterTestSuite) checkMinResolvedTSFromStorage(c *C, rc *cluster.RaftCluster, expect uint64) { +func checkMinResolvedTSFromStorage(re *require.Assertions, rc *cluster.RaftCluster, expect uint64) { time.Sleep(time.Millisecond * 10) ts2, err := rc.GetStorage().LoadMinResolvedTS() - c.Assert(err, IsNil) - c.Assert(ts2, Equals, expect) + re.NoError(err) + re.Equal(expect, ts2) } -func (s *clusterTestSuite) setMinResolvedTSPersistenceInterval(c *C, rc *cluster.RaftCluster, svr *server.Server, interval time.Duration) { +func setMinResolvedTSPersistenceInterval(re *require.Assertions, rc *cluster.RaftCluster, svr *server.Server, interval time.Duration) { cfg := rc.GetOpts().GetPDServerConfig().Clone() cfg.MinResolvedTSPersistenceInterval = typeutil.NewDuration(interval) err := svr.SetPDServerConfig(*cfg) - c.Assert(err, IsNil) + re.NoError(err) time.Sleep(time.Millisecond + interval) } -func (s *clusterTestSuite) TestMinResolvedTS(c *C) { +func TestMinResolvedTS(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cluster.DefaultMinResolvedTSPersistenceInterval = time.Millisecond - tc, err := tests.NewTestCluster(s.ctx, 1) + tc, err := tests.NewTestCluster(ctx, 1) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) id := leaderServer.GetAllocator() - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) rc := leaderServer.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) svr := leaderServer.GetServer() - addStoreAndCheckMinResolvedTS := func(c *C, isTiflash bool, minResolvedTS, expect uint64) uint64 { + addStoreAndCheckMinResolvedTS := func(re *require.Assertions, isTiflash bool, minResolvedTS, expect uint64) uint64 { storeID, err := id.Alloc() - c.Assert(err, IsNil) + re.NoError(err) store := &metapb.Store{ Id: storeID, Version: "v6.0.0", @@ -1269,95 +1273,104 @@ func (s *clusterTestSuite) TestMinResolvedTS(c 
*C) { store.Labels = []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}} } _, err = putStore(grpcPDClient, clusterID, store) - c.Assert(err, IsNil) + re.NoError(err) req := &pdpb.ReportMinResolvedTsRequest{ Header: testutil.NewRequestHeader(clusterID), StoreId: storeID, MinResolvedTs: minResolvedTS, } _, err = grpcPDClient.ReportMinResolvedTS(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) ts := rc.GetMinResolvedTS() - c.Assert(ts, Equals, expect) + re.Equal(expect, ts) return storeID } // case1: cluster is no initialized // min resolved ts should be not available status, err := rc.LoadClusterStatus() - c.Assert(err, IsNil) - c.Assert(status.IsInitialized, IsFalse) + re.NoError(err) + re.False(status.IsInitialized) store1TS := uint64(233) - store1 := addStoreAndCheckMinResolvedTS(c, false /* not tiflash */, store1TS, math.MaxUint64) + store1 := addStoreAndCheckMinResolvedTS(re, false /* not tiflash */, store1TS, math.MaxUint64) // case2: add leader peer to store1 but no run job // min resolved ts should be zero - s.putRegionWithLeader(c, rc, id, store1) + putRegionWithLeader(re, rc, id, store1) + time.Sleep(time.Millisecond) ts := rc.GetMinResolvedTS() - c.Assert(ts, Equals, uint64(0)) + re.Equal(uint64(0), ts) // case3: add leader peer to store1 and run job // min resolved ts should be store1TS - s.setMinResolvedTSPersistenceInterval(c, rc, svr, time.Millisecond) + setMinResolvedTSPersistenceInterval(re, rc, svr, time.Millisecond) + time.Sleep(time.Millisecond) ts = rc.GetMinResolvedTS() - c.Assert(ts, Equals, store1TS) - s.checkMinResolvedTSFromStorage(c, rc, ts) + re.Equal(store1TS, ts) + checkMinResolvedTSFromStorage(re, rc, ts) // case4: add tiflash store // min resolved ts should no change - addStoreAndCheckMinResolvedTS(c, true /* is tiflash */, 0, store1TS) + addStoreAndCheckMinResolvedTS(re, true /* is tiflash */, 0, store1TS) // case5: add new store with lager min resolved ts // min resolved ts should no change store3TS := store1TS + 10 - store3 := addStoreAndCheckMinResolvedTS(c, false /* not tiflash */, store3TS, store1TS) - s.putRegionWithLeader(c, rc, id, store3) + store3 := addStoreAndCheckMinResolvedTS(re, false /* not tiflash */, store3TS, store1TS) + putRegionWithLeader(re, rc, id, store3) // case6: set store1 to tombstone // min resolved ts should change to store 3 - resetStoreState(c, rc, store1, metapb.StoreState_Tombstone) + resetStoreState(re, rc, store1, metapb.StoreState_Tombstone) + time.Sleep(time.Millisecond) ts = rc.GetMinResolvedTS() - c.Assert(ts, Equals, store3TS) + re.Equal(store3TS, ts) // case7: add a store with leader peer but no report min resolved ts // min resolved ts should be no change - s.checkMinResolvedTSFromStorage(c, rc, store3TS) - store4 := addStoreAndCheckMinResolvedTS(c, false /* not tiflash */, 0, store3TS) - s.putRegionWithLeader(c, rc, id, store4) + checkMinResolvedTSFromStorage(re, rc, store3TS) + store4 := addStoreAndCheckMinResolvedTS(re, false /* not tiflash */, 0, store3TS) + putRegionWithLeader(re, rc, id, store4) + time.Sleep(time.Millisecond) ts = rc.GetMinResolvedTS() - c.Assert(ts, Equals, store3TS) - s.checkMinResolvedTSFromStorage(c, rc, store3TS) - resetStoreState(c, rc, store4, metapb.StoreState_Tombstone) + re.Equal(store3TS, ts) + checkMinResolvedTSFromStorage(re, rc, store3TS) + resetStoreState(re, rc, store4, metapb.StoreState_Tombstone) // case8: set min resolved ts persist interval to zero // although min resolved ts increase, it should be not persisted until job running. 
store5TS := store3TS + 10 - s.setMinResolvedTSPersistenceInterval(c, rc, svr, 0) - store5 := addStoreAndCheckMinResolvedTS(c, false /* not tiflash */, store5TS, store3TS) - resetStoreState(c, rc, store3, metapb.StoreState_Tombstone) - s.putRegionWithLeader(c, rc, id, store5) + setMinResolvedTSPersistenceInterval(re, rc, svr, 0) + store5 := addStoreAndCheckMinResolvedTS(re, false /* not tiflash */, store5TS, store3TS) + resetStoreState(re, rc, store3, metapb.StoreState_Tombstone) + putRegionWithLeader(re, rc, id, store5) + time.Sleep(time.Millisecond) ts = rc.GetMinResolvedTS() - c.Assert(ts, Equals, store3TS) - s.setMinResolvedTSPersistenceInterval(c, rc, svr, time.Millisecond) + re.Equal(store3TS, ts) + setMinResolvedTSPersistenceInterval(re, rc, svr, time.Millisecond) + time.Sleep(time.Millisecond) ts = rc.GetMinResolvedTS() - c.Assert(ts, Equals, store5TS) + re.Equal(store5TS, ts) } // See https://github.com/tikv/pd/issues/4941 -func (s *clusterTestSuite) TestTransferLeaderBack(c *C) { - tc, err := tests.NewTestCluster(s.ctx, 2) +func TestTransferLeaderBack(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, err := tests.NewTestCluster(ctx, 2) defer tc.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = tc.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) tc.WaitLeader() leaderServer := tc.GetServer(tc.GetLeader()) svr := leaderServer.GetServer() - rc := cluster.NewRaftCluster(s.ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) + rc := cluster.NewRaftCluster(ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) rc.InitCluster(svr.GetAllocator(), svr.GetPersistOptions(), svr.GetStorage(), svr.GetBasicCluster()) storage := rc.GetStorage() meta := &metapb.Cluster{Id: 123} - c.Assert(storage.SaveMeta(meta), IsNil) + re.NoError(storage.SaveMeta(meta)) n := 4 stores := make([]*metapb.Store, 0, n) for i := 1; i <= n; i++ { @@ -1366,14 +1379,14 @@ func (s *clusterTestSuite) TestTransferLeaderBack(c *C) { } for _, store := range stores { - c.Assert(storage.SaveStore(store), IsNil) + re.NoError(storage.SaveStore(store)) } rc, err = rc.LoadClusterInfo() - c.Assert(err, IsNil) - c.Assert(rc, NotNil) + re.NoError(err) + re.NotNil(rc) // offline a store - c.Assert(rc.RemoveStore(1, false), IsNil) - c.Assert(rc.GetStore(1).GetState(), Equals, metapb.StoreState_Offline) + re.NoError(rc.RemoveStore(1, false)) + re.Equal(metapb.StoreState_Offline, rc.GetStore(1).GetState()) // transfer PD leader to another PD tc.ResignLeader() @@ -1381,11 +1394,12 @@ func (s *clusterTestSuite) TestTransferLeaderBack(c *C) { leaderServer = tc.GetServer(tc.GetLeader()) svr1 := leaderServer.GetServer() rc1 := svr1.GetRaftCluster() - c.Assert(err, IsNil) - c.Assert(rc1, NotNil) + re.NoError(err) + re.NotNil(rc1) + // tombstone a store, and remove its record - c.Assert(rc1.BuryStore(1, false), IsNil) - c.Assert(rc1.RemoveTombStoneRecords(), IsNil) + re.NoError(rc1.BuryStore(1, false)) + re.NoError(rc1.RemoveTombStoneRecords()) // transfer PD leader back to the previous PD tc.ResignLeader() @@ -1393,9 +1407,9 @@ func (s *clusterTestSuite) TestTransferLeaderBack(c *C) { leaderServer = tc.GetServer(tc.GetLeader()) svr = leaderServer.GetServer() rc = svr.GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) // check store count - c.Assert(rc.GetMetaCluster(), DeepEquals, meta) - c.Assert(rc.GetStoreCount(), Equals, 3) + re.Equal(meta, rc.GetMetaCluster()) + re.Equal(3, 
rc.GetStoreCount()) } diff --git a/tests/server/cluster/cluster_work_test.go b/tests/server/cluster/cluster_work_test.go index b3d9fdcf9e0..5dee7da02cd 100644 --- a/tests/server/cluster/cluster_work_test.go +++ b/tests/server/cluster/cluster_work_test.go @@ -17,44 +17,33 @@ package cluster_test import ( "context" "sort" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/core" "github.com/tikv/pd/tests" ) -var _ = Suite(&clusterWorkerTestSuite{}) - -type clusterWorkerTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *clusterWorkerTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *clusterWorkerTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *clusterWorkerTestSuite) TestValidRequestRegion(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func TestValidRequestRegion(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, clusterID, grpcPDClient) rc := leaderServer.GetRaftCluster() r1 := core.NewRegionInfo(&metapb.Region{ @@ -71,31 +60,34 @@ func (s *clusterWorkerTestSuite) TestValidRequestRegion(c *C) { StoreId: 1, }) err = rc.HandleRegionHeartbeat(r1) - c.Assert(err, IsNil) + re.NoError(err) r2 := &metapb.Region{Id: 2, StartKey: []byte("a"), EndKey: []byte("b")} - c.Assert(rc.ValidRequestRegion(r2), NotNil) + re.Error(rc.ValidRequestRegion(r2)) r3 := &metapb.Region{Id: 1, StartKey: []byte(""), EndKey: []byte("a"), RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 2}} - c.Assert(rc.ValidRequestRegion(r3), NotNil) + re.Error(rc.ValidRequestRegion(r3)) r4 := &metapb.Region{Id: 1, StartKey: []byte(""), EndKey: []byte("a"), RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 1}} - c.Assert(rc.ValidRequestRegion(r4), NotNil) + re.Error(rc.ValidRequestRegion(r4)) r5 := &metapb.Region{Id: 1, StartKey: []byte(""), EndKey: []byte("a"), RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 2}} - c.Assert(rc.ValidRequestRegion(r5), IsNil) + re.NoError(rc.ValidRequestRegion(r5)) rc.Stop() } -func (s *clusterWorkerTestSuite) TestAskSplit(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func TestAskSplit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() - bootstrapCluster(c, clusterID, grpcPDClient) + bootstrapCluster(re, 
clusterID, grpcPDClient)
 	rc := leaderServer.GetRaftCluster()
 	opt := rc.GetOpts()
 	opt.SetSplitMergeInterval(time.Hour)
@@ -109,7 +101,7 @@ func (s *clusterWorkerTestSuite) TestAskSplit(c *C) {
 	}
 
 	_, err = rc.HandleAskSplit(req)
-	c.Assert(err, IsNil)
+	re.NoError(err)
 
 	req1 := &pdpb.AskBatchSplitRequest{
 		Header: &pdpb.RequestHeader{
@@ -120,27 +112,30 @@ func (s *clusterWorkerTestSuite) TestAskSplit(c *C) {
 	}
 
 	_, err = rc.HandleAskBatchSplit(req1)
-	c.Assert(err, IsNil)
+	re.NoError(err)
 
 	// test region id whether valid
 	opt.SetSplitMergeInterval(time.Duration(0))
 	mergeChecker := rc.GetMergeChecker()
 	mergeChecker.Check(regions[0])
-	c.Assert(err, IsNil)
+	re.NoError(err)
 }
 
-func (s *clusterWorkerTestSuite) TestSuspectRegions(c *C) {
-	cluster, err := tests.NewTestCluster(s.ctx, 1)
+func TestSuspectRegions(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	cluster, err := tests.NewTestCluster(ctx, 1)
 	defer cluster.Destroy()
-	c.Assert(err, IsNil)
+	re.NoError(err)
 	err = cluster.RunInitialServers()
-	c.Assert(err, IsNil)
+	re.NoError(err)
 	cluster.WaitLeader()
 	leaderServer := cluster.GetServer(cluster.GetLeader())
-	grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
+	grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr())
 	clusterID := leaderServer.GetClusterID()
-	bootstrapCluster(c, clusterID, grpcPDClient)
+	bootstrapCluster(re, clusterID, grpcPDClient)
 	rc := leaderServer.GetRaftCluster()
 	opt := rc.GetOpts()
 	opt.SetSplitMergeInterval(time.Hour)
@@ -154,10 +149,10 @@ func (s *clusterWorkerTestSuite) TestSuspectRegions(c *C) {
 		SplitCount: 2,
 	}
 	res, err := rc.HandleAskBatchSplit(req)
-	c.Assert(err, IsNil)
+	re.NoError(err)
 	ids := []uint64{regions[0].GetMeta().GetId(), res.Ids[0].NewRegionId, res.Ids[1].NewRegionId}
 	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
 	suspects := rc.GetSuspectRegions()
 	sort.Slice(suspects, func(i, j int) bool { return suspects[i] < suspects[j] })
-	c.Assert(suspects, DeepEquals, ids)
+	re.Equal(ids, suspects)
 }

From 32bccb7ca1ce6ca5ffaf81e419bc341f4200fee9 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Mon, 20 Jun 2022 16:06:37 +0800
Subject: [PATCH 29/35] checker: migrate test framework to testify (#5174)

ref tikv/pd#4813

Signed-off-by: Ryan Leung
Co-authored-by: Ti Chi Robot
---
 pkg/testutil/operator_check.go                |  60 ++
 .../checker/joint_state_checker_test.go       |  61 +-
 .../schedule/checker/learner_checker_test.go  |  44 +-
 server/schedule/checker/merge_checker_test.go | 445 +++++------
 .../checker/priority_inspector_test.go        |  49 +-
 .../schedule/checker/replica_checker_test.go  | 251 +++---
 server/schedule/checker/rule_checker_test.go  | 741 +++++++++---------
 server/schedule/checker/split_checker_test.go |  66 +-
 8 files changed, 833 insertions(+), 884 deletions(-)

diff --git a/pkg/testutil/operator_check.go b/pkg/testutil/operator_check.go
index 90779b7059a..1df641e7e0a 100644
--- a/pkg/testutil/operator_check.go
+++ b/pkg/testutil/operator_check.go
@@ -16,6 +16,7 @@ package testutil
 
 import (
 	"github.com/pingcap/check"
+	"github.com/stretchr/testify/require"
 	"github.com/tikv/pd/server/schedule/operator"
 )
 
@@ -141,3 +142,62 @@ func CheckTransferPeerWithLeaderTransferFrom(c *check.C, op *operator.Operator,
 	kind |= operator.OpRegion | operator.OpLeader
 	c.Assert(op.Kind()&kind, check.Equals, kind)
 }
+
+// CheckAddPeerWithTestify checks if the operator is to add peer on specified store.
+func CheckAddPeerWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, storeID uint64) {
+	re.NotNil(op)
+	re.Equal(2, op.Len())
+	re.Equal(storeID, op.Step(0).(operator.AddLearner).ToStore)
+	re.IsType(operator.PromoteLearner{}, op.Step(1))
+	kind |= operator.OpRegion
+	re.Equal(kind, op.Kind()&kind)
+}
+
+// CheckRemovePeerWithTestify checks if the operator is to remove peer on specified store.
+func CheckRemovePeerWithTestify(re *require.Assertions, op *operator.Operator, storeID uint64) {
+	re.NotNil(op)
+	if op.Len() == 1 {
+		re.Equal(storeID, op.Step(0).(operator.RemovePeer).FromStore)
+	} else {
+		re.Equal(2, op.Len())
+		re.Equal(storeID, op.Step(0).(operator.TransferLeader).FromStore)
+		re.Equal(storeID, op.Step(1).(operator.RemovePeer).FromStore)
+	}
+}
+
+// CheckTransferPeerWithTestify checks if the operator is to transfer peer between the specified source and target stores.
+func CheckTransferPeerWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) {
+	re.NotNil(op)
+
+	steps, _ := trimTransferLeaders(op)
+	re.Len(steps, 3)
+	re.Equal(targetID, steps[0].(operator.AddLearner).ToStore)
+	re.IsType(operator.PromoteLearner{}, steps[1])
+	re.Equal(sourceID, steps[2].(operator.RemovePeer).FromStore)
+	kind |= operator.OpRegion
+	re.Equal(kind, op.Kind()&kind)
+}
+
+// CheckSteps checks if the operator matches the given steps.
+func CheckSteps(re *require.Assertions, op *operator.Operator, steps []operator.OpStep) {
+	re.NotEqual(0, op.Kind()&operator.OpMerge)
+	re.NotNil(steps)
+	re.Len(steps, op.Len())
+	for i := range steps {
+		switch op.Step(i).(type) {
+		case operator.AddLearner:
+			re.Equal(steps[i].(operator.AddLearner).ToStore, op.Step(i).(operator.AddLearner).ToStore)
+		case operator.PromoteLearner:
+			re.Equal(steps[i].(operator.PromoteLearner).ToStore, op.Step(i).(operator.PromoteLearner).ToStore)
+		case operator.TransferLeader:
+			re.Equal(steps[i].(operator.TransferLeader).FromStore, op.Step(i).(operator.TransferLeader).FromStore)
+			re.Equal(steps[i].(operator.TransferLeader).ToStore, op.Step(i).(operator.TransferLeader).ToStore)
+		case operator.RemovePeer:
+			re.Equal(steps[i].(operator.RemovePeer).FromStore, op.Step(i).(operator.RemovePeer).FromStore)
+		case operator.MergeRegion:
+			re.Equal(steps[i].(operator.MergeRegion).IsPassive, op.Step(i).(operator.MergeRegion).IsPassive)
+		default:
+			re.FailNow("unknown operator step type")
+		}
+	}
+}
diff --git a/server/schedule/checker/joint_state_checker_test.go b/server/schedule/checker/joint_state_checker_test.go
index 5d759c51e67..b350de469c4 100644
--- a/server/schedule/checker/joint_state_checker_test.go
+++ b/server/schedule/checker/joint_state_checker_test.go
@@ -16,42 +16,25 @@ package checker
 import (
 	"context"
+	"testing"
 
-	. 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/schedule/operator" ) -var _ = Suite(&testJointStateCheckerSuite{}) - -type testJointStateCheckerSuite struct { - cluster *mockcluster.Cluster - jsc *JointStateChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testJointStateCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testJointStateCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testJointStateCheckerSuite) SetUpTest(c *C) { - s.cluster = mockcluster.NewCluster(s.ctx, config.NewTestOptions()) - s.jsc = NewJointStateChecker(s.cluster) +func TestLeaveJointState(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + jsc := NewJointStateChecker(cluster) for id := uint64(1); id <= 10; id++ { - s.cluster.PutStoreWithLabels(id) + cluster.PutStoreWithLabels(id) } -} - -func (s *testJointStateCheckerSuite) TestLeaveJointState(c *C) { - jsc := s.jsc type testCase struct { Peers []*metapb.Peer // first is leader OpSteps []operator.OpStep @@ -131,38 +114,38 @@ func (s *testJointStateCheckerSuite) TestLeaveJointState(c *C) { for _, tc := range cases { region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: tc.Peers}, tc.Peers[0]) op := jsc.Check(region) - s.checkSteps(c, op, tc.OpSteps) + checkSteps(re, op, tc.OpSteps) } } -func (s *testJointStateCheckerSuite) checkSteps(c *C, op *operator.Operator, steps []operator.OpStep) { +func checkSteps(re *require.Assertions, op *operator.Operator, steps []operator.OpStep) { if len(steps) == 0 { - c.Assert(op, IsNil) + re.Nil(op) return } - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "leave-joint-state") + re.NotNil(op) + re.Equal("leave-joint-state", op.Desc()) - c.Assert(op.Len(), Equals, len(steps)) + re.Len(steps, op.Len()) for i := range steps { switch obtain := op.Step(i).(type) { case operator.ChangePeerV2Leave: expect := steps[i].(operator.ChangePeerV2Leave) - c.Assert(len(obtain.PromoteLearners), Equals, len(expect.PromoteLearners)) - c.Assert(len(obtain.DemoteVoters), Equals, len(expect.DemoteVoters)) + re.Equal(len(expect.PromoteLearners), len(obtain.PromoteLearners)) + re.Equal(len(expect.DemoteVoters), len(obtain.DemoteVoters)) for j, p := range expect.PromoteLearners { - c.Assert(expect.PromoteLearners[j].ToStore, Equals, p.ToStore) + re.Equal(p.ToStore, obtain.PromoteLearners[j].ToStore) } for j, d := range expect.DemoteVoters { - c.Assert(obtain.DemoteVoters[j].ToStore, Equals, d.ToStore) + re.Equal(d.ToStore, obtain.DemoteVoters[j].ToStore) } case operator.TransferLeader: expect := steps[i].(operator.TransferLeader) - c.Assert(obtain.FromStore, Equals, expect.FromStore) - c.Assert(obtain.ToStore, Equals, expect.ToStore) + re.Equal(expect.FromStore, obtain.FromStore) + re.Equal(expect.ToStore, obtain.ToStore) default: - c.Fatal("unknown operator step type") + re.FailNow("unknown operator step type") } } } diff --git a/server/schedule/checker/learner_checker_test.go b/server/schedule/checker/learner_checker_test.go index 1a403e79043..afe4b920313 100644 --- a/server/schedule/checker/learner_checker_test.go +++ b/server/schedule/checker/learner_checker_test.go @@ -16,9 +16,10 @@ package checker import ( "context" + "testing" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" @@ -26,31 +27,16 @@ import ( "github.com/tikv/pd/server/versioninfo" ) -var _ = Suite(&testLearnerCheckerSuite{}) - -type testLearnerCheckerSuite struct { - cluster *mockcluster.Cluster - lc *LearnerChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testLearnerCheckerSuite) SetUpTest(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.cluster = mockcluster.NewCluster(s.ctx, config.NewTestOptions()) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.lc = NewLearnerChecker(s.cluster) +func TestPromoteLearner(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + lc := NewLearnerChecker(cluster) for id := uint64(1); id <= 10; id++ { - s.cluster.PutStoreWithLabels(id) + cluster.PutStoreWithLabels(id) } -} - -func (s *testLearnerCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testLearnerCheckerSuite) TestPromoteLearner(c *C) { - lc := s.lc region := core.NewRegionInfo( &metapb.Region{ @@ -62,12 +48,12 @@ func (s *testLearnerCheckerSuite) TestPromoteLearner(c *C) { }, }, &metapb.Peer{Id: 101, StoreId: 1}) op := lc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "promote-learner") - c.Assert(op.Step(0), FitsTypeOf, operator.PromoteLearner{}) - c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(3)) + re.NotNil(op) + re.Equal("promote-learner", op.Desc()) + re.IsType(operator.PromoteLearner{}, op.Step(0)) + re.Equal(uint64(3), op.Step(0).(operator.PromoteLearner).ToStore) region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetPeer(103)})) op = lc.Check(region) - c.Assert(op, IsNil) + re.Nil(op) } diff --git a/server/schedule/checker/merge_checker_test.go b/server/schedule/checker/merge_checker_test.go index 21c6eeec410..b0f5c8ae270 100644 --- a/server/schedule/checker/merge_checker_test.go +++ b/server/schedule/checker/merge_checker_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/config" @@ -36,17 +36,12 @@ import ( "go.uber.org/goleak" ) -func TestMergeChecker(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) 
} -var _ = Suite(&testMergeCheckerSuite{}) - -type testMergeCheckerSuite struct { +type mergeCheckerTestSuite struct { + suite.Suite ctx context.Context cancel context.CancelFunc cluster *mockcluster.Cluster @@ -54,145 +49,146 @@ type testMergeCheckerSuite struct { regions []*core.RegionInfo } -func (s *testMergeCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) +func TestMergeCheckerTestSuite(t *testing.T) { + suite.Run(t, new(mergeCheckerTestSuite)) } -func (s *testMergeCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testMergeCheckerSuite) SetUpTest(c *C) { +func (suite *mergeCheckerTestSuite) SetupTest() { cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetMaxMergeRegionSize(2) - s.cluster.SetMaxMergeRegionKeys(2) - s.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetMaxMergeRegionSize(2) + suite.cluster.SetMaxMergeRegionKeys(2) + suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ config.RejectLeader: {{Key: "reject", Value: "leader"}}, }) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) stores := map[uint64][]string{ 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, 7: {"reject", "leader"}, 8: {"reject", "leader"}, } for storeID, labels := range stores { - s.cluster.PutStoreWithLabels(storeID, labels...) + suite.cluster.PutStoreWithLabels(storeID, labels...) } - s.regions = []*core.RegionInfo{ + suite.regions = []*core.RegionInfo{ newRegionInfo(1, "", "a", 1, 1, []uint64{101, 1}, []uint64{101, 1}, []uint64{102, 2}), newRegionInfo(2, "a", "t", 200, 200, []uint64{104, 4}, []uint64{103, 1}, []uint64{104, 4}, []uint64{105, 5}), newRegionInfo(3, "t", "x", 1, 1, []uint64{108, 6}, []uint64{106, 2}, []uint64{107, 5}, []uint64{108, 6}), newRegionInfo(4, "x", "", 1, 1, []uint64{109, 4}, []uint64{109, 4}), } - for _, region := range s.regions { - s.cluster.PutRegion(region) + for _, region := range suite.regions { + suite.cluster.PutRegion(region) } - s.mc = NewMergeChecker(s.ctx, s.cluster) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster) } -func (s *testMergeCheckerSuite) TestBasic(c *C) { - s.cluster.SetSplitMergeInterval(0) +func (suite *mergeCheckerTestSuite) TearDownTest() { + suite.cancel() +} + +func (suite *mergeCheckerTestSuite) TestBasic() { + suite.cluster.SetSplitMergeInterval(0) // should with same peer count - ops := s.mc.Check(s.regions[0]) - c.Assert(ops, IsNil) + ops := suite.mc.Check(suite.regions[0]) + suite.Nil(ops) // The size should be small enough. - ops = s.mc.Check(s.regions[1]) - c.Assert(ops, IsNil) + ops = suite.mc.Check(suite.regions[1]) + suite.Nil(ops) // target region size is too large - s.cluster.PutRegion(s.regions[1].Clone(core.SetApproximateSize(600))) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + suite.cluster.PutRegion(suite.regions[1].Clone(core.SetApproximateSize(600))) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // it can merge if the max region size of the store is greater than the target region size. 
- config := s.cluster.GetStoreConfig() + config := suite.cluster.GetStoreConfig() config.RegionMaxSize = "10Gib" - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) config.RegionMaxSize = "144Mib" - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // change the size back - s.cluster.PutRegion(s.regions[1].Clone(core.SetApproximateSize(200))) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) + suite.cluster.PutRegion(suite.regions[1].Clone(core.SetApproximateSize(200))) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) // Check merge with previous region. - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the peer store check. - store := s.cluster.GetStore(1) - c.Assert(store, NotNil) + store := suite.cluster.GetStore(1) + suite.NotNil(store) // Test the peer store is deleted. - s.cluster.DeleteStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + suite.cluster.DeleteStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // Test the store is normal. - s.cluster.PutStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.cluster.PutStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the store is offline. - s.cluster.SetStoreOffline(store.GetID()) - ops = s.mc.Check(s.regions[2]) + suite.cluster.SetStoreOffline(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) // Only target region have a peer on the offline store, // so it's not ok to merge. - c.Assert(ops, IsNil) + suite.Nil(ops) // Test the store is up. - s.cluster.SetStoreUp(store.GetID()) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) - store = s.cluster.GetStore(5) - c.Assert(store, NotNil) + suite.cluster.SetStoreUp(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) + store = suite.cluster.GetStore(5) + suite.NotNil(store) // Test the peer store is deleted. - s.cluster.DeleteStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + suite.cluster.DeleteStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // Test the store is normal. - s.cluster.PutStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.cluster.PutStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the store is offline. 
- s.cluster.SetStoreOffline(store.GetID()) - ops = s.mc.Check(s.regions[2]) + suite.cluster.SetStoreOffline(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) // Both regions have peers on the offline store, // so it's ok to merge. - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the store is up. - s.cluster.SetStoreUp(store.GetID()) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.cluster.SetStoreUp(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Enable one way merge - s.cluster.SetEnableOneWayMerge(true) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - s.cluster.SetEnableOneWayMerge(false) + suite.cluster.SetEnableOneWayMerge(true) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + suite.cluster.SetEnableOneWayMerge(false) // Make up peers for next region. - s.regions[3] = s.regions[3].Clone(core.WithAddPeer(&metapb.Peer{Id: 110, StoreId: 1}), core.WithAddPeer(&metapb.Peer{Id: 111, StoreId: 2})) - s.cluster.PutRegion(s.regions[3]) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) + suite.regions[3] = suite.regions[3].Clone(core.WithAddPeer(&metapb.Peer{Id: 110, StoreId: 1}), core.WithAddPeer(&metapb.Peer{Id: 111, StoreId: 2})) + suite.cluster.PutRegion(suite.regions[3]) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) // Now it merges to next region. - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[3].GetID()) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[3].GetID(), ops[1].RegionID()) // merge cannot across rule key. - s.cluster.SetEnablePlacementRules(true) - s.cluster.RuleManager.SetRule(&placement.Rule{ + suite.cluster.SetEnablePlacementRules(true) + suite.cluster.RuleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 1, @@ -203,83 +199,60 @@ func (s *testMergeCheckerSuite) TestBasic(c *C) { Count: 3, }) // region 2 can only merge with previous region now. - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) - s.cluster.RuleManager.DeleteRule("pd", "test") + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) + suite.cluster.RuleManager.DeleteRule("pd", "test") // check 'merge_option' label - s.cluster.GetRegionLabeler().SetLabelRule(&labeler.LabelRule{ + suite.cluster.GetRegionLabeler().SetLabelRule(&labeler.LabelRule{ ID: "test", Labels: []labeler.RegionLabel{{Key: mergeOptionLabel, Value: mergeOptionValueDeny}}, RuleType: labeler.KeyRange, Data: makeKeyRanges("", "74"), }) - ops = s.mc.Check(s.regions[0]) - c.Assert(ops, HasLen, 0) - ops = s.mc.Check(s.regions[1]) - c.Assert(ops, HasLen, 0) + ops = suite.mc.Check(suite.regions[0]) + suite.Len(ops, 0) + ops = suite.mc.Check(suite.regions[1]) + suite.Len(ops, 0) // Skip recently split regions. 
- s.cluster.SetSplitMergeInterval(time.Hour) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - - s.mc.startTime = time.Now().Add(-2 * time.Hour) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, NotNil) - - s.mc.RecordRegionSplit([]uint64{s.regions[2].GetID()}) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, IsNil) - - s.cluster.SetSplitMergeInterval(500 * time.Millisecond) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, IsNil) + suite.cluster.SetSplitMergeInterval(time.Hour) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + + suite.mc.startTime = time.Now().Add(-2 * time.Hour) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.NotNil(ops) + + suite.mc.RecordRegionSplit([]uint64{suite.regions[2].GetID()}) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.Nil(ops) + + suite.cluster.SetSplitMergeInterval(500 * time.Millisecond) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.Nil(ops) time.Sleep(500 * time.Millisecond) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, NotNil) -} - -func (s *testMergeCheckerSuite) checkSteps(c *C, op *operator.Operator, steps []operator.OpStep) { - c.Assert(op.Kind()&operator.OpMerge, Not(Equals), 0) - c.Assert(steps, NotNil) - c.Assert(op.Len(), Equals, len(steps)) - for i := range steps { - switch op.Step(i).(type) { - case operator.AddLearner: - c.Assert(op.Step(i).(operator.AddLearner).ToStore, Equals, steps[i].(operator.AddLearner).ToStore) - case operator.PromoteLearner: - c.Assert(op.Step(i).(operator.PromoteLearner).ToStore, Equals, steps[i].(operator.PromoteLearner).ToStore) - case operator.TransferLeader: - c.Assert(op.Step(i).(operator.TransferLeader).FromStore, Equals, steps[i].(operator.TransferLeader).FromStore) - c.Assert(op.Step(i).(operator.TransferLeader).ToStore, Equals, steps[i].(operator.TransferLeader).ToStore) - case operator.RemovePeer: - c.Assert(op.Step(i).(operator.RemovePeer).FromStore, Equals, steps[i].(operator.RemovePeer).FromStore) - case operator.MergeRegion: - c.Assert(op.Step(i).(operator.MergeRegion).IsPassive, Equals, steps[i].(operator.MergeRegion).IsPassive) - default: - c.Fatal("unknown operator step type") - } - } + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.NotNil(ops) } -func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { - s.cluster.SetSplitMergeInterval(0) +func (suite *mergeCheckerTestSuite) TestMatchPeers() { + suite.cluster.SetSplitMergeInterval(0) // partial store overlap not including leader - ops := s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - s.checkSteps(c, ops[0], []operator.OpStep{ + ops := suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 2}, @@ -288,21 +261,21 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.TransferLeader{FromStore: 6, ToStore: 5}, operator.RemovePeer{FromStore: 6}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: 
suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // partial store overlap including leader - newRegion := s.regions[2].Clone( + newRegion := suite.regions[2].Clone( core.SetPeers([]*metapb.Peer{ {Id: 106, StoreId: 1}, {Id: 107, StoreId: 5}, @@ -310,59 +283,59 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 106, StoreId: 1}), ) - s.regions[2] = newRegion - s.cluster.PutRegion(s.regions[2]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.regions[2] = newRegion + suite.cluster.PutRegion(suite.regions[2]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 4}, operator.PromoteLearner{ToStore: 4}, operator.RemovePeer{FromStore: 6}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // all stores overlap - s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{ + suite.regions[2] = suite.regions[2].Clone(core.SetPeers([]*metapb.Peer{ {Id: 106, StoreId: 1}, {Id: 107, StoreId: 5}, {Id: 108, StoreId: 4}, })) - s.cluster.PutRegion(s.regions[2]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[2]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // all stores not overlap - s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{ + suite.regions[2] = suite.regions[2].Clone(core.SetPeers([]*metapb.Peer{ {Id: 109, StoreId: 2}, {Id: 110, StoreId: 3}, {Id: 111, StoreId: 6}, }), core.WithLeader(&metapb.Peer{Id: 109, StoreId: 2})) - s.cluster.PutRegion(s.regions[2]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[2]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -374,21 +347,21 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.TransferLeader{FromStore: 2, ToStore: 1}, operator.RemovePeer{FromStore: 2}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: 
s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // no overlap with reject leader label - s.regions[1] = s.regions[1].Clone( + suite.regions[1] = suite.regions[1].Clone( core.SetPeers([]*metapb.Peer{ {Id: 112, StoreId: 7}, {Id: 113, StoreId: 8}, @@ -396,9 +369,9 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 114, StoreId: 1}), ) - s.cluster.PutRegion(s.regions[1]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[1]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -413,21 +386,21 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.RemovePeer{FromStore: 2}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // overlap with reject leader label - s.regions[1] = s.regions[1].Clone( + suite.regions[1] = suite.regions[1].Clone( core.SetPeers([]*metapb.Peer{ {Id: 115, StoreId: 7}, {Id: 116, StoreId: 8}, @@ -435,7 +408,7 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 117, StoreId: 1}), ) - s.regions[2] = s.regions[2].Clone( + suite.regions[2] = suite.regions[2].Clone( core.SetPeers([]*metapb.Peer{ {Id: 118, StoreId: 7}, {Id: 119, StoreId: 3}, @@ -443,9 +416,9 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 120, StoreId: 2}), ) - s.cluster.PutRegion(s.regions[1]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[1]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -454,23 +427,23 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.TransferLeader{FromStore: 2, ToStore: 1}, operator.RemovePeer{FromStore: 2}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) } -func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { +func (suite *mergeCheckerTestSuite) 
TestStoreLimitWithMerge() { cfg := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, cfg) + tc := mockcluster.NewCluster(suite.ctx, cfg) tc.SetMaxMergeRegionSize(2) tc.SetMaxMergeRegionKeys(2) tc.SetSplitMergeInterval(0) @@ -489,9 +462,9 @@ func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { tc.PutRegion(region) } - mc := NewMergeChecker(s.ctx, tc) - stream := hbstream.NewTestHeartbeatStreams(s.ctx, tc.ID, tc, false /* no need to run */) - oc := schedule.NewOperatorController(s.ctx, tc, stream) + mc := NewMergeChecker(suite.ctx, tc) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + oc := schedule.NewOperatorController(suite.ctx, tc, stream) regions[2] = regions[2].Clone( core.SetPeers([]*metapb.Peer{ @@ -509,8 +482,8 @@ func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { // The size of Region is less or equal than 1MB. for i := 0; i < 50; i++ { ops := mc.Check(regions[2]) - c.Assert(ops, NotNil) - c.Assert(oc.AddOperator(ops...), IsTrue) + suite.NotNil(ops) + suite.True(oc.AddOperator(ops...)) for _, op := range ops { oc.RemoveOperator(op) } @@ -523,49 +496,49 @@ func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { // The size of Region is more than 1MB but no more than 20MB. for i := 0; i < 5; i++ { ops := mc.Check(regions[2]) - c.Assert(ops, NotNil) - c.Assert(oc.AddOperator(ops...), IsTrue) + suite.NotNil(ops) + suite.True(oc.AddOperator(ops...)) for _, op := range ops { oc.RemoveOperator(op) } } { ops := mc.Check(regions[2]) - c.Assert(ops, NotNil) - c.Assert(oc.AddOperator(ops...), IsFalse) + suite.NotNil(ops) + suite.False(oc.AddOperator(ops...)) } } -func (s *testMergeCheckerSuite) TestCache(c *C) { +func (suite *mergeCheckerTestSuite) TestCache() { cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetMaxMergeRegionSize(2) - s.cluster.SetMaxMergeRegionKeys(2) - s.cluster.SetSplitMergeInterval(time.Hour) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetMaxMergeRegionSize(2) + suite.cluster.SetMaxMergeRegionKeys(2) + suite.cluster.SetSplitMergeInterval(time.Hour) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) stores := map[uint64][]string{ 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, } for storeID, labels := range stores { - s.cluster.PutStoreWithLabels(storeID, labels...) + suite.cluster.PutStoreWithLabels(storeID, labels...) 
} - s.regions = []*core.RegionInfo{ + suite.regions = []*core.RegionInfo{ newRegionInfo(2, "a", "t", 200, 200, []uint64{104, 4}, []uint64{103, 1}, []uint64{104, 4}, []uint64{105, 5}), newRegionInfo(3, "t", "x", 1, 1, []uint64{108, 6}, []uint64{106, 2}, []uint64{107, 5}, []uint64{108, 6}), } - for _, region := range s.regions { - s.cluster.PutRegion(region) + for _, region := range suite.regions { + suite.cluster.PutRegion(region) } - s.mc = NewMergeChecker(s.ctx, s.cluster) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster) - ops := s.mc.Check(s.regions[1]) - c.Assert(ops, IsNil) - s.cluster.SetSplitMergeInterval(0) + ops := suite.mc.Check(suite.regions[1]) + suite.Nil(ops) + suite.cluster.SetSplitMergeInterval(0) time.Sleep(time.Second) - ops = s.mc.Check(s.regions[1]) - c.Assert(ops, NotNil) + ops = suite.mc.Check(suite.regions[1]) + suite.NotNil(ops) } func makeKeyRanges(keys ...string) []interface{} { diff --git a/server/schedule/checker/priority_inspector_test.go b/server/schedule/checker/priority_inspector_test.go index 319c330d359..35662846c4a 100644 --- a/server/schedule/checker/priority_inspector_test.go +++ b/server/schedule/checker/priority_inspector_test.go @@ -16,31 +16,20 @@ package checker import ( "context" + "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" ) -var _ = Suite(&testPriorityInspectorSuite{}) - -type testPriorityInspectorSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testPriorityInspectorSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testPriorityInspectorSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testPriorityInspectorSuite) TestCheckPriorityRegions(c *C) { +func TestCheckPriorityRegions(t *testing.T) { + re := require.New(t) opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc := mockcluster.NewCluster(ctx, opt) tc.AddRegionStore(1, 0) tc.AddRegionStore(2, 0) tc.AddRegionStore(3, 0) @@ -49,42 +38,42 @@ func (s *testPriorityInspectorSuite) TestCheckPriorityRegions(c *C) { tc.AddLeaderRegion(3, 2) pc := NewPriorityInspector(tc) - checkPriorityRegionTest(pc, tc, c) + checkPriorityRegionTest(re, pc, tc) opt.SetPlacementRuleEnabled(true) - c.Assert(opt.IsPlacementRulesEnabled(), IsTrue) - checkPriorityRegionTest(pc, tc, c) + re.True(opt.IsPlacementRulesEnabled()) + checkPriorityRegionTest(re, pc, tc) } -func checkPriorityRegionTest(pc *PriorityInspector, tc *mockcluster.Cluster, c *C) { +func checkPriorityRegionTest(re *require.Assertions, pc *PriorityInspector, tc *mockcluster.Cluster) { // case1: inspect region 1, it doesn't lack replica region := tc.GetRegion(1) opt := tc.GetOpts() pc.Inspect(region) - c.Assert(0, Equals, pc.queue.Len()) + re.Equal(0, pc.queue.Len()) // case2: inspect region 2, it lacks one replica region = tc.GetRegion(2) pc.Inspect(region) - c.Assert(1, Equals, pc.queue.Len()) + re.Equal(1, pc.queue.Len()) // the region will not rerun after it checks - c.Assert(0, Equals, len(pc.GetPriorityRegions())) + re.Equal(0, len(pc.GetPriorityRegions())) // case3: inspect region 3, it will has high priority region = tc.GetRegion(3) pc.Inspect(region) - c.Assert(2, Equals, pc.queue.Len()) + re.Equal(2, pc.queue.Len()) time.Sleep(opt.GetPatrolRegionInterval() * 10) // region 3 has higher priority ids := pc.GetPriorityRegions() - c.Assert(2, 
Equals, len(ids)) - c.Assert(uint64(3), Equals, ids[0]) - c.Assert(uint64(2), Equals, ids[1]) + re.Equal(2, len(ids)) + re.Equal(uint64(3), ids[0]) + re.Equal(uint64(2), ids[1]) // case4: inspect region 2 again after it fixup replicas tc.AddLeaderRegion(2, 2, 3, 1) region = tc.GetRegion(2) pc.Inspect(region) - c.Assert(1, Equals, pc.queue.Len()) + re.Equal(1, pc.queue.Len()) // recover tc.AddLeaderRegion(2, 2, 3) diff --git a/server/schedule/checker/replica_checker_test.go b/server/schedule/checker/replica_checker_test.go index 87c813d0111..8a0327c09c7 100644 --- a/server/schedule/checker/replica_checker_test.go +++ b/server/schedule/checker/replica_checker_test.go @@ -16,11 +16,12 @@ package checker import ( "context" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/testutil" @@ -35,28 +36,24 @@ const ( MB = 1024 * KB ) -var _ = Suite(&testReplicaCheckerSuite{}) - -type testReplicaCheckerSuite struct { +type replicaCheckerTestSuite struct { + suite.Suite cluster *mockcluster.Cluster rc *ReplicaChecker ctx context.Context cancel context.CancelFunc } -func (s *testReplicaCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testReplicaCheckerSuite) TearDownTest(c *C) { - s.cancel() +func TestReplicaCheckerTestSuite(t *testing.T) { + suite.Run(t, new(replicaCheckerTestSuite)) } -func (s *testReplicaCheckerSuite) SetUpTest(c *C) { +func (suite *replicaCheckerTestSuite) SetupTest() { cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.rc = NewReplicaChecker(s.cluster, cache.NewDefaultCache(10)) + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.rc = NewReplicaChecker(suite.cluster, cache.NewDefaultCache(10)) stats := &pdpb.StoreStats{ Capacity: 100, Available: 100, @@ -88,12 +85,16 @@ func (s *testReplicaCheckerSuite) SetUpTest(c *C) { ), } for _, store := range stores { - s.cluster.PutStore(store) + suite.cluster.PutStore(store) } - s.cluster.AddLabelsStore(2, 1, map[string]string{"noleader": "true"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"noleader": "true"}) +} + +func (suite *replicaCheckerTestSuite) TearDownTest() { + suite.cancel() } -func (s *testReplicaCheckerSuite) TestReplacePendingPeer(c *C) { +func (suite *replicaCheckerTestSuite) TestReplacePendingPeer() { peers := []*metapb.Peer{ { Id: 2, @@ -109,16 +110,16 @@ func (s *testReplicaCheckerSuite) TestReplacePendingPeer(c *C) { }, } r := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: peers}, peers[1], core.WithPendingPeers(peers[0:1])) - s.cluster.PutRegion(r) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(1).(operator.PromoteLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(2).(operator.RemovePeer).FromStore, Equals, uint64(1)) + suite.cluster.PutRegion(r) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.Equal(uint64(4), op.Step(1).(operator.PromoteLearner).ToStore) + suite.Equal(uint64(1), 
op.Step(2).(operator.RemovePeer).FromStore) } -func (s *testReplicaCheckerSuite) TestReplaceOfflinePeer(c *C) { - s.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ +func (suite *replicaCheckerTestSuite) TestReplaceOfflinePeer() { + suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ config.RejectLeader: {{Key: "noleader", Value: "true"}}, }) peers := []*metapb.Peer{ @@ -136,17 +137,17 @@ func (s *testReplicaCheckerSuite) TestReplaceOfflinePeer(c *C) { }, } r := core.NewRegionInfo(&metapb.Region{Id: 2, Peers: peers}, peers[0]) - s.cluster.PutRegion(r) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Step(0).(operator.TransferLeader).ToStore, Equals, uint64(3)) - c.Assert(op.Step(1).(operator.AddLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(2).(operator.PromoteLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(3).(operator.RemovePeer).FromStore, Equals, uint64(1)) + suite.cluster.PutRegion(r) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal(uint64(3), op.Step(0).(operator.TransferLeader).ToStore) + suite.Equal(uint64(4), op.Step(1).(operator.AddLearner).ToStore) + suite.Equal(uint64(4), op.Step(2).(operator.PromoteLearner).ToStore) + suite.Equal(uint64(1), op.Step(3).(operator.RemovePeer).FromStore) } -func (s *testReplicaCheckerSuite) TestOfflineWithOneReplica(c *C) { - s.cluster.SetMaxReplicas(1) +func (suite *replicaCheckerTestSuite) TestOfflineWithOneReplica() { + suite.cluster.SetMaxReplicas(1) peers := []*metapb.Peer{ { Id: 4, @@ -154,27 +155,27 @@ func (s *testReplicaCheckerSuite) TestOfflineWithOneReplica(c *C) { }, } r := core.NewRegionInfo(&metapb.Region{Id: 2, Peers: peers}, peers[0]) - s.cluster.PutRegion(r) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-offline-replica") + suite.cluster.PutRegion(r) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("replace-offline-replica", op.Desc()) } -func (s *testReplicaCheckerSuite) TestDownPeer(c *C) { +func (suite *replicaCheckerTestSuite) TestDownPeer() { // down a peer, the number of normal peers(except learner) is enough. - op := s.downPeerAndCheck(c, metapb.PeerRole_Voter) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-extra-down-replica") + op := suite.downPeerAndCheck(metapb.PeerRole_Voter) + suite.NotNil(op) + suite.Equal("remove-extra-down-replica", op.Desc()) // down a peer,the number of peers(except learner) is not enough. 
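The hunks above all apply the same mechanical mapping from gocheck to testify: c.Assert(got, Equals, want) becomes suite.Equal(want, got) with the expected value first, and IsNil/NotNil/IsTrue/IsFalse/HasLen become Nil/NotNil/True/False/Len on the embedded suite. A minimal, self-contained sketch of that pattern, with a hypothetical package and suite name that are not part of this patch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/suite"
    )

    type exampleTestSuite struct {
        suite.Suite
    }

    func TestExampleTestSuite(t *testing.T) {
        suite.Run(t, new(exampleTestSuite))
    }

    func (suite *exampleTestSuite) TestAssertionMapping() {
        ops := []int{1, 2}
        // gocheck: c.Assert(len(ops), Equals, 2) -> expected value comes first.
        suite.Equal(2, len(ops))
        // gocheck: c.Assert(ops, NotNil) / c.Assert(ops, HasLen, 2)
        suite.NotNil(ops)
        suite.Len(ops, 2)
        // gocheck: c.Assert(op, IsNil)
        var op *struct{}
        suite.Nil(op)
    }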
- op = s.downPeerAndCheck(c, metapb.PeerRole_Learner) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-down-replica") + op = suite.downPeerAndCheck(metapb.PeerRole_Learner) + suite.NotNil(op) + suite.Equal("replace-down-replica", op.Desc()) } -func (s *testReplicaCheckerSuite) downPeerAndCheck(c *C, aliveRole metapb.PeerRole) *operator.Operator { - s.cluster.SetMaxReplicas(2) - s.cluster.SetStoreUp(1) +func (suite *replicaCheckerTestSuite) downPeerAndCheck(aliveRole metapb.PeerRole) *operator.Operator { + suite.cluster.SetMaxReplicas(2) + suite.cluster.SetStoreUp(1) downStoreID := uint64(3) peers := []*metapb.Peer{ { @@ -192,8 +193,8 @@ func (s *testReplicaCheckerSuite) downPeerAndCheck(c *C, aliveRole metapb.PeerRo }, } r := core.NewRegionInfo(&metapb.Region{Id: 2, Peers: peers}, peers[0]) - s.cluster.PutRegion(r) - s.cluster.SetStoreDown(downStoreID) + suite.cluster.PutRegion(r) + suite.cluster.SetStoreDown(downStoreID) downPeer := &pdpb.PeerStats{ Peer: &metapb.Peer{ Id: 14, @@ -202,13 +203,13 @@ func (s *testReplicaCheckerSuite) downPeerAndCheck(c *C, aliveRole metapb.PeerRo DownSeconds: 24 * 60 * 60, } r = r.Clone(core.WithDownPeers(append(r.GetDownPeers(), downPeer))) - c.Assert(r.GetDownPeers(), HasLen, 1) - return s.rc.Check(r) + suite.Len(r.GetDownPeers(), 1) + return suite.rc.Check(r) } -func (s *testReplicaCheckerSuite) TestBasic(c *C) { +func (suite *replicaCheckerTestSuite) TestBasic() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetMaxSnapshotCount(2) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -223,41 +224,41 @@ func (s *testReplicaCheckerSuite) TestBasic(c *C) { // Region has 2 peers, we need to add a new peer. region := tc.GetRegion(1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Disable make up replica feature. tc.SetEnableMakeUpReplica(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetEnableMakeUpReplica(true) // Test healthFilter. // If store 4 is down, we add to store 3. tc.SetStoreDown(4) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) tc.SetStoreUp(4) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Test snapshotCountFilter. // If snapshotCount > MaxSnapshotCount, we add to store 3. tc.UpdateSnapshotCount(4, 3) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) // If snapshotCount < MaxSnapshotCount, we can add peer again. tc.UpdateSnapshotCount(4, 1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Add peer in store 4, and we have enough replicas. peer4, _ := tc.AllocPeer(4) region = region.Clone(core.WithAddPeer(peer4)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Add peer in store 3, and we have redundant replicas. 
peer3, _ := tc.AllocPeer(3) region = region.Clone(core.WithAddPeer(peer3)) - testutil.CheckRemovePeer(c, rc.Check(region), 1) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 1) // Disable remove extra replica feature. tc.SetEnableRemoveExtraReplica(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetEnableRemoveExtraReplica(true) region = region.Clone(core.WithRemoveStorePeer(1), core.WithLeader(region.GetStorePeer(3))) @@ -270,18 +271,18 @@ func (s *testReplicaCheckerSuite) TestBasic(c *C) { } region = region.Clone(core.WithDownPeers(append(region.GetDownPeers(), downPeer))) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 2, 1) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 1) region = region.Clone(core.WithDownPeers(nil)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Peer in store 3 is offline, transfer peer to store 1. tc.SetStoreOffline(3) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 1) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 1) } -func (s *testReplicaCheckerSuite) TestLostStore(c *C) { +func (suite *replicaCheckerTestSuite) TestLostStore() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 1) @@ -295,12 +296,12 @@ func (s *testReplicaCheckerSuite) TestLostStore(c *C) { tc.AddLeaderRegion(1, 1, 2, 3) region := tc.GetRegion(1) op := rc.Check(region) - c.Assert(op, IsNil) + suite.Nil(op) } -func (s *testReplicaCheckerSuite) TestOffline(c *C) { +func (suite *replicaCheckerTestSuite) TestOffline() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) @@ -316,43 +317,43 @@ func (s *testReplicaCheckerSuite) TestOffline(c *C) { region := tc.GetRegion(1) // Store 2 has different zone and smallest region score. - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2) peer2, _ := tc.AllocPeer(2) region = region.Clone(core.WithAddPeer(peer2)) // Store 3 has different zone and smallest region score. - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) peer3, _ := tc.AllocPeer(3) region = region.Clone(core.WithAddPeer(peer3)) // Store 4 has the same zone with store 3 and larger region score. peer4, _ := tc.AllocPeer(4) region = region.Clone(core.WithAddPeer(peer4)) - testutil.CheckRemovePeer(c, rc.Check(region), 4) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 4) // Test offline // the number of region peers more than the maxReplicas // remove the peer tc.SetStoreOffline(3) - testutil.CheckRemovePeer(c, rc.Check(region), 3) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 3) region = region.Clone(core.WithRemoveStorePeer(4)) // the number of region peers equals the maxReplicas // Transfer peer to store 4. 
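Shared helpers are migrated the same way: where they previously took a *check.C, they now take a *require.Assertions, obtained from require.New(t) in plain tests or from suite.Require() inside a suite, as the checkPriorityRegionTest and testutil.Check*WithTestify call sites above show. A rough sketch of that helper shape, using hypothetical names and a simplified signature rather than the real testutil API:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"
        "github.com/stretchr/testify/suite"
    )

    // checkIDs stands in for helpers such as checkPriorityRegionTest: it
    // receives the assertion handle instead of *check.C, so it can be called
    // from both test styles below.
    func checkIDs(re *require.Assertions, ids []uint64, want int) {
        re.Len(ids, want)
    }

    func TestPlainStyle(t *testing.T) {
        re := require.New(t)
        checkIDs(re, []uint64{3, 2}, 2)
    }

    type helperTestSuite struct{ suite.Suite }

    func TestHelperTestSuite(t *testing.T) {
        suite.Run(t, new(helperTestSuite))
    }

    func (suite *helperTestSuite) TestSuiteStyle() {
        // Inside a suite, suite.Require() yields the same *require.Assertions.
        checkIDs(suite.Require(), []uint64{3, 2}, 2)
    }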
- testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) // Store 5 has a same label score with store 4, but the region score smaller than store 4, we will choose store 5. tc.AddLabelsStore(5, 3, map[string]string{"zone": "z4", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 5) // Store 5 has too many snapshots, choose store 4 tc.UpdateSnapshotCount(5, 100) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) tc.UpdatePendingPeerCount(4, 100) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } -func (s *testReplicaCheckerSuite) TestDistinctScore(c *C) { +func (suite *replicaCheckerTestSuite) TestDistinctScore() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) @@ -365,73 +366,73 @@ func (s *testReplicaCheckerSuite) TestDistinctScore(c *C) { // We need 3 replicas. tc.AddLeaderRegion(1, 1) region := tc.GetRegion(1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2) peer2, _ := tc.AllocPeer(2) region = region.Clone(core.WithAddPeer(peer2)) // Store 1,2,3 have the same zone, rack, and host. tc.AddLabelsStore(3, 5, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) // Store 4 has smaller region score. tc.AddLabelsStore(4, 4, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Store 5 has a different host. tc.AddLabelsStore(5, 5, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 5) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 5) // Store 6 has a different rack. tc.AddLabelsStore(6, 6, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 6) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 6) // Store 7 has a different zone. tc.AddLabelsStore(7, 7, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 7) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 7) // Test stateFilter. tc.SetStoreOffline(7) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 6) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 6) tc.SetStoreUp(7) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 7) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 7) // Add peer to store 7. 
peer7, _ := tc.AllocPeer(7) region = region.Clone(core.WithAddPeer(peer7)) // Replace peer in store 1 with store 6 because it has a different rack. - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 1, 6) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 6) // Disable locationReplacement feature. tc.SetEnableLocationReplacement(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetEnableLocationReplacement(true) peer6, _ := tc.AllocPeer(6) region = region.Clone(core.WithAddPeer(peer6)) - testutil.CheckRemovePeer(c, rc.Check(region), 1) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 1) region = region.Clone(core.WithRemoveStorePeer(1), core.WithLeader(region.GetStorePeer(2))) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Store 8 has the same zone and different rack with store 7. // Store 1 has the same zone and different rack with store 6. // So store 8 and store 1 are equivalent. tc.AddLabelsStore(8, 1, map[string]string{"zone": "z2", "rack": "r2", "host": "h1"}) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Store 10 has a different zone. // Store 2 and 6 have the same distinct score, but store 2 has larger region score. // So replace peer in store 2 with store 10. tc.AddLabelsStore(10, 1, map[string]string{"zone": "z3", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 2, 10) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 10) peer10, _ := tc.AllocPeer(10) region = region.Clone(core.WithAddPeer(peer10)) - testutil.CheckRemovePeer(c, rc.Check(region), 2) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 2) region = region.Clone(core.WithRemoveStorePeer(2)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } -func (s *testReplicaCheckerSuite) TestDistinctScore2(c *C) { +func (suite *replicaCheckerTestSuite) TestDistinctScore2() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(5) tc.SetLocationLabels([]string{"zone", "host"}) @@ -448,20 +449,20 @@ func (s *testReplicaCheckerSuite) TestDistinctScore2(c *C) { tc.AddLeaderRegion(1, 1, 2, 4) region := tc.GetRegion(1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 6) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 6) peer6, _ := tc.AllocPeer(6) region = region.Clone(core.WithAddPeer(peer6)) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 5) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 5) peer5, _ := tc.AllocPeer(5) region = region.Clone(core.WithAddPeer(peer5)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } -func (s *testReplicaCheckerSuite) TestStorageThreshold(c *C) { +func (suite *replicaCheckerTestSuite) TestStorageThreshold() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetLocationLabels([]string{"zone"}) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -480,24 +481,24 @@ func (s *testReplicaCheckerSuite) TestStorageThreshold(c *C) { // Move peer to better location. 
tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 1, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.9, 0.1) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.AddLeaderRegion(2, 1, 3) region = tc.GetRegion(2) // Add peer on store4. tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.8, 0) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2) } -func (s *testReplicaCheckerSuite) TestOpts(c *C) { +func (suite *replicaCheckerTestSuite) TestOpts() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -518,17 +519,17 @@ func (s *testReplicaCheckerSuite) TestOpts(c *C) { })) tc.SetStoreOffline(2) // RemoveDownReplica has higher priority than replaceOfflineReplica. - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 1, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) tc.SetEnableRemoveDownReplica(false) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 2, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 4) tc.SetEnableReplaceOfflineReplica(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testReplicaCheckerSuite) TestFixDownPeer(c *C) { +func (suite *replicaCheckerTestSuite) TestFixDownPeer() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetLocationLabels([]string{"zone"}) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -541,25 +542,25 @@ func (s *testReplicaCheckerSuite) TestFixDownPeer(c *C) { tc.AddLeaderRegion(1, 1, 3, 4) region := tc.GetRegion(1) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetStoreDown(4) region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreDown(5) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testReplicaCheckerSuite) TestFixOfflinePeer(c *C) { +func (suite *replicaCheckerTestSuite) TestFixOfflinePeer() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) 
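For the suite-based files, gocheck's SetUpSuite/SetUpTest/TearDownTest hooks become testify's SetupTest/TearDownTest, and the context is now created and cancelled per test rather than once per suite, as in replicaCheckerTestSuite above and the rule checker suite below. A minimal sketch of that lifecycle, again with a hypothetical suite name:

    package example

    import (
        "context"
        "testing"

        "github.com/stretchr/testify/suite"
    )

    type lifecycleTestSuite struct {
        suite.Suite
        ctx    context.Context
        cancel context.CancelFunc
    }

    func TestLifecycleTestSuite(t *testing.T) {
        suite.Run(t, new(lifecycleTestSuite))
    }

    // SetupTest runs before every test method and hands it a fresh context.
    func (suite *lifecycleTestSuite) SetupTest() {
        suite.ctx, suite.cancel = context.WithCancel(context.Background())
    }

    // TearDownTest cancels the per-test context, mirroring the suites above.
    func (suite *lifecycleTestSuite) TearDownTest() {
        suite.cancel()
    }

    func (suite *lifecycleTestSuite) TestContextIsUsable() {
        suite.NoError(suite.ctx.Err())
    }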
tc.SetLocationLabels([]string{"zone"}) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -572,14 +573,14 @@ func (s *testReplicaCheckerSuite) TestFixOfflinePeer(c *C) { tc.AddLeaderRegion(1, 1, 3, 4) region := tc.GetRegion(1) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetStoreOffline(4) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreOffline(5) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } diff --git a/server/schedule/checker/rule_checker_test.go b/server/schedule/checker/rule_checker_test.go index f3a908939bf..ea9a369348e 100644 --- a/server/schedule/checker/rule_checker_test.go +++ b/server/schedule/checker/rule_checker_test.go @@ -16,11 +16,12 @@ package checker import ( "context" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/testutil" @@ -31,36 +32,12 @@ import ( "github.com/tikv/pd/server/versioninfo" ) -var _ = Suite(&testRuleCheckerSuite{}) -var _ = SerialSuites(&testRuleCheckerSerialSuite{}) - -type testRuleCheckerSerialSuite struct { - cluster *mockcluster.Cluster - ruleManager *placement.RuleManager - rc *RuleChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testRuleCheckerSerialSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) +func TestRuleCheckerTestSuite(t *testing.T) { + suite.Run(t, new(ruleCheckerTestSuite)) } -func (s *testRuleCheckerSerialSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testRuleCheckerSerialSuite) SetUpTest(c *C) { - cfg := config.NewTestOptions() - cfg.SetPlacementRulesCacheEnabled(true) - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.cluster.SetEnablePlacementRules(true) - s.ruleManager = s.cluster.RuleManager - s.rc = NewRuleChecker(s.cluster, s.ruleManager, cache.NewDefaultCache(10)) -} - -type testRuleCheckerSuite struct { +type ruleCheckerTestSuite struct { + suite.Suite cluster *mockcluster.Cluster ruleManager *placement.RuleManager rc *RuleChecker @@ -68,42 +45,39 @@ type testRuleCheckerSuite struct { cancel context.CancelFunc } -func (s *testRuleCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testRuleCheckerSuite) TearDownTest(c *C) { - s.cancel() +func (suite *ruleCheckerTestSuite) SetupTest() { + cfg := config.NewTestOptions() + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster.SetEnablePlacementRules(true) + suite.ruleManager = suite.cluster.RuleManager + suite.rc = NewRuleChecker(suite.cluster, suite.ruleManager, cache.NewDefaultCache(10)) } -func (s *testRuleCheckerSuite) SetUpTest(c *C) { - cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - 
s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.cluster.SetEnablePlacementRules(true) - s.ruleManager = s.cluster.RuleManager - s.rc = NewRuleChecker(s.cluster, s.ruleManager, cache.NewDefaultCache(10)) +func (suite *ruleCheckerTestSuite) TearDownTest() { + suite.cancel() } -func (s *testRuleCheckerSuite) TestAddRulePeer(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(3)) +func (suite *ruleCheckerTestSuite) TestAddRulePeer() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) + suite.Equal(uint64(3), op.Step(0).(operator.AddLearner).ToStore) } -func (s *testRuleCheckerSuite) TestAddRulePeerWithIsolationLevel(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z1", "rack": "r3", "host": "h1"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestAddRulePeerWithIsolationLevel() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z1", "rack": "r3", "host": "h1"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -113,10 +87,10 @@ func (s *testRuleCheckerSuite) TestAddRulePeerWithIsolationLevel(c *C) { LocationLabels: []string{"zone", "rack", "host"}, IsolationLevel: "zone", }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) - s.ruleManager.SetRule(&placement.Rule{ + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -126,75 +100,75 @@ func (s *testRuleCheckerSuite) TestAddRulePeerWithIsolationLevel(c *C) { LocationLabels: []string{"zone", "rack", "host"}, IsolationLevel: "rack", }) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) } -func (s *testRuleCheckerSuite) TestFixPeer(c *C) { - s.cluster.AddLeaderStore(1, 1) - 
s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderStore(4, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - s.cluster.SetStoreDown(2) - r := s.cluster.GetRegion(1) +func (suite *ruleCheckerTestSuite) TestFixPeer() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderStore(4, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + suite.cluster.SetStoreDown(2) + r := suite.cluster.GetRegion(1) r = r.Clone(core.WithDownPeers([]*pdpb.PeerStats{{Peer: r.GetStorePeer(2), DownSeconds: 60000}})) - op = s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-down-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) + op = suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("replace-rule-down-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) var add operator.AddLearner - c.Assert(op.Step(0), FitsTypeOf, add) - s.cluster.SetStoreUp(2) - s.cluster.SetStoreOffline(2) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) - c.Assert(op.Step(0), FitsTypeOf, add) - - s.cluster.SetStoreUp(2) + suite.IsType(add, op.Step(0)) + suite.cluster.SetStoreUp(2) + suite.cluster.SetStoreOffline(2) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("replace-rule-offline-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) + suite.IsType(add, op.Step(0)) + + suite.cluster.SetStoreUp(2) // leader store offline - s.cluster.SetStoreOffline(1) - r1 := s.cluster.GetRegion(1) + suite.cluster.SetStoreOffline(1) + r1 := suite.cluster.GetRegion(1) nr1 := r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(3)})) - s.cluster.PutRegion(nr1) + suite.cluster.PutRegion(nr1) hasTransferLeader := false for i := 0; i < 100; i++ { - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) if step, ok := op.Step(0).(operator.TransferLeader); ok { - c.Assert(step.FromStore, Equals, uint64(1)) - c.Assert(step.ToStore, Not(Equals), uint64(3)) + suite.Equal(uint64(1), step.FromStore) + suite.NotEqual(uint64(3), step.ToStore) hasTransferLeader = true } } - c.Assert(hasTransferLeader, IsTrue) + suite.True(hasTransferLeader) } -func (s *testRuleCheckerSuite) TestFixOrphanPeers(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderStore(4, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") - c.Assert(op.Step(0).(operator.RemovePeer).FromStore, Equals, uint64(4)) +func (suite *ruleCheckerTestSuite) TestFixOrphanPeers() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderStore(4, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("remove-orphan-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.RemovePeer).FromStore) } -func (s *testRuleCheckerSuite) 
TestFixOrphanPeers2(c *C) { +func (suite *ruleCheckerTestSuite) TestFixOrphanPeers2() { // check orphan peers can only be handled when all rules are satisfied. - s.cluster.AddLabelsStore(1, 1, map[string]string{"foo": "bar"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"foo": "bar"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"foo": "baz"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) - s.ruleManager.SetRule(&placement.Rule{ + suite.cluster.AddLabelsStore(1, 1, map[string]string{"foo": "bar"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"foo": "bar"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"foo": "baz"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r1", Index: 100, @@ -205,32 +179,32 @@ func (s *testRuleCheckerSuite) TestFixOrphanPeers2(c *C) { {Key: "foo", Op: "in", Values: []string{"baz"}}, }, }) - s.cluster.SetStoreDown(2) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + suite.cluster.SetStoreDown(2) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestFixRole(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 2, 1, 3) - r := s.cluster.GetRegion(1) +func (suite *ruleCheckerTestSuite) TestFixRole() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 2, 1, 3) + r := suite.cluster.GetRegion(1) p := r.GetStorePeer(1) p.Role = metapb.PeerRole_Learner r = r.Clone(core.WithLearners([]*metapb.Peer{p})) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-peer-role") - c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(1)) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("fix-peer-role", op.Desc()) + suite.Equal(uint64(1), op.Step(0).(operator.PromoteLearner).ToStore) } -func (s *testRuleCheckerSuite) TestFixRoleLeader(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"role": "follower"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"role": "voter"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestFixRoleLeader() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"role": "follower"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"role": "voter"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r1", Index: 100, @@ -241,7 +215,7 @@ func (s *testRuleCheckerSuite) TestFixRoleLeader(c *C) { {Key: "role", Op: "in", Values: []string{"voter"}}, }, }) - s.ruleManager.SetRule(&placement.Rule{ + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r2", Index: 101, @@ -251,17 +225,17 @@ func (s *testRuleCheckerSuite) TestFixRoleLeader(c *C) { {Key: "role", Op: "in", Values: []string{"follower"}}, }, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-follower-role") - c.Assert(op.Step(0).(operator.TransferLeader).ToStore, Equals, uint64(3)) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("fix-follower-role", 
op.Desc()) + suite.Equal(uint64(3), op.Step(0).(operator.TransferLeader).ToStore) } -func (s *testRuleCheckerSuite) TestFixRoleLeaderIssue3130(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"role": "leader"}) - s.cluster.AddLeaderRegion(1, 1, 2) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestFixRoleLeaderIssue3130() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"role": "leader"}) + suite.cluster.AddLeaderRegion(1, 1, 2) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r1", Index: 100, @@ -272,30 +246,30 @@ func (s *testRuleCheckerSuite) TestFixRoleLeaderIssue3130(c *C) { {Key: "role", Op: "in", Values: []string{"leader"}}, }, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-leader-role") - c.Assert(op.Step(0).(operator.TransferLeader).ToStore, Equals, uint64(2)) - - s.cluster.SetStoreBusy(2, true) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - s.cluster.SetStoreBusy(2, false) - - s.cluster.AddLeaderRegion(1, 2, 1) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") - c.Assert(op.Step(0).(operator.RemovePeer).FromStore, Equals, uint64(1)) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("fix-leader-role", op.Desc()) + suite.Equal(uint64(2), op.Step(0).(operator.TransferLeader).ToStore) + + suite.cluster.SetStoreBusy(2, true) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + suite.cluster.SetStoreBusy(2, false) + + suite.cluster.AddLeaderRegion(1, 2, 1) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("remove-orphan-peer", op.Desc()) + suite.Equal(uint64(1), op.Step(0).(operator.RemovePeer).FromStore) } -func (s *testRuleCheckerSuite) TestBetterReplacement(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host3"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestBetterReplacement() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host3"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -304,22 +278,22 @@ func (s *testRuleCheckerSuite) TestBetterReplacement(c *C) { Count: 3, LocationLabels: []string{"host"}, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "move-to-better-location") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("move-to-better-location", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + 
suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestBetterReplacement2(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "host": "host2"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "host": "host3"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z2", "host": "host1"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestBetterReplacement2() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "host": "host2"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "host": "host3"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z2", "host": "host1"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -328,21 +302,21 @@ func (s *testRuleCheckerSuite) TestBetterReplacement2(c *C) { Count: 3, LocationLabels: []string{"zone", "host"}, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "move-to-better-location") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("move-to-better-location", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestNoBetterReplacement(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestNoBetterReplacement() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -351,72 +325,72 @@ func (s *testRuleCheckerSuite) TestNoBetterReplacement(c *C) { Count: 3, LocationLabels: []string{"host"}, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestIssue2419(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderStore(4, 1) - s.cluster.SetStoreOffline(3) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - r := s.cluster.GetRegion(1) +func (suite *ruleCheckerTestSuite) TestIssue2419() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderStore(4, 1) + suite.cluster.SetStoreOffline(3) + 
suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + r := suite.cluster.GetRegion(1) r = r.Clone(core.WithAddPeer(&metapb.Peer{Id: 5, StoreId: 4, Role: metapb.PeerRole_Learner})) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") - c.Assert(op.Step(0).(operator.RemovePeer).FromStore, Equals, uint64(4)) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("remove-orphan-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.RemovePeer).FromStore) r = r.Clone(core.WithRemoveStorePeer(4)) - op = s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(1).(operator.PromoteLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(2).(operator.RemovePeer).FromStore, Equals, uint64(3)) + op = suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("replace-rule-offline-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.Equal(uint64(4), op.Step(1).(operator.PromoteLearner).ToStore) + suite.Equal(uint64(3), op.Step(2).(operator.RemovePeer).FromStore) } // Ref https://github.com/tikv/pd/issues/3521 // The problem is when offline a store, we may add learner multiple times if // the operator is timeout. -func (s *testRuleCheckerSuite) TestPriorityFixOrphanPeer(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) +func (suite *ruleCheckerTestSuite) TestPriorityFixOrphanPeer() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) var add operator.AddLearner var remove operator.RemovePeer - s.cluster.SetStoreOffline(2) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Step(0), FitsTypeOf, add) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") - r := s.cluster.GetRegion(1).Clone(core.WithAddPeer( + suite.cluster.SetStoreOffline(2) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.IsType(add, op.Step(0)) + suite.Equal("replace-rule-offline-peer", op.Desc()) + r := suite.cluster.GetRegion(1).Clone(core.WithAddPeer( &metapb.Peer{ Id: 5, StoreId: 4, Role: metapb.PeerRole_Learner, })) - s.cluster.PutRegion(r) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + suite.cluster.PutRegion(r) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestIssue3293(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, 
map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - err := s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerTestSuite) TestIssue3293() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + err := suite.ruleManager.SetRule(&placement.Rule{ GroupID: "TiDB_DDL_51", ID: "0", Role: placement.Follower, @@ -431,26 +405,26 @@ func (s *testRuleCheckerSuite) TestIssue3293(c *C) { }, }, }) - c.Assert(err, IsNil) - s.cluster.DeleteStore(s.cluster.GetStore(5)) - err = s.ruleManager.SetRule(&placement.Rule{ + suite.NoError(err) + suite.cluster.DeleteStore(suite.cluster.GetStore(5)) + err = suite.ruleManager.SetRule(&placement.Rule{ GroupID: "TiDB_DDL_51", ID: "default", Role: placement.Voter, Count: 3, }) - c.Assert(err, IsNil) - err = s.ruleManager.DeleteRule("pd", "default") - c.Assert(err, IsNil) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") + suite.NoError(err) + err = suite.ruleManager.DeleteRule("pd", "default") + suite.NoError(err) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestIssue3299(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"dc": "sh"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) +func (suite *ruleCheckerTestSuite) TestIssue3299() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"dc": "sh"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) testCases := []struct { constraints []placement.LabelConstraint @@ -524,7 +498,7 @@ func (s *testRuleCheckerSuite) TestIssue3299(c *C) { } for _, t := range testCases { - err := s.ruleManager.SetRule(&placement.Rule{ + err := suite.ruleManager.SetRule(&placement.Rule{ GroupID: "p", ID: "0", Role: placement.Follower, @@ -532,21 +506,21 @@ func (s *testRuleCheckerSuite) TestIssue3299(c *C) { LabelConstraints: t.constraints, }) if t.err != "" { - c.Assert(err, ErrorMatches, t.err) + suite.Regexp(t.err, err.Error()) } else { - c.Assert(err, IsNil) + suite.NoError(err) } } } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testRuleCheckerSuite) TestFixDownPeer(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLeaderRegion(1, 1, 3, 4) +func (suite *ruleCheckerTestSuite) TestFixDownPeer() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(3, 1, 
map[string]string{"zone": "z2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -556,33 +530,33 @@ func (s *testRuleCheckerSuite) TestFixDownPeer(c *C) { Count: 3, LocationLabels: []string{"zone"}, } - s.ruleManager.SetRule(rule) + suite.ruleManager.SetRule(rule) - region := s.cluster.GetRegion(1) - c.Assert(s.rc.Check(region), IsNil) + region := suite.cluster.GetRegion(1) + suite.Nil(suite.rc.Check(region)) - s.cluster.SetStoreDown(4) + suite.cluster.SetStoreDown(4) region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) - s.cluster.SetStoreDown(5) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 2) + suite.cluster.SetStoreDown(5) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" - s.ruleManager.SetRule(rule) - c.Assert(s.rc.Check(region), IsNil) + suite.ruleManager.SetRule(rule) + suite.Nil(suite.rc.Check(region)) } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testRuleCheckerSuite) TestFixOfflinePeer(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLeaderRegion(1, 1, 3, 4) +func (suite *ruleCheckerTestSuite) TestFixOfflinePeer() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -592,30 +566,31 @@ func (s *testRuleCheckerSuite) TestFixOfflinePeer(c *C) { Count: 3, LocationLabels: []string{"zone"}, } - s.ruleManager.SetRule(rule) + suite.ruleManager.SetRule(rule) - region := s.cluster.GetRegion(1) - c.Assert(s.rc.Check(region), IsNil) + region := suite.cluster.GetRegion(1) + suite.Nil(suite.rc.Check(region)) - s.cluster.SetStoreOffline(4) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 5) + suite.cluster.SetStoreOffline(4) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) - s.cluster.SetStoreOffline(5) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 2) + suite.cluster.SetStoreOffline(5) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" - s.ruleManager.SetRule(rule) - c.Assert(s.rc.Check(region), IsNil) + suite.ruleManager.SetRule(rule) + suite.Nil(suite.rc.Check(region)) } -func (s *testRuleCheckerSerialSuite) TestRuleCache(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": 
"z1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) - s.cluster.AddRegionStore(999, 1) - s.cluster.AddLeaderRegion(1, 1, 3, 4) +func (suite *ruleCheckerTestSuite) TestRuleCache() { + suite.cluster.PersistOptions.SetPlacementRulesCacheEnabled(true) + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddRegionStore(999, 1) + suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -625,10 +600,10 @@ func (s *testRuleCheckerSerialSuite) TestRuleCache(c *C) { Count: 3, LocationLabels: []string{"zone"}, } - s.ruleManager.SetRule(rule) - region := s.cluster.GetRegion(1) + suite.ruleManager.SetRule(rule) + region := suite.cluster.GetRegion(1) region = region.Clone(core.WithIncConfVer(), core.WithIncVersion()) - c.Assert(s.rc.Check(region), IsNil) + suite.Nil(suite.rc.Check(region)) testcases := []struct { name string @@ -669,35 +644,35 @@ func (s *testRuleCheckerSerialSuite) TestRuleCache(c *C) { }, } for _, testcase := range testcases { - c.Log(testcase.name) + suite.T().Log(testcase.name) if testcase.stillCached { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldCache", "return(true)"), IsNil) - s.rc.Check(testcase.region) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldCache"), IsNil) + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldCache", "return(true)")) + suite.rc.Check(testcase.region) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldCache")) } else { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache", "return(true)"), IsNil) - s.rc.Check(testcase.region) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache"), IsNil) + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache", "return(true)")) + suite.rc.Check(testcase.region) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache")) } } } // Ref https://github.com/tikv/pd/issues/4045 -func (s *testRuleCheckerSuite) TestSkipFixOrphanPeerIfSelectedPeerisPendingOrDown(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) +func (suite *ruleCheckerTestSuite) TestSkipFixOrphanPeerIfSelectedPeerisPendingOrDown() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) // set peer3 and peer4 to pending - r1 := s.cluster.GetRegion(1) + r1 := 
suite.cluster.GetRegion(1) r1 = r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(3), r1.GetStorePeer(4)})) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) // should not remove extra peer - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) // set peer3 to down-peer r1 = r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(4)})) @@ -707,39 +682,39 @@ func (s *testRuleCheckerSuite) TestSkipFixOrphanPeerIfSelectedPeerisPendingOrDow DownSeconds: 42, }, })) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) // should not remove extra peer - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) // set peer3 to normal r1 = r1.Clone(core.WithDownPeers(nil)) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) // should remove extra peer now var remove operator.RemovePeer - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestPriorityFitHealthPeers(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) - r1 := s.cluster.GetRegion(1) +func (suite *ruleCheckerTestSuite) TestPriorityFitHealthPeers() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) + r1 := suite.cluster.GetRegion(1) // set peer3 to pending r1 = r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetPeer(3)})) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) var remove operator.RemovePeer - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) // set peer3 to down r1 = r1.Clone(core.WithDownPeers([]*pdpb.PeerStats{ @@ -749,18 +724,18 @@ func (s *testRuleCheckerSuite) TestPriorityFitHealthPeers(c *C) { }, })) r1 = r1.Clone(core.WithPendingPeers(nil)) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) } // Ref https://github.com/tikv/pd/issues/4140 -func (s *testRuleCheckerSuite) TestDemoteVoter(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) - region := s.cluster.AddLeaderRegion(1, 1, 4) +func (suite *ruleCheckerTestSuite) TestDemoteVoter() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + 
suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) + region := suite.cluster.AddLeaderRegion(1, 1, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -787,57 +762,57 @@ func (s *testRuleCheckerSuite) TestDemoteVoter(c *C) { }, }, } - s.ruleManager.SetRule(rule) - s.ruleManager.SetRule(rule2) - s.ruleManager.DeleteRule("pd", "default") - op := s.rc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-demote-voter") + suite.ruleManager.SetRule(rule) + suite.ruleManager.SetRule(rule2) + suite.ruleManager.DeleteRule("pd", "default") + op := suite.rc.Check(region) + suite.NotNil(op) + suite.Equal("fix-demote-voter", op.Desc()) } -func (s *testRuleCheckerSuite) TestOfflineAndDownStore(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z4"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) - region := s.cluster.AddLeaderRegion(1, 1, 2, 3) - op := s.rc.Check(region) - c.Assert(op, IsNil) +func (suite *ruleCheckerTestSuite) TestOfflineAndDownStore() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z4"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) + region := suite.cluster.AddLeaderRegion(1, 1, 2, 3) + op := suite.rc.Check(region) + suite.Nil(op) // assert rule checker should generate replace offline peer operator after cached - s.cluster.SetStoreOffline(1) - op = s.rc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") + suite.cluster.SetStoreOffline(1) + op = suite.rc.Check(region) + suite.NotNil(op) + suite.Equal("replace-rule-offline-peer", op.Desc()) // re-cache the regionFit - s.cluster.SetStoreUp(1) - op = s.rc.Check(region) - c.Assert(op, IsNil) + suite.cluster.SetStoreUp(1) + op = suite.rc.Check(region) + suite.Nil(op) // assert rule checker should generate replace down peer operator after cached - s.cluster.SetStoreDown(2) + suite.cluster.SetStoreDown(2) region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{{Peer: region.GetStorePeer(2), DownSeconds: 60000}})) - op = s.rc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-down-peer") + op = suite.rc.Check(region) + suite.NotNil(op) + suite.Equal("replace-rule-down-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestPendingList(c *C) { +func (suite *ruleCheckerTestSuite) TestPendingList() { // no enough store - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - _, exist := s.rc.pendingList.Get(1) - c.Assert(exist, IsTrue) + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + _, exist := suite.rc.pendingList.Get(1) + suite.True(exist) // add more stores - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(3)) - _, exist = s.rc.pendingList.Get(1) - c.Assert(exist, IsFalse) + suite.cluster.AddLeaderStore(2, 1) + 
suite.cluster.AddLeaderStore(3, 1) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) + suite.Equal(uint64(3), op.Step(0).(operator.AddLearner).ToStore) + _, exist = suite.rc.pendingList.Get(1) + suite.False(exist) } diff --git a/server/schedule/checker/split_checker_test.go b/server/schedule/checker/split_checker_test.go index 606c5953762..957ca87bc07 100644 --- a/server/schedule/checker/split_checker_test.go +++ b/server/schedule/checker/split_checker_test.go @@ -17,8 +17,9 @@ package checker import ( "context" "encoding/hex" + "testing" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/labeler" @@ -26,37 +27,18 @@ import ( "github.com/tikv/pd/server/schedule/placement" ) -var _ = Suite(&testSplitCheckerSuite{}) - -type testSplitCheckerSuite struct { - cluster *mockcluster.Cluster - ruleManager *placement.RuleManager - labeler *labeler.RegionLabeler - sc *SplitChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testSplitCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testSplitCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testSplitCheckerSuite) SetUpTest(c *C) { +func TestSplit(t *testing.T) { + re := require.New(t) cfg := config.NewTestOptions() cfg.GetReplicationConfig().EnablePlacementRules = true - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.ruleManager = s.cluster.RuleManager - s.labeler = s.cluster.RegionLabeler - s.sc = NewSplitChecker(s.cluster, s.ruleManager, s.labeler) -} - -func (s *testSplitCheckerSuite) TestSplit(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.ruleManager.SetRule(&placement.Rule{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster := mockcluster.NewCluster(ctx, cfg) + ruleManager := cluster.RuleManager + regionLabeler := cluster.RegionLabeler + sc := NewSplitChecker(cluster, ruleManager, regionLabeler) + cluster.AddLeaderStore(1, 1) + ruleManager.SetRule(&placement.Rule{ GroupID: "test", ID: "test", StartKeyHex: "aa", @@ -64,25 +46,25 @@ func (s *testSplitCheckerSuite) TestSplit(c *C) { Role: placement.Voter, Count: 1, }) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1) - op := s.sc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Len(), Equals, 1) + cluster.AddLeaderRegionWithRange(1, "", "", 1) + op := sc.Check(cluster.GetRegion(1)) + re.NotNil(op) + re.Equal(1, op.Len()) splitKeys := op.Step(0).(operator.SplitRegion).SplitKeys - c.Assert(hex.EncodeToString(splitKeys[0]), Equals, "aa") - c.Assert(hex.EncodeToString(splitKeys[1]), Equals, "cc") + re.Equal("aa", hex.EncodeToString(splitKeys[0])) + re.Equal("cc", hex.EncodeToString(splitKeys[1])) // region label has higher priority. 
- s.labeler.SetLabelRule(&labeler.LabelRule{ + regionLabeler.SetLabelRule(&labeler.LabelRule{ ID: "test", Labels: []labeler.RegionLabel{{Key: "test", Value: "test"}}, RuleType: labeler.KeyRange, Data: makeKeyRanges("bb", "dd"), }) - op = s.sc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Len(), Equals, 1) + op = sc.Check(cluster.GetRegion(1)) + re.NotNil(op) + re.Equal(1, op.Len()) splitKeys = op.Step(0).(operator.SplitRegion).SplitKeys - c.Assert(hex.EncodeToString(splitKeys[0]), Equals, "bb") - c.Assert(hex.EncodeToString(splitKeys[1]), Equals, "dd") + re.Equal("bb", hex.EncodeToString(splitKeys[0])) + re.Equal("dd", hex.EncodeToString(splitKeys[1])) } From ddf711bd5f9f39fb71d4fdfc78c11fabe041ee4e Mon Sep 17 00:00:00 2001 From: JmPotato Date: Mon, 20 Jun 2022 18:10:37 +0800 Subject: [PATCH 30/35] tests: testify the TSO tests (#5169) ref tikv/pd#4813 Testify the TSO tests. Signed-off-by: JmPotato Co-authored-by: Ti Chi Robot --- pkg/testutil/testutil.go | 21 +- tests/client/client_test.go | 4 +- tests/cluster.go | 26 +- tests/server/tso/allocator_test.go | 115 ++++----- tests/server/tso/common_test.go | 25 +- tests/server/tso/consistency_test.go | 345 ++++++++++++--------------- tests/server/tso/global_tso_test.go | 122 ++++------ tests/server/tso/manager_test.go | 94 ++++---- tests/server/tso/tso_test.go | 57 ++--- 9 files changed, 351 insertions(+), 458 deletions(-) diff --git a/pkg/testutil/testutil.go b/pkg/testutil/testutil.go index bc54e901a63..59063aa5385 100644 --- a/pkg/testutil/testutil.go +++ b/pkg/testutil/testutil.go @@ -26,8 +26,9 @@ import ( ) const ( - waitMaxRetry = 200 - waitRetrySleep = time.Millisecond * 100 + defaultWaitRetryTimes = 200 + defaultSleepInterval = time.Millisecond * 100 + defaultWaitFor = time.Second * 20 ) // CheckFunc is a condition checker that passed to WaitUntil. Its implementation @@ -38,6 +39,7 @@ type CheckFunc func() bool type WaitOp struct { retryTimes int sleepInterval time.Duration + waitFor time.Duration } // WaitOption configures WaitOp @@ -53,13 +55,18 @@ func WithSleepInterval(sleep time.Duration) WaitOption { return func(op *WaitOp) { op.sleepInterval = sleep } } +// WithWaitFor specify the max wait for duration +func WithWaitFor(waitFor time.Duration) WaitOption { + return func(op *WaitOp) { op.waitFor = waitFor } +} + // WaitUntil repeatedly evaluates f() for a period of time, util it returns true. // NOTICE: this function will be removed soon, please use `Eventually` instead. func WaitUntil(c *check.C, f CheckFunc, opts ...WaitOption) { c.Log("wait start") option := &WaitOp{ - retryTimes: waitMaxRetry, - sleepInterval: waitRetrySleep, + retryTimes: defaultWaitRetryTimes, + sleepInterval: defaultSleepInterval, } for _, opt := range opts { opt(option) @@ -76,15 +83,15 @@ func WaitUntil(c *check.C, f CheckFunc, opts ...WaitOption) { // Eventually asserts that given condition will be met in a period of time. 
func Eventually(re *require.Assertions, condition func() bool, opts ...WaitOption) { option := &WaitOp{ - retryTimes: waitMaxRetry, - sleepInterval: waitRetrySleep, + waitFor: defaultWaitFor, + sleepInterval: defaultSleepInterval, } for _, opt := range opts { opt(option) } re.Eventually( condition, - option.sleepInterval*time.Duration(option.retryTimes), + option.waitFor, option.sleepInterval, ) } diff --git a/tests/client/client_test.go b/tests/client/client_test.go index 975b54d72f8..86824261d12 100644 --- a/tests/client/client_test.go +++ b/tests/client/client_test.go @@ -235,7 +235,7 @@ func TestTSOAllocatorLeader(t *testing.T) { err = cluster.RunInitialServers() re.NoError(err) - cluster.WaitAllLeadersWithTestify(re, dcLocationConfig) + cluster.WaitAllLeaders(re, dcLocationConfig) var ( testServers = cluster.GetServers() @@ -347,7 +347,7 @@ func TestGlobalAndLocalTSO(t *testing.T) { re.NoError(err) dcLocationConfig["pd4"] = "dc-4" cluster.CheckClusterDCLocation() - cluster.WaitAllLeadersWithTestify(re, dcLocationConfig) + cluster.WaitAllLeaders(re, dcLocationConfig) // Test a nonexistent dc-location for Local TSO p, l, err := cli.GetLocalTS(context.TODO(), "nonexistent-dc") diff --git a/tests/cluster.go b/tests/cluster.go index 0d7efe90ec9..6c79d680c7f 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -22,7 +22,6 @@ import ( "time" "github.com/coreos/go-semver/semver" - "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" @@ -605,27 +604,7 @@ func (c *TestCluster) WaitAllocatorLeader(dcLocation string, ops ...WaitOption) } // WaitAllLeaders will block and wait for the election of PD leader and all Local TSO Allocator leaders. -func (c *TestCluster) WaitAllLeaders(testC *check.C, dcLocations map[string]string) { - c.WaitLeader() - c.CheckClusterDCLocation() - // Wait for each DC's Local TSO Allocator leader - wg := sync.WaitGroup{} - for _, dcLocation := range dcLocations { - wg.Add(1) - go func(dc string) { - testutil.WaitUntil(testC, func() bool { - leaderName := c.WaitAllocatorLeader(dc) - return leaderName != "" - }) - wg.Done() - }(dcLocation) - } - wg.Wait() -} - -// WaitAllLeadersWithTestify will block and wait for the election of PD leader and all Local TSO Allocator leaders. -// NOTICE: this is a temporary function that we will be used to replace `WaitAllLeaders` later. -func (c *TestCluster) WaitAllLeadersWithTestify(re *require.Assertions, dcLocations map[string]string) { +func (c *TestCluster) WaitAllLeaders(re *require.Assertions, dcLocations map[string]string) { c.WaitLeader() c.CheckClusterDCLocation() // Wait for each DC's Local TSO Allocator leader @@ -634,8 +613,7 @@ func (c *TestCluster) WaitAllLeadersWithTestify(re *require.Assertions, dcLocati wg.Add(1) go func(dc string) { testutil.Eventually(re, func() bool { - leaderName := c.WaitAllocatorLeader(dc) - return leaderName != "" + return c.WaitAllocatorLeader(dc) != "" }) wg.Done() }(dcLocation) diff --git a/tests/server/tso/allocator_test.go b/tests/server/tso/allocator_test.go index c7bb38e5d9a..59cedea0783 100644 --- a/tests/server/tso/allocator_test.go +++ b/tests/server/tso/allocator_test.go @@ -21,10 +21,11 @@ import ( "context" "strconv" "sync" + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/testutil" @@ -33,23 +34,10 @@ import ( "github.com/tikv/pd/tests" ) -var _ = Suite(&testAllocatorSuite{}) - -type testAllocatorSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testAllocatorSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testAllocatorSuite) TearDownSuite(c *C) { - s.cancel() -} - -// Make sure we have the correct number of Local TSO Allocator leaders. -func (s *testAllocatorSuite) TestAllocatorLeader(c *C) { +func TestAllocatorLeader(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // There will be three Local TSO Allocator leaders elected dcLocationConfig := map[string]string{ "pd2": "dc-1", @@ -57,19 +45,16 @@ func (s *testAllocatorSuite) TestAllocatorLeader(c *C) { "pd6": "leader", /* Test dc-location name is same as the special key */ } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum*2, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, dcLocationNum*2, func(conf *config.Config, serverName string) { if zoneLabel, ok := dcLocationConfig[serverName]; ok { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = zoneLabel } }) + re.NoError(err) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) - - cluster.WaitAllLeaders(c, dcLocationConfig) + re.NoError(cluster.RunInitialServers()) + cluster.WaitAllLeaders(re, dcLocationConfig) // To check whether we have enough Local TSO Allocator leaders allAllocatorLeaders := make([]tso.Allocator, 0, dcLocationNum) for _, server := range cluster.GetServers() { @@ -80,7 +65,7 @@ func (s *testAllocatorSuite) TestAllocatorLeader(c *C) { tso.FilterUninitialized()) // One PD server will have at most three initialized Local TSO Allocators, // which also means three allocator leaders - c.Assert(len(allocators), LessEqual, dcLocationNum) + re.LessOrEqual(len(allocators), dcLocationNum) if len(allocators) == 0 { continue } @@ -96,7 +81,7 @@ func (s *testAllocatorSuite) TestAllocatorLeader(c *C) { } // At the end, we should have three initialized Local TSO Allocator, // i.e., the Local TSO Allocator leaders for all dc-locations in testDCLocations - c.Assert(allAllocatorLeaders, HasLen, dcLocationNum) + re.Len(allAllocatorLeaders, dcLocationNum) allocatorLeaderMemberIDs := make([]uint64, 0, dcLocationNum) for _, allocator := range allAllocatorLeaders { allocatorLeader, _ := allocator.(*tso.LocalTSOAllocator) @@ -106,62 +91,63 @@ func (s *testAllocatorSuite) TestAllocatorLeader(c *C) { // Filter out Global TSO Allocator allocators := server.GetTSOAllocatorManager().GetAllocators(tso.FilterDCLocation(tso.GlobalDCLocation)) if _, ok := dcLocationConfig[server.GetServer().Name()]; !ok { - c.Assert(allocators, HasLen, 0) + re.Empty(allocators) continue } - c.Assert(allocators, HasLen, dcLocationNum) + re.Len(allocators, dcLocationNum) for _, allocator := range allocators { allocatorFollower, _ := allocator.(*tso.LocalTSOAllocator) allocatorFollowerMemberID := allocatorFollower.GetAllocatorLeader().GetMemberId() - c.Assert( + re.True( slice.AnyOf( allocatorLeaderMemberIDs, - func(i int) bool { return allocatorLeaderMemberIDs[i] == allocatorFollowerMemberID }), - 
IsTrue) + func(i int) bool { return allocatorLeaderMemberIDs[i] == allocatorFollowerMemberID }, + ), + ) } } } -func (s *testAllocatorSuite) TestPriorityAndDifferentLocalTSO(c *C) { +func TestPriorityAndDifferentLocalTSO(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(cluster.RunInitialServers()) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) - - cluster.WaitAllLeaders(c, dcLocationConfig) + cluster.WaitAllLeaders(re, dcLocationConfig) // Wait for all nodes becoming healthy. time.Sleep(time.Second * 5) // Join a new dc-location - pd4, err := cluster.Join(s.ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(ctx, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) - c.Assert(err, IsNil) - err = pd4.Run() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(pd4.Run()) dcLocationConfig["pd4"] = "dc-4" cluster.CheckClusterDCLocation() - testutil.WaitUntil(c, func() bool { - leaderName := cluster.WaitAllocatorLeader("dc-4") - return leaderName != "" - }) + re.NotEqual("", cluster.WaitAllocatorLeader( + "dc-4", + tests.WithRetryTimes(90), tests.WithWaitInterval(time.Second), + )) // Scatter the Local TSO Allocators to different servers waitAllocatorPriorityCheck(cluster) - cluster.WaitAllLeaders(c, dcLocationConfig) + cluster.WaitAllLeaders(re, dcLocationConfig) // Before the priority is checked, we may have allocators typology like this: // pd1: dc-1, dc-2 and dc-3 allocator leader @@ -178,10 +164,9 @@ func (s *testAllocatorSuite) TestPriorityAndDifferentLocalTSO(c *C) { for serverName, dcLocation := range dcLocationConfig { go func(serName, dc string) { defer wg.Done() - testutil.WaitUntil(c, func() bool { - leaderName := cluster.WaitAllocatorLeader(dc) - return leaderName == serName - }, testutil.WithRetryTimes(12), testutil.WithSleepInterval(5*time.Second)) + testutil.Eventually(re, func() bool { + return cluster.WaitAllocatorLeader(dc) == serName + }, testutil.WithWaitFor(time.Second*90), testutil.WithSleepInterval(time.Second)) }(serverName, dcLocation) } wg.Wait() @@ -189,12 +174,12 @@ func (s *testAllocatorSuite) TestPriorityAndDifferentLocalTSO(c *C) { for serverName, server := range cluster.GetServers() { tsoAllocatorManager := server.GetTSOAllocatorManager() localAllocatorLeaders, err := tsoAllocatorManager.GetHoldingLocalAllocatorLeaders() - c.Assert(err, IsNil) + re.NoError(err) for _, localAllocatorLeader := range localAllocatorLeaders { - s.testTSOSuffix(c, cluster, tsoAllocatorManager, localAllocatorLeader.GetDCLocation()) + testTSOSuffix(re, cluster, tsoAllocatorManager, localAllocatorLeader.GetDCLocation()) } if serverName == cluster.GetLeader() { - s.testTSOSuffix(c, cluster, tsoAllocatorManager, tso.GlobalDCLocation) + testTSOSuffix(re, cluster, tsoAllocatorManager, tso.GlobalDCLocation) } } } @@ -211,29 +196,29 @@ func waitAllocatorPriorityCheck(cluster *tests.TestCluster) { wg.Wait() } -func (s 
*testAllocatorSuite) testTSOSuffix(c *C, cluster *tests.TestCluster, am *tso.AllocatorManager, dcLocation string) { +func testTSOSuffix(re *require.Assertions, cluster *tests.TestCluster, am *tso.AllocatorManager, dcLocation string) { suffixBits := am.GetSuffixBits() - c.Assert(suffixBits, Greater, 0) + re.Greater(suffixBits, 0) var suffix int64 // The suffix of a Global TSO will always be 0 if dcLocation != tso.GlobalDCLocation { suffixResp, err := etcdutil.EtcdKVGet( cluster.GetEtcdClient(), am.GetLocalTSOSuffixPath(dcLocation)) - c.Assert(err, IsNil) - c.Assert(suffixResp.Kvs, HasLen, 1) + re.NoError(err) + re.Len(suffixResp.Kvs, 1) suffix, err = strconv.ParseInt(string(suffixResp.Kvs[0].Value), 10, 64) - c.Assert(err, IsNil) - c.Assert(suffixBits, GreaterEqual, tso.CalSuffixBits(int32(suffix))) + re.NoError(err) + re.GreaterOrEqual(suffixBits, tso.CalSuffixBits(int32(suffix))) } allocator, err := am.GetAllocator(dcLocation) - c.Assert(err, IsNil) + re.NoError(err) var tso pdpb.Timestamp - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { tso, err = allocator.GenerateTSO(1) - c.Assert(err, IsNil) + re.NoError(err) return tso.GetPhysical() != 0 }) // Test whether the TSO has the right suffix - c.Assert(suffix, Equals, tso.Logical&((1<>timestamp.GetSuffixBits(), GreaterEqual, req.GetCount()) + re.Greater(timestamp.GetPhysical(), int64(0)) + re.GreaterOrEqual(uint32(timestamp.GetLogical())>>timestamp.GetSuffixBits(), req.GetCount()) return timestamp } -func testGetTimestamp(c *C, ctx context.Context, pdCli pdpb.PDClient, req *pdpb.TsoRequest) *pdpb.Timestamp { +func testGetTimestamp(re *require.Assertions, ctx context.Context, pdCli pdpb.PDClient, req *pdpb.TsoRequest) *pdpb.Timestamp { tsoClient, err := pdCli.Tso(ctx) - c.Assert(err, IsNil) + re.NoError(err) defer tsoClient.CloseSend() - err = tsoClient.Send(req) - c.Assert(err, IsNil) + re.NoError(tsoClient.Send(req)) resp, err := tsoClient.Recv() - c.Assert(err, IsNil) - return checkAndReturnTimestampResponse(c, req, resp) -} - -func Test(t *testing.T) { - TestingT(t) + re.NoError(err) + return checkAndReturnTimestampResponse(re, req, resp) } func TestMain(m *testing.M) { diff --git a/tests/server/tso/consistency_test.go b/tests/server/tso/consistency_test.go index 170a1b4e9a8..430160cd5ac 100644 --- a/tests/server/tso/consistency_test.go +++ b/tests/server/tso/consistency_test.go @@ -20,11 +20,13 @@ package tso_test import ( "context" "sync" + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/pkg/tsoutil" @@ -33,9 +35,8 @@ import ( "github.com/tikv/pd/tests" ) -var _ = Suite(&testTSOConsistencySuite{}) - -type testTSOConsistencySuite struct { +type tsoConsistencyTestSuite struct { + suite.Suite ctx context.Context cancel context.CancelFunc @@ -46,42 +47,44 @@ type testTSOConsistencySuite struct { tsPool map[uint64]struct{} } -func (s *testTSOConsistencySuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.dcClientMap = make(map[string]pdpb.PDClient) - s.tsPool = make(map[uint64]struct{}) +func TestTSOConsistencyTestSuite(t *testing.T) { + suite.Run(t, new(tsoConsistencyTestSuite)) +} + +func (suite *tsoConsistencyTestSuite) SetupSuite() { + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.dcClientMap = make(map[string]pdpb.PDClient) + suite.tsPool = make(map[uint64]struct{}) } -func (s *testTSOConsistencySuite) TearDownSuite(c *C) { - s.cancel() +func (suite *tsoConsistencyTestSuite) TearDownSuite() { + suite.cancel() } // TestNormalGlobalTSO is used to test the normal way of global TSO generation. -func (s *testTSOConsistencySuite) TestNormalGlobalTSO(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func (suite *tsoConsistencyTestSuite) TestNormalGlobalTSO() { + cluster, err := tests.NewTestCluster(suite.ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(suite.Require(), leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(clusterID), Count: uint32(tsoCount), DcLocation: tso.GlobalDCLocation, } - s.requestGlobalTSOConcurrently(c, grpcPDClient, req) + suite.requestGlobalTSOConcurrently(grpcPDClient, req) // Test Global TSO after the leader change leaderServer.GetServer().GetMember().ResetLeader() cluster.WaitLeader() - s.requestGlobalTSOConcurrently(c, grpcPDClient, req) + suite.requestGlobalTSOConcurrently(grpcPDClient, req) } -func (s *testTSOConsistencySuite) requestGlobalTSOConcurrently(c *C, grpcPDClient pdpb.PDClient, req *pdpb.TsoRequest) { +func (suite *tsoConsistencyTestSuite) requestGlobalTSOConcurrently(grpcPDClient pdpb.PDClient, req *pdpb.TsoRequest) { var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) for i := 0; i < tsoRequestConcurrencyNumber; i++ { @@ -92,9 +95,9 @@ func (s *testTSOConsistencySuite) requestGlobalTSOConcurrently(c *C, grpcPDClien Logical: 0, } for j := 0; j < tsoRequestRound; j++ { - ts := s.testGetNormalGlobalTimestamp(c, grpcPDClient, req) + ts := suite.testGetNormalGlobalTimestamp(grpcPDClient, req) // Check whether the TSO fallbacks - c.Assert(tsoutil.CompareTimestamp(ts, last), Equals, 1) + suite.Equal(1, tsoutil.CompareTimestamp(ts, last)) last = ts time.Sleep(10 * time.Millisecond) } @@ -103,49 +106,47 @@ func (s *testTSOConsistencySuite) requestGlobalTSOConcurrently(c *C, grpcPDClien wg.Wait() } -func (s *testTSOConsistencySuite) testGetNormalGlobalTimestamp(c *C, pdCli pdpb.PDClient, req 
*pdpb.TsoRequest) *pdpb.Timestamp { +func (suite *tsoConsistencyTestSuite) testGetNormalGlobalTimestamp(pdCli pdpb.PDClient, req *pdpb.TsoRequest) *pdpb.Timestamp { ctx, cancel := context.WithCancel(context.Background()) defer cancel() tsoClient, err := pdCli.Tso(ctx) - c.Assert(err, IsNil) + suite.NoError(err) defer tsoClient.CloseSend() - err = tsoClient.Send(req) - c.Assert(err, IsNil) + suite.NoError(tsoClient.Send(req)) resp, err := tsoClient.Recv() - c.Assert(err, IsNil) - c.Assert(resp.GetCount(), Equals, req.GetCount()) + suite.NoError(err) + suite.Equal(req.GetCount(), resp.GetCount()) res := resp.GetTimestamp() - c.Assert(res.GetPhysical(), Greater, int64(0)) - c.Assert(uint32(res.GetLogical())>>res.GetSuffixBits(), GreaterEqual, req.GetCount()) + suite.Greater(res.GetPhysical(), int64(0)) + suite.GreaterOrEqual(uint32(res.GetLogical())>>res.GetSuffixBits(), req.GetCount()) return res } // TestSynchronizedGlobalTSO is used to test the synchronized way of global TSO generation. -func (s *testTSOConsistencySuite) TestSynchronizedGlobalTSO(c *C) { +func (suite *tsoConsistencyTestSuite) TestSynchronizedGlobalTSO() { dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(suite.ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, dcLocationConfig) + re := suite.Require() + cluster.WaitAllLeaders(re, dcLocationConfig) - s.leaderServer = cluster.GetServer(cluster.GetLeader()) - c.Assert(s.leaderServer, NotNil) - s.dcClientMap[tso.GlobalDCLocation] = testutil.MustNewGrpcClient(c, s.leaderServer.GetAddr()) + suite.leaderServer = cluster.GetServer(cluster.GetLeader()) + suite.NotNil(suite.leaderServer) + suite.dcClientMap[tso.GlobalDCLocation] = testutil.MustNewGrpcClientWithTestify(re, suite.leaderServer.GetAddr()) for _, dcLocation := range dcLocationConfig { - pdName := s.leaderServer.GetAllocatorLeader(dcLocation).GetName() - s.dcClientMap[dcLocation] = testutil.MustNewGrpcClient(c, cluster.GetServer(pdName).GetAddr()) + pdName := suite.leaderServer.GetAllocatorLeader(dcLocation).GetName() + suite.dcClientMap[dcLocation] = testutil.MustNewGrpcClientWithTestify(re, cluster.GetServer(pdName).GetAddr()) } ctx, cancel := context.WithCancel(context.Background()) @@ -155,14 +156,14 @@ func (s *testTSOConsistencySuite) TestSynchronizedGlobalTSO(c *C) { // Get some local TSOs first oldLocalTSOs := make([]*pdpb.Timestamp, 0, dcLocationNum) for _, dcLocation := range dcLocationConfig { - localTSO := s.getTimestampByDC(ctx, c, cluster, tsoCount, dcLocation) + localTSO := suite.getTimestampByDC(ctx, cluster, dcLocation) oldLocalTSOs = append(oldLocalTSOs, localTSO) - c.Assert(tsoutil.CompareTimestamp(maxGlobalTSO, localTSO), Equals, -1) + suite.Equal(-1, tsoutil.CompareTimestamp(maxGlobalTSO, localTSO)) } // Get a global TSO then - globalTSO := s.getTimestampByDC(ctx, c, cluster, tsoCount, tso.GlobalDCLocation) + globalTSO := suite.getTimestampByDC(ctx, cluster, tso.GlobalDCLocation) for _, oldLocalTSO := range oldLocalTSOs { - c.Assert(tsoutil.CompareTimestamp(globalTSO, 
oldLocalTSO), Equals, 1) + suite.Equal(1, tsoutil.CompareTimestamp(globalTSO, oldLocalTSO)) } if tsoutil.CompareTimestamp(maxGlobalTSO, globalTSO) < 0 { maxGlobalTSO = globalTSO @@ -170,153 +171,147 @@ func (s *testTSOConsistencySuite) TestSynchronizedGlobalTSO(c *C) { // Get some local TSOs again newLocalTSOs := make([]*pdpb.Timestamp, 0, dcLocationNum) for _, dcLocation := range dcLocationConfig { - newLocalTSOs = append(newLocalTSOs, s.getTimestampByDC(ctx, c, cluster, tsoCount, dcLocation)) + newLocalTSOs = append(newLocalTSOs, suite.getTimestampByDC(ctx, cluster, dcLocation)) } for _, newLocalTSO := range newLocalTSOs { - c.Assert(tsoutil.CompareTimestamp(maxGlobalTSO, newLocalTSO), Equals, -1) + suite.Equal(-1, tsoutil.CompareTimestamp(maxGlobalTSO, newLocalTSO)) } } } -func (s *testTSOConsistencySuite) getTimestampByDC(ctx context.Context, c *C, cluster *tests.TestCluster, n uint32, dcLocation string) *pdpb.Timestamp { +func (suite *tsoConsistencyTestSuite) getTimestampByDC(ctx context.Context, cluster *tests.TestCluster, dcLocation string) *pdpb.Timestamp { req := &pdpb.TsoRequest{ - Header: testutil.NewRequestHeader(s.leaderServer.GetClusterID()), - Count: n, + Header: testutil.NewRequestHeader(suite.leaderServer.GetClusterID()), + Count: tsoCount, DcLocation: dcLocation, } - pdClient, ok := s.dcClientMap[dcLocation] - c.Assert(ok, IsTrue) - forwardedHost := cluster.GetServer(s.leaderServer.GetAllocatorLeader(dcLocation).GetName()).GetAddr() + pdClient, ok := suite.dcClientMap[dcLocation] + suite.True(ok) + forwardedHost := cluster.GetServer(suite.leaderServer.GetAllocatorLeader(dcLocation).GetName()).GetAddr() ctx = grpcutil.BuildForwardContext(ctx, forwardedHost) tsoClient, err := pdClient.Tso(ctx) - c.Assert(err, IsNil) + suite.NoError(err) defer tsoClient.CloseSend() - err = tsoClient.Send(req) - c.Assert(err, IsNil) + suite.NoError(tsoClient.Send(req)) resp, err := tsoClient.Recv() - c.Assert(err, IsNil) - return checkAndReturnTimestampResponse(c, req, resp) + suite.NoError(err) + return checkAndReturnTimestampResponse(suite.Require(), req, resp) } -func (s *testTSOConsistencySuite) TestSynchronizedGlobalTSOOverflow(c *C) { +func (suite *tsoConsistencyTestSuite) TestSynchronizedGlobalTSOOverflow() { dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(suite.ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, dcLocationConfig) + re := suite.Require() + cluster.WaitAllLeaders(re, dcLocationConfig) - s.leaderServer = cluster.GetServer(cluster.GetLeader()) - c.Assert(s.leaderServer, NotNil) - s.dcClientMap[tso.GlobalDCLocation] = testutil.MustNewGrpcClient(c, s.leaderServer.GetAddr()) + suite.leaderServer = cluster.GetServer(cluster.GetLeader()) + suite.NotNil(suite.leaderServer) + suite.dcClientMap[tso.GlobalDCLocation] = testutil.MustNewGrpcClientWithTestify(re, suite.leaderServer.GetAddr()) for _, dcLocation := range dcLocationConfig { - pdName := s.leaderServer.GetAllocatorLeader(dcLocation).GetName() - s.dcClientMap[dcLocation] = 
testutil.MustNewGrpcClient(c, cluster.GetServer(pdName).GetAddr()) + pdName := suite.leaderServer.GetAllocatorLeader(dcLocation).GetName() + suite.dcClientMap[dcLocation] = testutil.MustNewGrpcClientWithTestify(re, cluster.GetServer(pdName).GetAddr()) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/globalTSOOverflow", `return(true)`), IsNil) - s.getTimestampByDC(ctx, c, cluster, tsoCount, tso.GlobalDCLocation) - failpoint.Disable("github.com/tikv/pd/server/tso/globalTSOOverflow") + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/globalTSOOverflow", `return(true)`)) + suite.getTimestampByDC(ctx, cluster, tso.GlobalDCLocation) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/tso/globalTSOOverflow")) } -func (s *testTSOConsistencySuite) TestLocalAllocatorLeaderChange(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/mockLocalAllocatorLeaderChange", `return(true)`), IsNil) - defer failpoint.Disable("github.com/tikv/pd/server/mockLocalAllocatorLeaderChange") +func (suite *tsoConsistencyTestSuite) TestLocalAllocatorLeaderChange() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/mockLocalAllocatorLeaderChange", `return(true)`)) dcLocationConfig := map[string]string{ "pd1": "dc-1", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(suite.ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, dcLocationConfig) + re := suite.Require() + cluster.WaitAllLeaders(re, dcLocationConfig) - s.leaderServer = cluster.GetServer(cluster.GetLeader()) - c.Assert(s.leaderServer, NotNil) - s.dcClientMap[tso.GlobalDCLocation] = testutil.MustNewGrpcClient(c, s.leaderServer.GetAddr()) + suite.leaderServer = cluster.GetServer(cluster.GetLeader()) + suite.NotNil(suite.leaderServer) + suite.dcClientMap[tso.GlobalDCLocation] = testutil.MustNewGrpcClientWithTestify(re, suite.leaderServer.GetAddr()) for _, dcLocation := range dcLocationConfig { - pdName := s.leaderServer.GetAllocatorLeader(dcLocation).GetName() - s.dcClientMap[dcLocation] = testutil.MustNewGrpcClient(c, cluster.GetServer(pdName).GetAddr()) + pdName := suite.leaderServer.GetAllocatorLeader(dcLocation).GetName() + suite.dcClientMap[dcLocation] = testutil.MustNewGrpcClientWithTestify(re, cluster.GetServer(pdName).GetAddr()) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - s.getTimestampByDC(ctx, c, cluster, tsoCount, tso.GlobalDCLocation) + suite.getTimestampByDC(ctx, cluster, tso.GlobalDCLocation) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/mockLocalAllocatorLeaderChange")) } -func (s *testTSOConsistencySuite) TestLocalTSO(c *C) { +func (suite *tsoConsistencyTestSuite) TestLocalTSO() { dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(suite.ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true 
conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) - - cluster.WaitAllLeaders(c, dcLocationConfig) - s.testTSO(c, cluster, dcLocationConfig, nil) + cluster.WaitAllLeaders(suite.Require(), dcLocationConfig) + suite.testTSO(cluster, dcLocationConfig, nil) } -func (s *testTSOConsistencySuite) checkTSOUnique(tso *pdpb.Timestamp) bool { - s.tsPoolMutex.Lock() - defer s.tsPoolMutex.Unlock() +func (suite *tsoConsistencyTestSuite) checkTSOUnique(tso *pdpb.Timestamp) bool { + suite.tsPoolMutex.Lock() + defer suite.tsPoolMutex.Unlock() ts := tsoutil.GenerateTS(tso) - if _, exist := s.tsPool[ts]; exist { + if _, exist := suite.tsPool[ts]; exist { return false } - s.tsPool[ts] = struct{}{} + suite.tsPool[ts] = struct{}{} return true } -func (s *testTSOConsistencySuite) TestLocalTSOAfterMemberChanged(c *C) { +func (suite *tsoConsistencyTestSuite) TestLocalTSOAfterMemberChanged() { dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(suite.ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, dcLocationConfig) + re := suite.Require() + cluster.WaitAllLeaders(re, dcLocationConfig) leaderServer := cluster.GetServer(cluster.GetLeader()) - leaderCli := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + leaderCli := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(cluster.GetCluster().GetId()), Count: tsoCount, @@ -324,40 +319,40 @@ func (s *testTSOConsistencySuite) TestLocalTSOAfterMemberChanged(c *C) { } ctx, cancel := context.WithCancel(context.Background()) ctx = grpcutil.BuildForwardContext(ctx, leaderServer.GetAddr()) - previousTS := testGetTimestamp(c, ctx, leaderCli, req) + previousTS := testGetTimestamp(re, ctx, leaderCli, req) cancel() // Wait for all nodes becoming healthy. time.Sleep(time.Second * 5) // Mock the situation that the system time of PD nodes in dc-4 is slower than others. 
- c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/systemTimeSlow", `return(true)`), IsNil) + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/systemTimeSlow", `return(true)`)) // Join a new dc-location - pd4, err := cluster.Join(s.ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(suite.ctx, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) - c.Assert(err, IsNil) - err = pd4.Run() - c.Assert(err, IsNil) + suite.NoError(err) + suite.NoError(pd4.Run()) dcLocationConfig["pd4"] = "dc-4" cluster.CheckClusterDCLocation() - testutil.WaitUntil(c, func() bool { - leaderName := cluster.WaitAllocatorLeader("dc-4") - return leaderName != "" - }) - s.testTSO(c, cluster, dcLocationConfig, previousTS) + re.NotEqual("", cluster.WaitAllocatorLeader( + "dc-4", + tests.WithRetryTimes(90), tests.WithWaitInterval(time.Second), + )) + suite.testTSO(cluster, dcLocationConfig, previousTS) - failpoint.Disable("github.com/tikv/pd/server/tso/systemTimeSlow") + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/tso/systemTimeSlow")) } -func (s *testTSOConsistencySuite) testTSO(c *C, cluster *tests.TestCluster, dcLocationConfig map[string]string, previousTS *pdpb.Timestamp) { +func (suite *tsoConsistencyTestSuite) testTSO(cluster *tests.TestCluster, dcLocationConfig map[string]string, previousTS *pdpb.Timestamp) { + re := suite.Require() leaderServer := cluster.GetServer(cluster.GetLeader()) dcClientMap := make(map[string]pdpb.PDClient) for _, dcLocation := range dcLocationConfig { pdName := leaderServer.GetAllocatorLeader(dcLocation).GetName() - dcClientMap[dcLocation] = testutil.MustNewGrpcClient(c, cluster.GetServer(pdName).GetAddr()) + dcClientMap[dcLocation] = testutil.MustNewGrpcClientWithTestify(re, cluster.GetServer(pdName).GetAddr()) } var wg sync.WaitGroup @@ -381,68 +376,49 @@ func (s *testTSOConsistencySuite) testTSO(c *C, cluster *tests.TestCluster, dcLo } ctx, cancel := context.WithCancel(context.Background()) ctx = grpcutil.BuildForwardContext(ctx, cluster.GetServer(leaderServer.GetAllocatorLeader(dcLocation).GetName()).GetAddr()) - ts := testGetTimestamp(c, ctx, dcClientMap[dcLocation], req) + ts := testGetTimestamp(re, ctx, dcClientMap[dcLocation], req) cancel() lastTS := lastList[dcLocation] // Check whether the TSO fallbacks - c.Assert(tsoutil.CompareTimestamp(ts, lastTS), Equals, 1) + suite.Equal(1, tsoutil.CompareTimestamp(ts, lastTS)) if previousTS != nil { // Because we have a Global TSO synchronization, even though the system time // of the PD nodes in dc-4 is slower, its TSO will still be big enough. 
- c.Assert(tsoutil.CompareTimestamp(ts, previousTS), Equals, 1) + suite.Equal(1, tsoutil.CompareTimestamp(ts, previousTS)) } lastList[dcLocation] = ts // Check whether the TSO is not unique - c.Assert(s.checkTSOUnique(ts), IsTrue) + suite.True(suite.checkTSOUnique(ts)) } time.Sleep(10 * time.Millisecond) } }() } wg.Wait() - - failpoint.Disable("github.com/tikv/pd/server/tso/systemTimeSlow") -} - -var _ = Suite(&testFallbackTSOConsistencySuite{}) - -type testFallbackTSOConsistencySuite struct { - ctx context.Context - cancel context.CancelFunc - cluster *tests.TestCluster - grpcPDClient pdpb.PDClient - server *tests.TestServer } -func (s *testFallbackTSOConsistencySuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/fallBackSync", `return(true)`), IsNil) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/fallBackUpdate", `return(true)`), IsNil) +func TestFallbackTSOConsistency(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + re.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/fallBackSync", `return(true)`)) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/fallBackUpdate", `return(true)`)) var err error - s.cluster, err = tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) + defer cluster.Destroy() - err = s.cluster.RunInitialServers() - c.Assert(err, IsNil) - s.cluster.WaitLeader() + re.NoError(cluster.RunInitialServers()) + cluster.WaitLeader() - s.server = s.cluster.GetServer(s.cluster.GetLeader()) - s.grpcPDClient = testutil.MustNewGrpcClient(c, s.server.GetAddr()) - svr := s.server.GetServer() + server := cluster.GetServer(cluster.GetLeader()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, server.GetAddr()) + svr := server.GetServer() svr.Close() - failpoint.Disable("github.com/tikv/pd/server/tso/fallBackSync") - failpoint.Disable("github.com/tikv/pd/server/tso/fallBackUpdate") - err = svr.Run() - c.Assert(err, IsNil) - s.cluster.WaitLeader() -} - -func (s *testFallbackTSOConsistencySuite) TearDownSuite(c *C) { - s.cancel() - s.cluster.Destroy() -} - -func (s *testFallbackTSOConsistencySuite) TestFallbackTSOConsistency(c *C) { + re.NoError(failpoint.Disable("github.com/tikv/pd/server/tso/fallBackSync")) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/tso/fallBackUpdate")) + re.NoError(svr.Run()) + cluster.WaitLeader() var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) for i := 0; i < tsoRequestConcurrencyNumber; i++ { @@ -453,8 +429,23 @@ func (s *testFallbackTSOConsistencySuite) TestFallbackTSOConsistency(c *C) { Logical: 0, } for j := 0; j < tsoRequestRound; j++ { - ts := s.testGetTSO(c, 10) - c.Assert(tsoutil.CompareTimestamp(ts, last), Equals, 1) + clusterID := server.GetClusterID() + req := &pdpb.TsoRequest{ + Header: testutil.NewRequestHeader(clusterID), + Count: 10, + DcLocation: tso.GlobalDCLocation, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tsoClient, err := grpcPDClient.Tso(ctx) + re.NoError(err) + defer tsoClient.CloseSend() + re.NoError(tsoClient.Send(req)) + resp, err := tsoClient.Recv() + re.NoError(err) + ts := checkAndReturnTimestampResponse(re, req, resp) + re.Equal(1, tsoutil.CompareTimestamp(ts, last)) last = ts time.Sleep(10 * time.Millisecond) } @@ -462,23 +453,3 @@ func (s *testFallbackTSOConsistencySuite) TestFallbackTSOConsistency(c *C) { } 
wg.Wait() } - -func (s *testFallbackTSOConsistencySuite) testGetTSO(c *C, n uint32) *pdpb.Timestamp { - clusterID := s.server.GetClusterID() - req := &pdpb.TsoRequest{ - Header: testutil.NewRequestHeader(clusterID), - Count: n, - DcLocation: tso.GlobalDCLocation, - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - tsoClient, err := s.grpcPDClient.Tso(ctx) - c.Assert(err, IsNil) - defer tsoClient.CloseSend() - err = tsoClient.Send(req) - c.Assert(err, IsNil) - resp, err := tsoClient.Recv() - c.Assert(err, IsNil) - return checkAndReturnTimestampResponse(c, req, resp) -} diff --git a/tests/server/tso/global_tso_test.go b/tests/server/tso/global_tso_test.go index 1086751fa08..795841b6830 100644 --- a/tests/server/tso/global_tso_test.go +++ b/tests/server/tso/global_tso_test.go @@ -19,13 +19,13 @@ package tso_test import ( "context" - "strings" "sync" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/tso" @@ -41,32 +41,19 @@ import ( // which will coordinate and synchronize a TSO with other Local TSO Allocator // leaders. -var _ = Suite(&testNormalGlobalTSOSuite{}) - -type testNormalGlobalTSOSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testNormalGlobalTSOSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testNormalGlobalTSOSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *testNormalGlobalTSOSuite) TestConcurrentlyReset(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func TestConcurrentlyReset(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader() leader := cluster.GetServer(cluster.GetLeader()) - c.Assert(leader, NotNil) + re.NotNil(leader) var wg sync.WaitGroup wg.Add(2) @@ -84,41 +71,41 @@ func (s *testNormalGlobalTSOSuite) TestConcurrentlyReset(c *C) { wg.Wait() } -func (s *testNormalGlobalTSOSuite) TestZeroTSOCount(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func TestZeroTSOCount(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) clusterID := leaderServer.GetClusterID() req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(clusterID), DcLocation: tso.GlobalDCLocation, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() tsoClient, err := grpcPDClient.Tso(ctx) - c.Assert(err, IsNil) + re.NoError(err) defer tsoClient.CloseSend() - err = tsoClient.Send(req) - c.Assert(err, IsNil) + re.NoError(tsoClient.Send(req)) _, err = tsoClient.Recv() - c.Assert(err, NotNil) + re.Error(err) } -func (s *testNormalGlobalTSOSuite) 
TestRequestFollower(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 2) - c.Assert(err, IsNil) +func TestRequestFollower(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 2) + re.NoError(err) defer cluster.Destroy() - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader() var followerServer *tests.TestServer @@ -127,79 +114,74 @@ func (s *testNormalGlobalTSOSuite) TestRequestFollower(c *C) { followerServer = s } } - c.Assert(followerServer, NotNil) + re.NotNil(followerServer) - grpcPDClient := testutil.MustNewGrpcClient(c, followerServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, followerServer.GetAddr()) clusterID := followerServer.GetClusterID() req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(clusterID), Count: 1, DcLocation: tso.GlobalDCLocation, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() ctx = grpcutil.BuildForwardContext(ctx, followerServer.GetAddr()) tsoClient, err := grpcPDClient.Tso(ctx) - c.Assert(err, IsNil) + re.NoError(err) defer tsoClient.CloseSend() start := time.Now() - err = tsoClient.Send(req) - c.Assert(err, IsNil) + re.NoError(tsoClient.Send(req)) _, err = tsoClient.Recv() - c.Assert(err, NotNil) - c.Assert(strings.Contains(err.Error(), "generate timestamp failed"), IsTrue) + re.Error(err) + re.Contains(err.Error(), "generate timestamp failed") // Requesting follower should fail fast, or the unavailable time will be // too long. - c.Assert(time.Since(start), Less, time.Second) + re.Less(time.Since(start), time.Second) } // In some cases, when a TSO request arrives, the SyncTimestamp may not finish yet. // This test is used to simulate this situation and verify that the retry mechanism. 
-func (s *testNormalGlobalTSOSuite) TestDelaySyncTimestamp(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 2) - c.Assert(err, IsNil) +func TestDelaySyncTimestamp(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 2) + re.NoError(err) defer cluster.Destroy() - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader() var leaderServer, nextLeaderServer *tests.TestServer leaderServer = cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer, NotNil) + re.NotNil(leaderServer) for _, s := range cluster.GetServers() { if s.GetConfig().Name != cluster.GetLeader() { nextLeaderServer = s } } - c.Assert(nextLeaderServer, NotNil) + re.NotNil(nextLeaderServer) - grpcPDClient := testutil.MustNewGrpcClient(c, nextLeaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, nextLeaderServer.GetAddr()) clusterID := nextLeaderServer.GetClusterID() req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(clusterID), Count: 1, DcLocation: tso.GlobalDCLocation, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/delaySyncTimestamp", `return(true)`), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/delaySyncTimestamp", `return(true)`)) // Make the old leader resign and wait for the new leader to get a lease leaderServer.ResignLeader() - c.Assert(nextLeaderServer.WaitLeader(), IsTrue) + re.True(nextLeaderServer.WaitLeader()) ctx = grpcutil.BuildForwardContext(ctx, nextLeaderServer.GetAddr()) tsoClient, err := grpcPDClient.Tso(ctx) - c.Assert(err, IsNil) + re.NoError(err) defer tsoClient.CloseSend() - err = tsoClient.Send(req) - c.Assert(err, IsNil) + re.NoError(tsoClient.Send(req)) resp, err := tsoClient.Recv() - c.Assert(err, IsNil) - c.Assert(checkAndReturnTimestampResponse(c, req, resp), NotNil) - failpoint.Disable("github.com/tikv/pd/server/tso/delaySyncTimestamp") + re.NoError(err) + re.NotNil(checkAndReturnTimestampResponse(re, req, resp)) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/tso/delaySyncTimestamp")) } diff --git a/tests/server/tso/manager_test.go b/tests/server/tso/manager_test.go index 26fa07cc1d5..5ea8bc4be92 100644 --- a/tests/server/tso/manager_test.go +++ b/tests/server/tso/manager_test.go @@ -20,10 +20,11 @@ package tso_test import ( "context" "strconv" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/config" @@ -32,24 +33,12 @@ import ( "go.etcd.io/etcd/clientv3" ) -var _ = Suite(&testManagerSuite{}) - -type testManagerSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testManagerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testManagerSuite) TearDownSuite(c *C) { - s.cancel() -} - // TestClusterDCLocations will write different dc-locations to each server // and test whether we can get the whole dc-location config from each server. 
-func (s *testManagerSuite) TestClusterDCLocations(c *C) { +func TestClusterDCLocations(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() testCase := struct { dcLocationNumber int dcLocationConfig map[string]string @@ -65,17 +54,15 @@ func (s *testManagerSuite) TestClusterDCLocations(c *C) { }, } serverNumber := len(testCase.dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, serverNumber, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, serverNumber, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = testCase.dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(cluster.RunInitialServers()) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) - - cluster.WaitAllLeaders(c, testCase.dcLocationConfig) + cluster.WaitAllLeaders(re, testCase.dcLocationConfig) serverNameMap := make(map[uint64]string) for _, server := range cluster.GetServers() { serverNameMap[server.GetServerID()] = server.GetServer().Name() @@ -86,21 +73,24 @@ func (s *testManagerSuite) TestClusterDCLocations(c *C) { for _, server := range cluster.GetServers() { obtainedServerNumber := 0 dcLocationMap := server.GetTSOAllocatorManager().GetClusterDCLocations() - c.Assert(err, IsNil) - c.Assert(dcLocationMap, HasLen, testCase.dcLocationNumber) + re.NoError(err) + re.Len(dcLocationMap, testCase.dcLocationNumber) for obtainedDCLocation, info := range dcLocationMap { obtainedServerNumber += len(info.ServerIDs) for _, serverID := range info.ServerIDs { expectedDCLocation, exist := testCase.dcLocationConfig[serverNameMap[serverID]] - c.Assert(exist, IsTrue) - c.Assert(obtainedDCLocation, Equals, expectedDCLocation) + re.True(exist) + re.Equal(expectedDCLocation, obtainedDCLocation) } } - c.Assert(obtainedServerNumber, Equals, serverNumber) + re.Equal(serverNumber, obtainedServerNumber) } } -func (s *testManagerSuite) TestLocalTSOSuffix(c *C) { +func TestLocalTSOSuffix(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() testCase := struct { dcLocations []string dcLocationConfig map[string]string @@ -116,44 +106,45 @@ func (s *testManagerSuite) TestLocalTSOSuffix(c *C) { }, } serverNumber := len(testCase.dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, serverNumber, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, serverNumber, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = testCase.dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, testCase.dcLocationConfig) + cluster.WaitAllLeaders(re, testCase.dcLocationConfig) tsoAllocatorManager := cluster.GetServer("pd1").GetTSOAllocatorManager() for _, dcLocation := range testCase.dcLocations { suffixResp, err := etcdutil.EtcdKVGet( cluster.GetEtcdClient(), tsoAllocatorManager.GetLocalTSOSuffixPath(dcLocation)) - c.Assert(err, IsNil) - c.Assert(suffixResp.Kvs, HasLen, 1) + re.NoError(err) + re.Len(suffixResp.Kvs, 1) // Test the increment of the suffix allSuffixResp, err := etcdutil.EtcdKVGet( cluster.GetEtcdClient(), tsoAllocatorManager.GetLocalTSOSuffixPathPrefix(), clientv3.WithPrefix(), 
clientv3.WithSort(clientv3.SortByValue, clientv3.SortAscend)) - c.Assert(err, IsNil) - c.Assert(len(allSuffixResp.Kvs), Equals, len(testCase.dcLocations)) + re.NoError(err) + re.Equal(len(testCase.dcLocations), len(allSuffixResp.Kvs)) var lastSuffixNum int64 for _, kv := range allSuffixResp.Kvs { suffixNum, err := strconv.ParseInt(string(kv.Value), 10, 64) - c.Assert(err, IsNil) - c.Assert(suffixNum, Greater, lastSuffixNum) + re.NoError(err) + re.Greater(suffixNum, lastSuffixNum) lastSuffixNum = suffixNum } } } -func (s *testManagerSuite) TestNextLeaderKey(c *C) { +func TestNextLeaderKey(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tso.PriorityCheck = 5 * time.Second defer func() { tso.PriorityCheck = 1 * time.Minute @@ -163,32 +154,31 @@ func (s *testManagerSuite) TestNextLeaderKey(c *C) { "pd2": "dc-1", } serverNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, serverNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, serverNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/injectNextLeaderKey", "return(true)"), IsNil) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/injectNextLeaderKey", "return(true)")) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader(tests.WithWaitInterval(5*time.Second), tests.WithRetryTimes(3)) // To speed up the test, we force to do the check cluster.CheckClusterDCLocation() originName := cluster.WaitAllocatorLeader("dc-1", tests.WithRetryTimes(5), tests.WithWaitInterval(5*time.Second)) - c.Assert(originName, Equals, "") - c.Assert(failpoint.Disable("github.com/tikv/pd/server/tso/injectNextLeaderKey"), IsNil) + re.Equal("", originName) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/tso/injectNextLeaderKey")) cluster.CheckClusterDCLocation() originName = cluster.WaitAllocatorLeader("dc-1") - c.Assert(originName, Not(Equals), "") + re.NotEqual("", originName) for name, server := range cluster.GetServers() { if name == originName { continue } err := server.GetTSOAllocatorManager().TransferAllocatorForDCLocation("dc-1", server.GetServer().GetMember().ID()) - c.Assert(err, IsNil) - testutil.WaitUntil(c, func() bool { + re.NoError(err) + testutil.Eventually(re, func() bool { cluster.CheckClusterDCLocation() currName := cluster.WaitAllocatorLeader("dc-1") return currName == name diff --git a/tests/server/tso/tso_test.go b/tests/server/tso/tso_test.go index 27bc53d5652..8cb6b6d837f 100644 --- a/tests/server/tso/tso_test.go +++ b/tests/server/tso/tso_test.go @@ -19,81 +19,66 @@ package tso_test import ( "context" + "testing" - . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/grpcutil" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" ) -var _ = Suite(&testTSOSuite{}) - -type testTSOSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testTSOSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testTSOSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *testTSOSuite) TestLoadTimestamp(c *C) { +func TestLoadTimestamp(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) - - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) + re.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, dcLocationConfig) + cluster.WaitAllLeaders(re, dcLocationConfig) - lastTSMap := requestLocalTSOs(c, cluster, dcLocationConfig) + lastTSMap := requestLocalTSOs(re, cluster, dcLocationConfig) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/tso/systemTimeSlow", `return(true)`), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/tso/systemTimeSlow", `return(true)`)) // Reboot the cluster. - err = cluster.StopAll() - c.Assert(err, IsNil) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.StopAll()) + re.NoError(cluster.RunInitialServers()) - cluster.WaitAllLeaders(c, dcLocationConfig) + cluster.WaitAllLeaders(re, dcLocationConfig) // Re-request the Local TSOs. - newTSMap := requestLocalTSOs(c, cluster, dcLocationConfig) + newTSMap := requestLocalTSOs(re, cluster, dcLocationConfig) for dcLocation, newTS := range newTSMap { lastTS, ok := lastTSMap[dcLocation] - c.Assert(ok, IsTrue) + re.True(ok) // The new physical time of TSO should be larger even if the system time is slow. 
- c.Assert(newTS.GetPhysical()-lastTS.GetPhysical(), Greater, int64(0)) + re.Greater(newTS.GetPhysical()-lastTS.GetPhysical(), int64(0)) } failpoint.Disable("github.com/tikv/pd/server/tso/systemTimeSlow") } -func requestLocalTSOs(c *C, cluster *tests.TestCluster, dcLocationConfig map[string]string) map[string]*pdpb.Timestamp { +func requestLocalTSOs(re *require.Assertions, cluster *tests.TestCluster, dcLocationConfig map[string]string) map[string]*pdpb.Timestamp { dcClientMap := make(map[string]pdpb.PDClient) tsMap := make(map[string]*pdpb.Timestamp) leaderServer := cluster.GetServer(cluster.GetLeader()) for _, dcLocation := range dcLocationConfig { pdName := leaderServer.GetAllocatorLeader(dcLocation).GetName() - dcClientMap[dcLocation] = testutil.MustNewGrpcClient(c, cluster.GetServer(pdName).GetAddr()) + dcClientMap[dcLocation] = testutil.MustNewGrpcClientWithTestify(re, cluster.GetServer(pdName).GetAddr()) } for _, dcLocation := range dcLocationConfig { req := &pdpb.TsoRequest{ @@ -103,7 +88,7 @@ func requestLocalTSOs(c *C, cluster *tests.TestCluster, dcLocationConfig map[str } ctx, cancel := context.WithCancel(context.Background()) ctx = grpcutil.BuildForwardContext(ctx, cluster.GetServer(leaderServer.GetAllocatorLeader(dcLocation).GetName()).GetAddr()) - tsMap[dcLocation] = testGetTimestamp(c, ctx, dcClientMap[dcLocation], req) + tsMap[dcLocation] = testGetTimestamp(re, ctx, dcClientMap[dcLocation], req) cancel() } return tsMap From e0f5b49af3c44417e0fcfee02dd1a3e48f17bb8f Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Tue, 21 Jun 2022 12:02:37 +0800 Subject: [PATCH 31/35] operator: migrate test framework to testify (#5191) ref tikv/pd#4813 Signed-off-by: lhy1024 Co-authored-by: Ti Chi Robot --- server/schedule/operator/builder_test.go | 237 +++++++++--------- server/schedule/operator/status_test.go | 17 +- .../schedule/operator/status_tracker_test.go | 122 ++++----- server/schedule/operator/step_test.go | 130 +++++----- 4 files changed, 258 insertions(+), 248 deletions(-) diff --git a/server/schedule/operator/builder_test.go b/server/schedule/operator/builder_test.go index 22fac017ec0..ed8e6b88ffe 100644 --- a/server/schedule/operator/builder_test.go +++ b/server/schedule/operator/builder_test.go @@ -16,132 +16,137 @@ package operator import ( "context" + "testing" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testBuilderSuite{}) +type operatorBuilderTestSuite struct { + suite.Suite -type testBuilderSuite struct { cluster *mockcluster.Cluster ctx context.Context cancel context.CancelFunc } -func (s *testBuilderSuite) SetUpTest(c *C) { +func TestOperatorBuilderTestSuite(t *testing.T) { + suite.Run(t, new(operatorBuilderTestSuite)) +} + +func (suite *operatorBuilderTestSuite) SetupTest() { opts := config.NewTestOptions() - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.cluster = mockcluster.NewCluster(s.ctx, opts) - s.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, opts) + suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ config.RejectLeader: {{Key: "noleader", Value: "true"}}, }) - s.cluster.SetLocationLabels([]string{"zone", "host"}) - s.cluster.AddLabelsStore(1, 0, map[string]string{"zone": "z1", "host": "h1"}) - s.cluster.AddLabelsStore(2, 0, map[string]string{"zone": "z1", "host": "h1"}) - s.cluster.AddLabelsStore(3, 0, map[string]string{"zone": "z1", "host": "h1"}) - s.cluster.AddLabelsStore(4, 0, map[string]string{"zone": "z1", "host": "h1"}) - s.cluster.AddLabelsStore(5, 0, map[string]string{"zone": "z1", "host": "h1"}) - s.cluster.AddLabelsStore(6, 0, map[string]string{"zone": "z1", "host": "h2"}) - s.cluster.AddLabelsStore(7, 0, map[string]string{"zone": "z1", "host": "h2"}) - s.cluster.AddLabelsStore(8, 0, map[string]string{"zone": "z2", "host": "h1"}) - s.cluster.AddLabelsStore(9, 0, map[string]string{"zone": "z2", "host": "h2"}) - s.cluster.AddLabelsStore(10, 0, map[string]string{"zone": "z3", "host": "h1", "noleader": "true"}) + suite.cluster.SetLocationLabels([]string{"zone", "host"}) + suite.cluster.AddLabelsStore(1, 0, map[string]string{"zone": "z1", "host": "h1"}) + suite.cluster.AddLabelsStore(2, 0, map[string]string{"zone": "z1", "host": "h1"}) + suite.cluster.AddLabelsStore(3, 0, map[string]string{"zone": "z1", "host": "h1"}) + suite.cluster.AddLabelsStore(4, 0, map[string]string{"zone": "z1", "host": "h1"}) + suite.cluster.AddLabelsStore(5, 0, map[string]string{"zone": "z1", "host": "h1"}) + suite.cluster.AddLabelsStore(6, 0, map[string]string{"zone": "z1", "host": "h2"}) + suite.cluster.AddLabelsStore(7, 0, map[string]string{"zone": "z1", "host": "h2"}) + suite.cluster.AddLabelsStore(8, 0, map[string]string{"zone": "z2", "host": "h1"}) + suite.cluster.AddLabelsStore(9, 0, map[string]string{"zone": "z2", "host": "h2"}) + suite.cluster.AddLabelsStore(10, 0, map[string]string{"zone": "z3", "host": "h1", "noleader": "true"}) } -func (s *testBuilderSuite) TearDownTest(c *C) { - s.cancel() +func (suite *operatorBuilderTestSuite) TearDownTest() { + suite.cancel() } -func (s *testBuilderSuite) TestNewBuilder(c *C) { +func (suite *operatorBuilderTestSuite) TestNewBuilder() { peers := []*metapb.Peer{{Id: 11, StoreId: 1}, {Id: 12, StoreId: 2, Role: metapb.PeerRole_Learner}} region := core.NewRegionInfo(&metapb.Region{Id: 42, Peers: peers}, peers[0]) - builder := NewBuilder("test", s.cluster, region) - c.Assert(builder.err, IsNil) - c.Assert(builder.originPeers, HasLen, 2) - c.Assert(builder.originPeers[1], DeepEquals, peers[0]) - 
c.Assert(builder.originPeers[2], DeepEquals, peers[1]) - c.Assert(builder.originLeaderStoreID, Equals, uint64(1)) - c.Assert(builder.targetPeers, HasLen, 2) - c.Assert(builder.targetPeers[1], DeepEquals, peers[0]) - c.Assert(builder.targetPeers[2], DeepEquals, peers[1]) + builder := NewBuilder("test", suite.cluster, region) + suite.NoError(builder.err) + suite.Len(builder.originPeers, 2) + suite.Equal(peers[0], builder.originPeers[1]) + suite.Equal(peers[1], builder.originPeers[2]) + suite.Equal(uint64(1), builder.originLeaderStoreID) + suite.Len(builder.targetPeers, 2) + suite.Equal(peers[0], builder.targetPeers[1]) + suite.Equal(peers[1], builder.targetPeers[2]) region = region.Clone(core.WithLeader(nil)) - builder = NewBuilder("test", s.cluster, region) - c.Assert(builder.err, NotNil) + builder = NewBuilder("test", suite.cluster, region) + suite.Error(builder.err) } -func (s *testBuilderSuite) newBuilder() *Builder { +func (suite *operatorBuilderTestSuite) newBuilder() *Builder { peers := []*metapb.Peer{ {Id: 11, StoreId: 1}, {Id: 12, StoreId: 2}, {Id: 13, StoreId: 3, Role: metapb.PeerRole_Learner}, } region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: peers}, peers[0]) - return NewBuilder("test", s.cluster, region) + return NewBuilder("test", suite.cluster, region) } -func (s *testBuilderSuite) TestRecord(c *C) { - c.Assert(s.newBuilder().AddPeer(&metapb.Peer{StoreId: 1}).err, NotNil) - c.Assert(s.newBuilder().AddPeer(&metapb.Peer{StoreId: 4}).err, IsNil) - c.Assert(s.newBuilder().PromoteLearner(1).err, NotNil) - c.Assert(s.newBuilder().PromoteLearner(3).err, IsNil) - c.Assert(s.newBuilder().SetLeader(1).SetLeader(2).err, IsNil) - c.Assert(s.newBuilder().SetLeader(3).err, NotNil) - c.Assert(s.newBuilder().RemovePeer(4).err, NotNil) - c.Assert(s.newBuilder().AddPeer(&metapb.Peer{StoreId: 4, Role: metapb.PeerRole_Learner}).RemovePeer(4).err, IsNil) - c.Assert(s.newBuilder().SetLeader(2).RemovePeer(2).err, NotNil) - c.Assert(s.newBuilder().PromoteLearner(4).err, NotNil) - c.Assert(s.newBuilder().SetLeader(4).err, NotNil) - c.Assert(s.newBuilder().SetPeers(map[uint64]*metapb.Peer{2: {Id: 2}}).err, NotNil) +func (suite *operatorBuilderTestSuite) TestRecord() { + suite.Error(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 1}).err) + suite.NoError(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 4}).err) + suite.Error(suite.newBuilder().PromoteLearner(1).err) + suite.NoError(suite.newBuilder().PromoteLearner(3).err) + suite.NoError(suite.newBuilder().SetLeader(1).SetLeader(2).err) + suite.Error(suite.newBuilder().SetLeader(3).err) + suite.Error(suite.newBuilder().RemovePeer(4).err) + suite.NoError(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 4, Role: metapb.PeerRole_Learner}).RemovePeer(4).err) + suite.Error(suite.newBuilder().SetLeader(2).RemovePeer(2).err) + suite.Error(suite.newBuilder().PromoteLearner(4).err) + suite.Error(suite.newBuilder().SetLeader(4).err) + suite.Error(suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{2: {Id: 2}}).err) m := map[uint64]*metapb.Peer{ 2: {StoreId: 2}, 3: {StoreId: 3, Role: metapb.PeerRole_Learner}, 4: {StoreId: 4}, } - builder := s.newBuilder().SetPeers(m).EnableLightWeight() - c.Assert(builder.targetPeers, HasLen, 3) - c.Assert(builder.targetPeers[2], DeepEquals, m[2]) - c.Assert(builder.targetPeers[3], DeepEquals, m[3]) - c.Assert(builder.targetPeers[4], DeepEquals, m[4]) - c.Assert(builder.targetLeaderStoreID, Equals, uint64(0)) - c.Assert(builder.lightWeight, IsTrue) + builder := suite.newBuilder().SetPeers(m).EnableLightWeight() + 
suite.Len(builder.targetPeers, 3) + suite.Equal(m[2], builder.targetPeers[2]) + suite.Equal(m[3], builder.targetPeers[3]) + suite.Equal(m[4], builder.targetPeers[4]) + suite.Equal(uint64(0), builder.targetLeaderStoreID) + suite.True(builder.lightWeight) } -func (s *testBuilderSuite) TestPrepareBuild(c *C) { +func (suite *operatorBuilderTestSuite) TestPrepareBuild() { // no voter. - _, err := s.newBuilder().SetPeers(map[uint64]*metapb.Peer{4: {StoreId: 4, Role: metapb.PeerRole_Learner}}).prepareBuild() - c.Assert(err, NotNil) + _, err := suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{4: {StoreId: 4, Role: metapb.PeerRole_Learner}}).prepareBuild() + suite.Error(err) // use joint consensus - builder := s.newBuilder().SetPeers(map[uint64]*metapb.Peer{ + builder := suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{ 1: {StoreId: 1, Role: metapb.PeerRole_Learner}, 3: {StoreId: 3}, 4: {StoreId: 4, Id: 14}, 5: {StoreId: 5, Role: metapb.PeerRole_Learner}, }) _, err = builder.prepareBuild() - c.Assert(err, IsNil) - c.Assert(builder.toAdd, HasLen, 2) - c.Assert(builder.toAdd[4].GetRole(), Not(Equals), metapb.PeerRole_Learner) - c.Assert(builder.toAdd[4].GetId(), Equals, uint64(14)) - c.Assert(builder.toAdd[5].GetRole(), Equals, metapb.PeerRole_Learner) - c.Assert(builder.toAdd[5].GetId(), Not(Equals), uint64(0)) - c.Assert(builder.toRemove, HasLen, 1) - c.Assert(builder.toRemove[2], NotNil) - c.Assert(builder.toPromote, HasLen, 1) - c.Assert(builder.toPromote[3], NotNil) - c.Assert(builder.toDemote, HasLen, 1) - c.Assert(builder.toDemote[1], NotNil) - c.Assert(builder.currentLeaderStoreID, Equals, uint64(1)) + suite.NoError(err) + suite.Len(builder.toAdd, 2) + suite.NotEqual(metapb.PeerRole_Learner, builder.toAdd[4].GetRole()) + suite.Equal(uint64(14), builder.toAdd[4].GetId()) + suite.Equal(metapb.PeerRole_Learner, builder.toAdd[5].GetRole()) + suite.NotEqual(uint64(0), builder.toAdd[5].GetId()) + suite.Len(builder.toRemove, 1) + suite.NotNil(builder.toRemove[2]) + suite.Len(builder.toPromote, 1) + suite.NotNil(builder.toPromote[3]) + suite.Len(builder.toDemote, 1) + suite.NotNil(builder.toDemote[1]) + suite.Equal(uint64(1), builder.currentLeaderStoreID) // do not use joint consensus - builder = s.newBuilder().SetPeers(map[uint64]*metapb.Peer{ + builder = suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{ 1: {StoreId: 1, Role: metapb.PeerRole_Learner}, 2: {StoreId: 2}, 3: {StoreId: 3}, @@ -150,22 +155,22 @@ func (s *testBuilderSuite) TestPrepareBuild(c *C) { }) builder.useJointConsensus = false _, err = builder.prepareBuild() - c.Assert(err, IsNil) - c.Assert(builder.toAdd, HasLen, 3) - c.Assert(builder.toAdd[1].GetRole(), Equals, metapb.PeerRole_Learner) - c.Assert(builder.toAdd[1].GetId(), Not(Equals), uint64(0)) - c.Assert(builder.toAdd[4].GetRole(), Not(Equals), metapb.PeerRole_Learner) - c.Assert(builder.toAdd[4].GetId(), Equals, uint64(14)) - c.Assert(builder.toAdd[5].GetRole(), Equals, metapb.PeerRole_Learner) - c.Assert(builder.toAdd[5].GetId(), Not(Equals), uint64(0)) - c.Assert(builder.toRemove, HasLen, 1) - c.Assert(builder.toRemove[1], NotNil) - c.Assert(builder.toPromote, HasLen, 1) - c.Assert(builder.toPromote[3], NotNil) - c.Assert(builder.currentLeaderStoreID, Equals, uint64(1)) + suite.NoError(err) + suite.Len(builder.toAdd, 3) + suite.Equal(metapb.PeerRole_Learner, builder.toAdd[1].GetRole()) + suite.NotEqual(uint64(0), builder.toAdd[1].GetId()) + suite.NotEqual(metapb.PeerRole_Learner, builder.toAdd[4].GetRole()) + suite.Equal(uint64(14), builder.toAdd[4].GetId()) + 
suite.Equal(metapb.PeerRole_Learner, builder.toAdd[5].GetRole()) + suite.NotEqual(uint64(0), builder.toAdd[5].GetId()) + suite.Len(builder.toRemove, 1) + suite.NotNil(builder.toRemove[1]) + suite.Len(builder.toPromote, 1) + suite.NotNil(builder.toPromote[3]) + suite.Equal(uint64(1), builder.currentLeaderStoreID) } -func (s *testBuilderSuite) TestBuild(c *C) { +func (suite *operatorBuilderTestSuite) TestBuild() { type testCase struct { name string useJointConsensus bool @@ -530,9 +535,9 @@ func (s *testBuilderSuite) TestBuild(c *C) { } for _, tc := range cases { - c.Log(tc.name) + suite.T().Log(tc.name) region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: tc.originPeers}, tc.originPeers[0]) - builder := NewBuilder("test", s.cluster, region) + builder := NewBuilder("test", suite.cluster, region) builder.useJointConsensus = tc.useJointConsensus m := make(map[uint64]*metapb.Peer) for _, p := range tc.targetPeers { @@ -541,71 +546,69 @@ func (s *testBuilderSuite) TestBuild(c *C) { builder.SetPeers(m).SetLeader(tc.targetPeers[0].GetStoreId()) op, err := builder.Build(0) if len(tc.steps) == 0 { - c.Assert(err, NotNil) + suite.Error(err) continue } - c.Assert(err, IsNil) - c.Assert(op.Kind(), Equals, tc.kind) - c.Assert(op.Len(), Equals, len(tc.steps)) + suite.NoError(err) + suite.Equal(tc.kind, op.Kind()) + suite.Len(tc.steps, op.Len()) for i := 0; i < op.Len(); i++ { switch step := op.Step(i).(type) { case TransferLeader: - c.Assert(step.FromStore, Equals, tc.steps[i].(TransferLeader).FromStore) - c.Assert(step.ToStore, Equals, tc.steps[i].(TransferLeader).ToStore) + suite.Equal(tc.steps[i].(TransferLeader).FromStore, step.FromStore) + suite.Equal(tc.steps[i].(TransferLeader).ToStore, step.ToStore) case AddPeer: - c.Assert(step.ToStore, Equals, tc.steps[i].(AddPeer).ToStore) + suite.Equal(tc.steps[i].(AddPeer).ToStore, step.ToStore) case RemovePeer: - c.Assert(step.FromStore, Equals, tc.steps[i].(RemovePeer).FromStore) + suite.Equal(tc.steps[i].(RemovePeer).FromStore, step.FromStore) case AddLearner: - c.Assert(step.ToStore, Equals, tc.steps[i].(AddLearner).ToStore) + suite.Equal(tc.steps[i].(AddLearner).ToStore, step.ToStore) case PromoteLearner: - c.Assert(step.ToStore, Equals, tc.steps[i].(PromoteLearner).ToStore) + suite.Equal(tc.steps[i].(PromoteLearner).ToStore, step.ToStore) case ChangePeerV2Enter: - c.Assert(len(step.PromoteLearners), Equals, len(tc.steps[i].(ChangePeerV2Enter).PromoteLearners)) - c.Assert(len(step.DemoteVoters), Equals, len(tc.steps[i].(ChangePeerV2Enter).DemoteVoters)) + suite.Len(tc.steps[i].(ChangePeerV2Enter).PromoteLearners, len(step.PromoteLearners)) + suite.Len(tc.steps[i].(ChangePeerV2Enter).DemoteVoters, len(step.DemoteVoters)) for j, p := range tc.steps[i].(ChangePeerV2Enter).PromoteLearners { - c.Assert(step.PromoteLearners[j].ToStore, Equals, p.ToStore) + suite.Equal(p.ToStore, step.PromoteLearners[j].ToStore) } for j, d := range tc.steps[i].(ChangePeerV2Enter).DemoteVoters { - c.Assert(step.DemoteVoters[j].ToStore, Equals, d.ToStore) + suite.Equal(d.ToStore, step.DemoteVoters[j].ToStore) } case ChangePeerV2Leave: - c.Assert(len(step.PromoteLearners), Equals, len(tc.steps[i].(ChangePeerV2Leave).PromoteLearners)) - c.Assert(len(step.DemoteVoters), Equals, len(tc.steps[i].(ChangePeerV2Leave).DemoteVoters)) + suite.Len(tc.steps[i].(ChangePeerV2Leave).PromoteLearners, len(step.PromoteLearners)) + suite.Len(tc.steps[i].(ChangePeerV2Leave).DemoteVoters, len(step.DemoteVoters)) for j, p := range tc.steps[i].(ChangePeerV2Leave).PromoteLearners { - 
c.Assert(step.PromoteLearners[j].ToStore, Equals, p.ToStore) + suite.Equal(p.ToStore, step.PromoteLearners[j].ToStore) } for j, d := range tc.steps[i].(ChangePeerV2Leave).DemoteVoters { - c.Assert(step.DemoteVoters[j].ToStore, Equals, d.ToStore) + suite.Equal(d.ToStore, step.DemoteVoters[j].ToStore) } } } } } -// Test for not set unhealthy peer as target for promote learner and transfer leader -func (s *testBuilderSuite) TestTargetUnhealthyPeer(c *C) { +func (suite *operatorBuilderTestSuite) TestTargetUnhealthyPeer() { p := &metapb.Peer{Id: 2, StoreId: 2, Role: metapb.PeerRole_Learner} region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithPendingPeers([]*metapb.Peer{p})) - builder := NewBuilder("test", s.cluster, region) + builder := NewBuilder("test", suite.cluster, region) builder.PromoteLearner(2) - c.Assert(builder.err, NotNil) + suite.Error(builder.err) region = core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithDownPeers([]*pdpb.PeerStats{{Peer: p}})) - builder = NewBuilder("test", s.cluster, region) + builder = NewBuilder("test", suite.cluster, region) builder.PromoteLearner(2) - c.Assert(builder.err, NotNil) - + suite.Error(builder.err) p = &metapb.Peer{Id: 2, StoreId: 2, Role: metapb.PeerRole_Voter} region = core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithPendingPeers([]*metapb.Peer{p})) - builder = NewBuilder("test", s.cluster, region) + builder = NewBuilder("test", suite.cluster, region) builder.SetLeader(2) - c.Assert(builder.err, NotNil) + suite.Error(builder.err) region = core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithDownPeers([]*pdpb.PeerStats{{Peer: p}})) - builder = NewBuilder("test", s.cluster, region) + builder = NewBuilder("test", suite.cluster, region) builder.SetLeader(2) - c.Assert(builder.err, NotNil) + suite.Error(builder.err) } diff --git a/server/schedule/operator/status_test.go b/server/schedule/operator/status_test.go index 42502e1e096..6bdf2710657 100644 --- a/server/schedule/operator/status_test.go +++ b/server/schedule/operator/status_test.go @@ -15,21 +15,20 @@ package operator import ( - . "github.com/pingcap/check" -) - -var _ = Suite(&testOpStatusSuite{}) + "testing" -type testOpStatusSuite struct{} + "github.com/stretchr/testify/require" +) -func (s *testOpStatusSuite) TestIsEndStatus(c *C) { +func TestIsEndStatus(t *testing.T) { + re := require.New(t) for st := OpStatus(0); st < firstEndStatus; st++ { - c.Assert(IsEndStatus(st), IsFalse) + re.False(IsEndStatus(st)) } for st := firstEndStatus; st < statusCount; st++ { - c.Assert(IsEndStatus(st), IsTrue) + re.True(IsEndStatus(st)) } for st := statusCount; st < statusCount+100; st++ { - c.Assert(IsEndStatus(st), IsFalse) + re.False(IsEndStatus(st)) } } diff --git a/server/schedule/operator/status_tracker_test.go b/server/schedule/operator/status_tracker_test.go index 8ada8b386f2..d4441b0e7b6 100644 --- a/server/schedule/operator/status_tracker_test.go +++ b/server/schedule/operator/status_tracker_test.go @@ -15,64 +15,64 @@ package operator import ( + "testing" "time" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testOpStatusTrackerSuite{}) - -type testOpStatusTrackerSuite struct{} - -func (s *testOpStatusTrackerSuite) TestCreate(c *C) { +func TestCreate(t *testing.T) { + re := require.New(t) before := time.Now() trk := NewOpStatusTracker() - c.Assert(trk.Status(), Equals, CREATED) - c.Assert(trk.ReachTime(), DeepEquals, trk.ReachTimeOf(CREATED)) - checkTimeOrder(c, before, trk.ReachTime(), time.Now()) - checkReachTime(c, &trk, CREATED) + re.Equal(CREATED, trk.Status()) + re.Equal(trk.ReachTimeOf(CREATED), trk.ReachTime()) + checkTimeOrder(re, before, trk.ReachTime(), time.Now()) + checkReachTime(re, &trk, CREATED) } -func (s *testOpStatusTrackerSuite) TestNonEndTrans(c *C) { +func TestNonEndTrans(t *testing.T) { + re := require.New(t) { trk := NewOpStatusTracker() - checkInvalidTrans(c, &trk, SUCCESS, REPLACED, TIMEOUT) - checkValidTrans(c, &trk, STARTED) - checkInvalidTrans(c, &trk, EXPIRED) - checkValidTrans(c, &trk, SUCCESS) - checkReachTime(c, &trk, CREATED, STARTED, SUCCESS) + checkInvalidTrans(re, &trk, SUCCESS, REPLACED, TIMEOUT) + checkValidTrans(re, &trk, STARTED) + checkInvalidTrans(re, &trk, EXPIRED) + checkValidTrans(re, &trk, SUCCESS) + checkReachTime(re, &trk, CREATED, STARTED, SUCCESS) } { trk := NewOpStatusTracker() - checkValidTrans(c, &trk, CANCELED) - checkReachTime(c, &trk, CREATED, CANCELED) + checkValidTrans(re, &trk, CANCELED) + checkReachTime(re, &trk, CREATED, CANCELED) } { trk := NewOpStatusTracker() - checkValidTrans(c, &trk, STARTED) - checkValidTrans(c, &trk, CANCELED) - checkReachTime(c, &trk, CREATED, STARTED, CANCELED) + checkValidTrans(re, &trk, STARTED) + checkValidTrans(re, &trk, CANCELED) + checkReachTime(re, &trk, CREATED, STARTED, CANCELED) } { trk := NewOpStatusTracker() - checkValidTrans(c, &trk, STARTED) - checkValidTrans(c, &trk, REPLACED) - checkReachTime(c, &trk, CREATED, STARTED, REPLACED) + checkValidTrans(re, &trk, STARTED) + checkValidTrans(re, &trk, REPLACED) + checkReachTime(re, &trk, CREATED, STARTED, REPLACED) } { trk := NewOpStatusTracker() - checkValidTrans(c, &trk, EXPIRED) - checkReachTime(c, &trk, CREATED, EXPIRED) + checkValidTrans(re, &trk, EXPIRED) + checkReachTime(re, &trk, CREATED, EXPIRED) } { trk := NewOpStatusTracker() - checkValidTrans(c, &trk, STARTED) - checkValidTrans(c, &trk, TIMEOUT) - checkReachTime(c, &trk, CREATED, STARTED, TIMEOUT) + checkValidTrans(re, &trk, STARTED) + checkValidTrans(re, &trk, TIMEOUT) + checkReachTime(re, &trk, CREATED, STARTED, TIMEOUT) } } -func (s *testOpStatusTrackerSuite) TestEndStatusTrans(c *C) { +func TestEndStatusTrans(t *testing.T) { + re := require.New(t) allStatus := make([]OpStatus, 0, statusCount) for st := OpStatus(0); st < statusCount; st++ { allStatus = append(allStatus, st) @@ -80,41 +80,43 @@ func (s *testOpStatusTrackerSuite) TestEndStatusTrans(c *C) { for from := firstEndStatus; from < statusCount; from++ { trk := NewOpStatusTracker() trk.current = from - c.Assert(trk.IsEnd(), IsTrue) - checkInvalidTrans(c, &trk, allStatus...) + re.True(trk.IsEnd()) + checkInvalidTrans(re, &trk, allStatus...) 
} } -func (s *testOpStatusTrackerSuite) TestCheckExpired(c *C) { +func TestCheckExpired(t *testing.T) { + re := require.New(t) { // Not expired before := time.Now() trk := NewOpStatusTracker() after := time.Now() - c.Assert(trk.CheckExpired(10*time.Second), IsFalse) - c.Assert(trk.Status(), Equals, CREATED) - checkTimeOrder(c, before, trk.ReachTime(), after) + re.False(trk.CheckExpired(10 * time.Second)) + re.Equal(CREATED, trk.Status()) + checkTimeOrder(re, before, trk.ReachTime(), after) } { // Expired but status not changed trk := NewOpStatusTracker() trk.setTime(CREATED, time.Now().Add(-10*time.Second)) - c.Assert(trk.CheckExpired(5*time.Second), IsTrue) - c.Assert(trk.Status(), Equals, EXPIRED) + re.True(trk.CheckExpired(5 * time.Second)) + re.Equal(EXPIRED, trk.Status()) } { // Expired and status changed trk := NewOpStatusTracker() before := time.Now() - c.Assert(trk.To(EXPIRED), IsTrue) + re.True(trk.To(EXPIRED)) after := time.Now() - c.Assert(trk.CheckExpired(0), IsTrue) - c.Assert(trk.Status(), Equals, EXPIRED) - checkTimeOrder(c, before, trk.ReachTime(), after) + re.True(trk.CheckExpired(0)) + re.Equal(EXPIRED, trk.Status()) + checkTimeOrder(re, before, trk.ReachTime(), after) } } -func (s *testOpStatusTrackerSuite) TestCheckStepTimeout(c *C) { +func TestCheckStepTimeout(t *testing.T) { + re := require.New(t) testdata := []struct { step OpStep start time.Time @@ -133,45 +135,45 @@ func (s *testOpStatusTrackerSuite) TestCheckStepTimeout(c *C) { // Timeout and status changed trk := NewOpStatusTracker() trk.To(STARTED) - c.Assert(trk.CheckStepTimeout(v.start, v.step, 0), Equals, v.status == TIMEOUT) - c.Assert(trk.Status(), Equals, v.status) + re.Equal(v.status == TIMEOUT, trk.CheckStepTimeout(v.start, v.step, 0)) + re.Equal(v.status, trk.Status()) } } -func checkTimeOrder(c *C, t1, t2, t3 time.Time) { - c.Assert(t1.Before(t2), IsTrue) - c.Assert(t3.After(t2), IsTrue) +func checkTimeOrder(re *require.Assertions, t1, t2, t3 time.Time) { + re.True(t1.Before(t2)) + re.True(t3.After(t2)) } -func checkValidTrans(c *C, trk *OpStatusTracker, st OpStatus) { +func checkValidTrans(re *require.Assertions, trk *OpStatusTracker, st OpStatus) { before := time.Now() - c.Assert(trk.To(st), IsTrue) - c.Assert(trk.Status(), Equals, st) - c.Assert(trk.ReachTime(), DeepEquals, trk.ReachTimeOf(st)) - checkTimeOrder(c, before, trk.ReachTime(), time.Now()) + re.True(trk.To(st)) + re.Equal(st, trk.Status()) + re.Equal(trk.ReachTimeOf(st), trk.ReachTime()) + checkTimeOrder(re, before, trk.ReachTime(), time.Now()) } -func checkInvalidTrans(c *C, trk *OpStatusTracker, sts ...OpStatus) { +func checkInvalidTrans(re *require.Assertions, trk *OpStatusTracker, sts ...OpStatus) { origin := trk.Status() originTime := trk.ReachTime() sts = append(sts, statusCount, statusCount+1, statusCount+10) for _, st := range sts { - c.Assert(trk.To(st), IsFalse) - c.Assert(trk.Status(), Equals, origin) - c.Assert(trk.ReachTime(), DeepEquals, originTime) + re.False(trk.To(st)) + re.Equal(origin, trk.Status()) + re.Equal(originTime, trk.ReachTime()) } } -func checkReachTime(c *C, trk *OpStatusTracker, reached ...OpStatus) { +func checkReachTime(re *require.Assertions, trk *OpStatusTracker, reached ...OpStatus) { reachedMap := make(map[OpStatus]struct{}, len(reached)) for _, st := range reached { - c.Assert(trk.ReachTimeOf(st).IsZero(), IsFalse) + re.False(trk.ReachTimeOf(st).IsZero()) reachedMap[st] = struct{}{} } for st := OpStatus(0); st <= statusCount+10; st++ { if _, ok := reachedMap[st]; ok { continue } - 
c.Assert(trk.ReachTimeOf(st).IsZero(), IsTrue) + re.True(trk.ReachTimeOf(st).IsZero()) } } diff --git a/server/schedule/operator/step_test.go b/server/schedule/operator/step_test.go index f4bd9865b25..aa2f18c7220 100644 --- a/server/schedule/operator/step_test.go +++ b/server/schedule/operator/step_test.go @@ -16,38 +16,43 @@ package operator import ( "context" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" ) -type testStepSuite struct { +type operatorStepTestSuite struct { + suite.Suite + cluster *mockcluster.Cluster } -var _ = Suite(&testStepSuite{}) +func TestOperatorStepTestSuite(t *testing.T) { + suite.Run(t, new(operatorStepTestSuite)) +} type testCase struct { - Peers []*metapb.Peer // first is leader - ConfVerChanged uint64 - IsFinish bool - CheckInProgres Checker + Peers []*metapb.Peer // first is leader + ConfVerChanged uint64 + IsFinish bool + CheckInProgress func(err error, msgAndArgs ...interface{}) bool } -func (s *testStepSuite) SetUpTest(c *C) { - s.cluster = mockcluster.NewCluster(context.Background(), config.NewTestOptions()) +func (suite *operatorStepTestSuite) SetupTest() { + suite.cluster = mockcluster.NewCluster(context.Background(), config.NewTestOptions()) for i := 1; i <= 10; i++ { - s.cluster.PutStoreWithLabels(uint64(i)) + suite.cluster.PutStoreWithLabels(uint64(i)) } - s.cluster.SetStoreDown(8) - s.cluster.SetStoreDown(9) - s.cluster.SetStoreDown(10) + suite.cluster.SetStoreDown(8) + suite.cluster.SetStoreDown(9) + suite.cluster.SetStoreDown(10) } -func (s *testStepSuite) TestTransferLeader(c *C) { +func (suite *operatorStepTestSuite) TestTransferLeader() { step := TransferLeader{FromStore: 1, ToStore: 2} cases := []testCase{ { @@ -58,7 +63,7 @@ func (s *testStepSuite) TestTransferLeader(c *C) { }, 0, false, - IsNil, + suite.NoError, }, { []*metapb.Peer{ @@ -68,7 +73,7 @@ func (s *testStepSuite) TestTransferLeader(c *C) { }, 0, true, - IsNil, + suite.NoError, }, { []*metapb.Peer{ @@ -78,10 +83,10 @@ func (s *testStepSuite) TestTransferLeader(c *C) { }, 0, false, - IsNil, + suite.NoError, }, } - s.check(c, step, "transfer leader from store 1 to store 2", cases) + suite.check(step, "transfer leader from store 1 to store 2", cases) step = TransferLeader{FromStore: 1, ToStore: 9} // 9 is down cases = []testCase{ @@ -93,13 +98,13 @@ func (s *testStepSuite) TestTransferLeader(c *C) { }, 0, false, - NotNil, + suite.Error, }, } - s.check(c, step, "transfer leader from store 1 to store 9", cases) + suite.check(step, "transfer leader from store 1 to store 9", cases) } -func (s *testStepSuite) TestAddPeer(c *C) { +func (suite *operatorStepTestSuite) TestAddPeer() { step := AddPeer{ToStore: 2, PeerID: 2} cases := []testCase{ { @@ -108,7 +113,7 @@ func (s *testStepSuite) TestAddPeer(c *C) { }, 0, false, - IsNil, + suite.NoError, }, { []*metapb.Peer{ @@ -117,10 +122,10 @@ func (s *testStepSuite) TestAddPeer(c *C) { }, 1, true, - IsNil, + suite.NoError, }, } - s.check(c, step, "add peer 2 on store 2", cases) + suite.check(step, "add peer 2 on store 2", cases) step = AddPeer{ToStore: 9, PeerID: 9} cases = []testCase{ @@ -130,13 +135,13 @@ func (s *testStepSuite) TestAddPeer(c *C) { }, 0, false, - NotNil, + suite.Error, }, } - s.check(c, step, "add peer 9 on store 9", cases) + suite.check(step, "add peer 9 on store 9", cases) } -func (s *testStepSuite) TestAddLearner(c *C) { +func (suite 
*operatorStepTestSuite) TestAddLearner() { step := AddLearner{ToStore: 2, PeerID: 2} cases := []testCase{ { @@ -145,7 +150,7 @@ func (s *testStepSuite) TestAddLearner(c *C) { }, 0, false, - IsNil, + suite.NoError, }, { []*metapb.Peer{ @@ -154,10 +159,10 @@ func (s *testStepSuite) TestAddLearner(c *C) { }, 1, true, - IsNil, + suite.NoError, }, } - s.check(c, step, "add learner peer 2 on store 2", cases) + suite.check(step, "add learner peer 2 on store 2", cases) step = AddLearner{ToStore: 9, PeerID: 9} cases = []testCase{ @@ -167,13 +172,13 @@ func (s *testStepSuite) TestAddLearner(c *C) { }, 0, false, - NotNil, + suite.Error, }, } - s.check(c, step, "add learner peer 9 on store 9", cases) + suite.check(step, "add learner peer 9 on store 9", cases) } -func (s *testStepSuite) TestChangePeerV2Enter(c *C) { +func (suite *operatorStepTestSuite) TestChangePeerV2Enter() { cpe := ChangePeerV2Enter{ PromoteLearners: []PromoteLearner{{PeerID: 3, ToStore: 3}, {PeerID: 4, ToStore: 4}}, DemoteVoters: []DemoteVoter{{PeerID: 1, ToStore: 1}, {PeerID: 2, ToStore: 2}}, @@ -188,7 +193,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - IsNil, + suite.NoError, }, { // after step []*metapb.Peer{ @@ -199,7 +204,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 4, true, - IsNil, + suite.NoError, }, { // miss peer id []*metapb.Peer{ @@ -210,7 +215,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // miss store id []*metapb.Peer{ @@ -221,7 +226,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // miss peer id []*metapb.Peer{ @@ -232,7 +237,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // change is not atomic []*metapb.Peer{ @@ -243,7 +248,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // change is not atomic []*metapb.Peer{ @@ -254,7 +259,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // there are other peers in the joint state []*metapb.Peer{ @@ -266,7 +271,7 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 4, true, - NotNil, + suite.Error, }, { // there are other peers in the joint state []*metapb.Peer{ @@ -279,16 +284,16 @@ func (s *testStepSuite) TestChangePeerV2Enter(c *C) { }, 0, false, - NotNil, + suite.Error, }, } desc := "use joint consensus, " + "promote learner peer 3 on store 3 to voter, promote learner peer 4 on store 4 to voter, " + "demote voter peer 1 on store 1 to learner, demote voter peer 2 on store 2 to learner" - s.check(c, cpe, desc, cases) + suite.check(cpe, desc, cases) } -func (s *testStepSuite) TestChangePeerV2Leave(c *C) { +func (suite *operatorStepTestSuite) TestChangePeerV2Leave() { cpl := ChangePeerV2Leave{ PromoteLearners: []PromoteLearner{{PeerID: 3, ToStore: 3}, {PeerID: 4, ToStore: 4}}, DemoteVoters: []DemoteVoter{{PeerID: 1, ToStore: 1}, {PeerID: 2, ToStore: 2}}, @@ -303,7 +308,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - IsNil, + suite.NoError, }, { // after step []*metapb.Peer{ @@ -314,7 +319,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 4, true, - IsNil, + suite.NoError, }, { // miss peer id []*metapb.Peer{ @@ -325,7 +330,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // miss store id []*metapb.Peer{ @@ -336,7 +341,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c 
*C) { }, 0, false, - NotNil, + suite.Error, }, { // miss peer id []*metapb.Peer{ @@ -347,7 +352,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // change is not atomic []*metapb.Peer{ @@ -358,7 +363,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // change is not atomic []*metapb.Peer{ @@ -369,7 +374,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // there are other peers in the joint state []*metapb.Peer{ @@ -381,7 +386,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - NotNil, + suite.Error, }, { // there are other peers in the joint state []*metapb.Peer{ @@ -394,7 +399,7 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 4, false, - NotNil, + suite.Error, }, { // demote leader []*metapb.Peer{ @@ -405,21 +410,22 @@ func (s *testStepSuite) TestChangePeerV2Leave(c *C) { }, 0, false, - NotNil, + suite.Error, }, } desc := "leave joint state, " + "promote learner peer 3 on store 3 to voter, promote learner peer 4 on store 4 to voter, " + "demote voter peer 1 on store 1 to learner, demote voter peer 2 on store 2 to learner" - s.check(c, cpl, desc, cases) + suite.check(cpl, desc, cases) } -func (s *testStepSuite) check(c *C, step OpStep, desc string, cases []testCase) { - c.Assert(step.String(), Equals, desc) - for _, tc := range cases { - region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: tc.Peers}, tc.Peers[0]) - c.Assert(step.ConfVerChanged(region), Equals, tc.ConfVerChanged) - c.Assert(step.IsFinish(region), Equals, tc.IsFinish) - c.Assert(step.CheckInProgress(s.cluster, region), tc.CheckInProgres) +func (suite *operatorStepTestSuite) check(step OpStep, desc string, cases []testCase) { + suite.Equal(desc, step.String()) + for _, testCase := range cases { + region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.Peers}, testCase.Peers[0]) + suite.Equal(testCase.ConfVerChanged, step.ConfVerChanged(region)) + suite.Equal(testCase.IsFinish, step.IsFinish(region)) + err := step.CheckInProgress(suite.cluster, region) + testCase.CheckInProgress(err) } } From 01b8f34a4034d4c36fc0fb56e0fc7892bb6ecfa9 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Tue, 21 Jun 2022 12:18:36 +0800 Subject: [PATCH 32/35] *: update the swagger dependency (#5183) close tikv/pd#5160 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- Makefile | 6 +- go.mod | 6 +- go.sum | 63 +++-- server/api/admin.go | 34 +-- server/api/checker.go | 34 +-- server/api/cluster.go | 22 +- server/api/config.go | 194 +++++++------- server/api/health.go | 14 +- server/api/hot_status.go | 46 ++-- server/api/label.go | 26 +- server/api/log.go | 20 +- server/api/member.go | 104 ++++---- server/api/min_resolved_ts.go | 12 +- server/api/operator.go | 80 +++--- server/api/plugin.go | 36 +-- server/api/pprof.go | 62 ++--- server/api/region.go | 390 ++++++++++++++--------------- server/api/region_label.go | 126 +++++----- server/api/replication_mode.go | 10 +- server/api/router.go | 18 +- server/api/rule.go | 294 +++++++++++----------- server/api/scheduler.go | 66 ++--- server/api/service_gc_safepoint.go | 28 +-- server/api/service_middleware.go | 44 ++-- server/api/stats.go | 14 +- server/api/status.go | 8 +- server/api/store.go | 218 ++++++++-------- server/api/trend.go | 16 +- server/api/tso.go | 22 +- server/api/unsafe_operation.go | 20 +- server/api/version.go | 8 +- tests/client/go.sum | 59 +++-- 32 files changed, 1078 
insertions(+), 1022 deletions(-) diff --git a/Makefile b/Makefile index 2afd99c0734..7ac146e39eb 100644 --- a/Makefile +++ b/Makefile @@ -112,10 +112,8 @@ docker-image: #### Build utils ### swagger-spec: install-tools - go mod vendor - swag init --parseVendor --generalInfo server/api/router.go --exclude vendor/github.com/pingcap/tidb-dashboard --output docs/swagger - go mod tidy - rm -rf vendor + swag init --parseDependency --parseInternal --parseDepth 1 --dir server --generalInfo api/router.go --output docs/swagger + swag fmt --dir server dashboard-ui: ./scripts/embed-dashboard-ui.sh diff --git a/go.mod b/go.mod index 0a662f0f16b..6c2a3c4380b 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba - github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476 + github.com/swaggo/swag v1.8.3 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 github.com/tidwall/gjson v1.9.3 // indirect github.com/unrolled/render v1.0.1 @@ -50,9 +50,9 @@ require ( go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.uber.org/goleak v1.1.12 go.uber.org/zap v1.19.1 - golang.org/x/text v0.3.3 + golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.10 google.golang.org/grpc v1.26.0 gotest.tools/gotestsum v1.7.0 ) diff --git a/go.sum b/go.sum index ad3415e8116..54862125e1a 100644 --- a/go.sum +++ b/go.sum @@ -79,6 +79,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -136,20 +137,24 @@ github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= 
+github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= @@ -283,6 +288,8 @@ github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9q github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/joomcode/errorx v1.0.1 h1:CalpDWz14ZHd68fIqluJasJosAewpz2TFaJALrUxjrk= github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -298,13 +305,13 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod 
h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -316,8 +323,9 @@ github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -365,6 +373,8 @@ github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFW github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= @@ -501,8 +511,9 @@ github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba h1:lUPlXKqgbqT github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= -github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476 h1:UjnSXdNPIG+5FJ6xLQODEdk7gSnJlMldu3sPAxxCO+4= github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc= +github.com/swaggo/swag v1.8.3 h1:3pZSSCQ//gAH88lfmxM3Cd1+JCsxV8Md6f36b9hrZ5s= +github.com/swaggo/swag v1.8.3/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/thoas/go-funk v0.8.0 h1:JP9tKSvnpFVclYgDM0Is7FD9M4fhPvqA0s0BsXmzSRQ= @@ -535,8 +546,9 @@ github.com/unrolled/render v1.0.1 h1:VDDnQQVfBMsOsp3VaCJszSO0nkBIVEYoPWeRThk9spY github.com/unrolled/render v1.0.1/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM= github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli 
v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= @@ -549,6 +561,7 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -600,8 +613,9 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg= golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -615,8 +629,9 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -637,8 +652,12 @@ golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -677,16 +696,23 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 
h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= @@ -720,8 +746,9 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -748,8 +775,9 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -770,6 +798,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.0.6 h1:mA0XRPjIKi4bkE9nv+NKs6qj6QWOchqUSdWOcpd3x1E= diff --git 
a/server/api/admin.go b/server/api/admin.go index 2954874d7fd..1fa63c8ad9a 100644 --- a/server/api/admin.go +++ b/server/api/admin.go @@ -38,13 +38,13 @@ func newAdminHandler(svr *server.Server, rd *render.Render) *adminHandler { } } -// @Tags admin -// @Summary Drop a specific region from cache. -// @Param id path integer true "Region Id" -// @Produce json -// @Success 200 {string} string "The region is removed from server cache." -// @Failure 400 {string} string "The input is invalid." -// @Router /admin/cache/region/{id} [delete] +// @Tags admin +// @Summary Drop a specific region from cache. +// @Param id path integer true "Region Id" +// @Produce json +// @Success 200 {string} string "The region is removed from server cache." +// @Failure 400 {string} string "The input is invalid." +// @Router /admin/cache/region/{id} [delete] func (h *adminHandler) DeleteRegionCache(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -59,16 +59,16 @@ func (h *adminHandler) DeleteRegionCache(w http.ResponseWriter, r *http.Request) } // FIXME: details of input json body params -// @Tags admin -// @Summary Reset the ts. -// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Reset ts successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 403 {string} string "Reset ts is forbidden." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /admin/reset-ts [post] +// @Tags admin +// @Summary Reset the ts. +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Reset ts successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 403 {string} string "Reset ts is forbidden." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /admin/reset-ts [post] func (h *adminHandler) ResetTS(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() var input map[string]interface{} diff --git a/server/api/checker.go b/server/api/checker.go index 9a01ad9c83f..a62cedcf74c 100644 --- a/server/api/checker.go +++ b/server/api/checker.go @@ -36,16 +36,16 @@ func newCheckerHandler(svr *server.Server, r *render.Render) *checkerHandler { } // FIXME: details of input json body params -// @Tags checker -// @Summary Pause or resume region merge. -// @Accept json -// @Param name path string true "The name of the checker." -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Pause or resume the scheduler successfully." -// @Failure 400 {string} string "Bad format request." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /checker/{name} [post] +// @Tags checker +// @Summary Pause or resume region merge. +// @Accept json +// @Param name path string true "The name of the checker." +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Pause or resume the scheduler successfully." +// @Failure 400 {string} string "Bad format request." +// @Failure 500 {string} string "PD server failed to proceed the request." 
+// @Router /checker/{name} [post] func (c *checkerHandler) PauseOrResumeChecker(w http.ResponseWriter, r *http.Request) { var input map[string]int if err := apiutil.ReadJSONRespondError(c.r, w, r.Body, &input); err != nil { @@ -74,13 +74,13 @@ func (c *checkerHandler) PauseOrResumeChecker(w http.ResponseWriter, r *http.Req } // FIXME: details of input json body params -// @Tags checker -// @Summary Get if checker is paused -// @Param name path string true "The name of the scheduler." -// @Produce json -// @Success 200 {string} string "Pause or resume the scheduler successfully." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /checker/{name} [get] +// @Tags checker +// @Summary Get if checker is paused +// @Param name path string true "The name of the scheduler." +// @Produce json +// @Success 200 {string} string "Pause or resume the scheduler successfully." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /checker/{name} [get] func (c *checkerHandler) GetCheckerStatus(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] isPaused, err := c.IsCheckerPaused(name) diff --git a/server/api/cluster.go b/server/api/cluster.go index f7ff6251353..fcf972d56a7 100644 --- a/server/api/cluster.go +++ b/server/api/cluster.go @@ -33,21 +33,21 @@ func newClusterHandler(svr *server.Server, rd *render.Render) *clusterHandler { } } -// @Tags cluster -// @Summary Get cluster info. -// @Produce json -// @Success 200 {object} metapb.Cluster -// @Router /cluster [get] +// @Tags cluster +// @Summary Get cluster info. +// @Produce json +// @Success 200 {object} metapb.Cluster +// @Router /cluster [get] func (h *clusterHandler) GetCluster(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetCluster()) } -// @Tags cluster -// @Summary Get cluster status. -// @Produce json -// @Success 200 {object} cluster.Status -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /cluster/status [get] +// @Tags cluster +// @Summary Get cluster status. +// @Produce json +// @Success 200 {object} cluster.Status +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /cluster/status [get] func (h *clusterHandler) GetClusterStatus(w http.ResponseWriter, r *http.Request) { status, err := h.svr.GetClusterStatus() if err != nil { diff --git a/server/api/config.go b/server/api/config.go index 7ed3a9de56c..61cd27cd595 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -48,23 +48,23 @@ func newConfHandler(svr *server.Server, rd *render.Render) *confHandler { } } -// @Tags config -// @Summary Get full config. -// @Produce json -// @Success 200 {object} config.Config -// @Router /config [get] +// @Tags config +// @Summary Get full config. +// @Produce json +// @Success 200 {object} config.Config +// @Router /config [get] func (h *confHandler) GetConfig(w http.ResponseWriter, r *http.Request) { cfg := h.svr.GetConfig() cfg.Schedule.MaxMergeRegionKeys = cfg.Schedule.GetMaxMergeRegionKeys() h.rd.JSON(w, http.StatusOK, cfg) } -// @Tags config -// @Summary Get default config. -// @Produce json -// @Success 200 {object} config.Config -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/default [get] +// @Tags config +// @Summary Get default config. +// @Produce json +// @Success 200 {object} config.Config +// @Failure 500 {string} string "PD server failed to proceed the request." 
+// @Router /config/default [get] func (h *confHandler) GetDefaultConfig(w http.ResponseWriter, r *http.Request) { config := config.NewConfig() err := config.Adjust(nil, false) @@ -76,16 +76,16 @@ func (h *confHandler) GetDefaultConfig(w http.ResponseWriter, r *http.Request) { } // FIXME: details of input json body params -// @Tags config -// @Summary Update a config item. -// @Accept json -// @Param ttlSecond query integer false "ttl". ttl param is only for BR and lightning now. Don't use it. -// @Param body body object false "json params" -// @Produce json -// @Success 200 {string} string "The config is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config [post] +// @Tags config +// @Summary Update a config item. +// @Accept json +// @Param ttlSecond query integer false "ttl param is only for BR and lightning now. Don't use it." +// @Param body body object false "json params" +// @Produce json +// @Success 200 {string} string "The config is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config [post] func (h *confHandler) SetConfig(w http.ResponseWriter, r *http.Request) { cfg := h.svr.GetConfig() data, err := io.ReadAll(r.Body) @@ -272,27 +272,27 @@ func getConfigMap(cfg map[string]interface{}, key []string, value interface{}) m return cfg } -// @Tags config -// @Summary Get schedule config. -// @Produce json -// @Success 200 {object} config.ScheduleConfig -// @Router /config/schedule [get] +// @Tags config +// @Summary Get schedule config. +// @Produce json +// @Success 200 {object} config.ScheduleConfig +// @Router /config/schedule [get] func (h *confHandler) GetScheduleConfig(w http.ResponseWriter, r *http.Request) { cfg := h.svr.GetScheduleConfig() cfg.MaxMergeRegionKeys = cfg.GetMaxMergeRegionKeys() h.rd.JSON(w, http.StatusOK, cfg) } -// @Tags config -// @Summary Update a schedule config item. -// @Accept json -// @Param body body object string "json params" -// @Produce json -// @Success 200 {string} string "The config is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Failure 503 {string} string "PD server has no leader." -// @Router /config/schedule [post] +// @Tags config +// @Summary Update a schedule config item. +// @Accept json +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string "The config is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Failure 503 {string} string "PD server has no leader." +// @Router /config/schedule [post] func (h *confHandler) SetScheduleConfig(w http.ResponseWriter, r *http.Request) { data, err := io.ReadAll(r.Body) r.Body.Close() @@ -335,25 +335,25 @@ func (h *confHandler) SetScheduleConfig(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, "The config is updated.") } -// @Tags config -// @Summary Get replication config. -// @Produce json -// @Success 200 {object} config.ReplicationConfig -// @Router /config/replicate [get] +// @Tags config +// @Summary Get replication config. 
+// @Produce json +// @Success 200 {object} config.ReplicationConfig +// @Router /config/replicate [get] func (h *confHandler) GetReplicationConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetReplicationConfig()) } -// @Tags config -// @Summary Update a replication config item. -// @Accept json -// @Param body body object string "json params" -// @Produce json -// @Success 200 {string} string "The config is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Failure 503 {string} string "PD server has no leader." -// @Router /config/replicate [post] +// @Tags config +// @Summary Update a replication config item. +// @Accept json +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string "The config is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Failure 503 {string} string "PD server has no leader." +// @Router /config/replicate [post] func (h *confHandler) SetReplicationConfig(w http.ResponseWriter, r *http.Request) { config := h.svr.GetReplicationConfig() if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &config); err != nil { @@ -367,24 +367,24 @@ func (h *confHandler) SetReplicationConfig(w http.ResponseWriter, r *http.Reques h.rd.JSON(w, http.StatusOK, "The config is updated.") } -// @Tags config -// @Summary Get label property config. -// @Produce json -// @Success 200 {object} config.LabelPropertyConfig -// @Router /config/label-property [get] +// @Tags config +// @Summary Get label property config. +// @Produce json +// @Success 200 {object} config.LabelPropertyConfig +// @Router /config/label-property [get] func (h *confHandler) GetLabelPropertyConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetLabelProperty()) } -// @Tags config -// @Summary Update label property config item. -// @Accept json -// @Param body body object string "json params" -// @Produce json -// @Success 200 {string} string "The config is updated." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Failure 503 {string} string "PD server has no leader." -// @Router /config/label-property [post] +// @Tags config +// @Summary Update label property config item. +// @Accept json +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string "The config is updated." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Failure 503 {string} string "PD server has no leader." +// @Router /config/label-property [post] func (h *confHandler) SetLabelPropertyConfig(w http.ResponseWriter, r *http.Request) { input := make(map[string]string) if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { @@ -407,24 +407,24 @@ func (h *confHandler) SetLabelPropertyConfig(w http.ResponseWriter, r *http.Requ h.rd.JSON(w, http.StatusOK, "The config is updated.") } -// @Tags config -// @Summary Get cluster version. -// @Produce json -// @Success 200 {object} semver.Version -// @Router /config/cluster-version [get] +// @Tags config +// @Summary Get cluster version. 
+// @Produce json +// @Success 200 {object} semver.Version +// @Router /config/cluster-version [get] func (h *confHandler) GetClusterVersion(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetClusterVersion()) } -// @Tags config -// @Summary Update cluster version. -// @Accept json -// @Param body body object string "json params" -// @Produce json -// @Success 200 {string} string "The cluster version is updated." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Failure 503 {string} string "PD server has no leader." -// @Router /config/cluster-version [post] +// @Tags config +// @Summary Update cluster version. +// @Accept json +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string "The cluster version is updated." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Failure 503 {string} string "PD server has no leader." +// @Router /config/cluster-version [post] func (h *confHandler) SetClusterVersion(w http.ResponseWriter, r *http.Request) { input := make(map[string]string) if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { @@ -444,23 +444,23 @@ func (h *confHandler) SetClusterVersion(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, "The cluster version is updated.") } -// @Tags config -// @Summary Get replication mode config. -// @Produce json -// @Success 200 {object} config.ReplicationModeConfig -// @Router /config/replication-mode [get] +// @Tags config +// @Summary Get replication mode config. +// @Produce json +// @Success 200 {object} config.ReplicationModeConfig +// @Router /config/replication-mode [get] func (h *confHandler) GetReplicationModeConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetReplicationModeConfig()) } -// @Tags config -// @Summary Set replication mode config. -// @Accept json -// @Param body body object string "json params" -// @Produce json -// @Success 200 {string} string "The replication mode config is updated." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/replication-mode [post] +// @Tags config +// @Summary Set replication mode config. +// @Accept json +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string "The replication mode config is updated." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/replication-mode [post] func (h *confHandler) SetReplicationModeConfig(w http.ResponseWriter, r *http.Request) { config := h.svr.GetReplicationModeConfig() if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &config); err != nil { @@ -474,11 +474,11 @@ func (h *confHandler) SetReplicationModeConfig(w http.ResponseWriter, r *http.Re h.rd.JSON(w, http.StatusOK, "The replication mode config is updated.") } -// @Tags config -// @Summary Get PD server config. -// @Produce json -// @Success 200 {object} config.PDServerConfig -// @Router /config/pd-server [get] +// @Tags config +// @Summary Get PD server config. 
+// @Produce json +// @Success 200 {object} config.PDServerConfig +// @Router /config/pd-server [get] func (h *confHandler) GetPDServerConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetPDServerConfig()) } diff --git a/server/api/health.go b/server/api/health.go index 982a663e934..fbbc4a3672f 100644 --- a/server/api/health.go +++ b/server/api/health.go @@ -43,11 +43,11 @@ func newHealthHandler(svr *server.Server, rd *render.Render) *healthHandler { } } -// @Summary Health status of PD servers. -// @Produce json -// @Success 200 {array} Health -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /health [get] +// @Summary Health status of PD servers. +// @Produce json +// @Success 200 {array} Health +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /health [get] func (h *healthHandler) GetHealthStatus(w http.ResponseWriter, r *http.Request) { client := h.svr.GetClient() members, err := cluster.GetMembers(client) @@ -73,6 +73,6 @@ func (h *healthHandler) GetHealthStatus(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, healths) } -// @Summary Ping PD servers. -// @Router /ping [get] +// @Summary Ping PD servers. +// @Router /ping [get] func (h *healthHandler) Ping(w http.ResponseWriter, r *http.Request) {} diff --git a/server/api/hot_status.go b/server/api/hot_status.go index cbd537c7ac7..1b04638a94d 100644 --- a/server/api/hot_status.go +++ b/server/api/hot_status.go @@ -62,11 +62,11 @@ func newHotStatusHandler(handler *server.Handler, rd *render.Render) *hotStatusH } } -// @Tags hotspot -// @Summary List the hot write regions. -// @Produce json -// @Success 200 {object} statistics.StoreHotPeersInfos -// @Router /hotspot/regions/write [get] +// @Tags hotspot +// @Summary List the hot write regions. +// @Produce json +// @Success 200 {object} statistics.StoreHotPeersInfos +// @Router /hotspot/regions/write [get] func (h *hotStatusHandler) GetHotWriteRegions(w http.ResponseWriter, r *http.Request) { storeIDs := r.URL.Query()["store_id"] if len(storeIDs) < 1 { @@ -98,11 +98,11 @@ func (h *hotStatusHandler) GetHotWriteRegions(w http.ResponseWriter, r *http.Req h.rd.JSON(w, http.StatusOK, rc.GetHotWriteRegions(ids...)) } -// @Tags hotspot -// @Summary List the hot read regions. -// @Produce json -// @Success 200 {object} statistics.StoreHotPeersInfos -// @Router /hotspot/regions/read [get] +// @Tags hotspot +// @Summary List the hot read regions. +// @Produce json +// @Success 200 {object} statistics.StoreHotPeersInfos +// @Router /hotspot/regions/read [get] func (h *hotStatusHandler) GetHotReadRegions(w http.ResponseWriter, r *http.Request) { storeIDs := r.URL.Query()["store_id"] if len(storeIDs) < 1 { @@ -134,11 +134,11 @@ func (h *hotStatusHandler) GetHotReadRegions(w http.ResponseWriter, r *http.Requ h.rd.JSON(w, http.StatusOK, rc.GetHotReadRegions(ids...)) } -// @Tags hotspot -// @Summary List the hot stores. -// @Produce json -// @Success 200 {object} HotStoreStats -// @Router /hotspot/stores [get] +// @Tags hotspot +// @Summary List the hot stores. 
+// @Produce json +// @Success 200 {object} HotStoreStats +// @Router /hotspot/stores [get] func (h *hotStatusHandler) GetHotStores(w http.ResponseWriter, r *http.Request) { stats := HotStoreStats{ BytesWriteStats: make(map[uint64]float64), @@ -169,14 +169,14 @@ func (h *hotStatusHandler) GetHotStores(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, stats) } -// @Tags hotspot -// @Summary List the history hot regions. -// @Accept json -// @Produce json -// @Success 200 {object} storage.HistoryHotRegions -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /hotspot/regions/history [get] +// @Tags hotspot +// @Summary List the history hot regions. +// @Accept json +// @Produce json +// @Success 200 {object} storage.HistoryHotRegions +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /hotspot/regions/history [get] func (h *hotStatusHandler) GetHistoryHotRegions(w http.ResponseWriter, r *http.Request) { data, err := io.ReadAll(r.Body) r.Body.Close() diff --git a/server/api/label.go b/server/api/label.go index f9cbc49c882..abaad02a4e3 100644 --- a/server/api/label.go +++ b/server/api/label.go @@ -37,11 +37,11 @@ func newLabelsHandler(svr *server.Server, rd *render.Render) *labelsHandler { } } -// @Tags label -// @Summary List all label values. -// @Produce json -// @Success 200 {array} metapb.StoreLabel -// @Router /labels [get] +// @Tags label +// @Summary List all label values. +// @Produce json +// @Success 200 {array} metapb.StoreLabel +// @Router /labels [get] func (h *labelsHandler) GetLabels(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) var labels []*metapb.StoreLabel @@ -59,14 +59,14 @@ func (h *labelsHandler) GetLabels(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, labels) } -// @Tags label -// @Summary List stores that have specific label values. -// @Param name query string true "name of store label filter" -// @Param value query string true "value of store label filter" -// @Produce json -// @Success 200 {object} StoresInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /labels/stores [get] +// @Tags label +// @Summary List stores that have specific label values. +// @Param name query string true "name of store label filter" +// @Param value query string true "value of store label filter" +// @Produce json +// @Success 200 {object} StoresInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /labels/stores [get] func (h *labelsHandler) GetStoresByLabel(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) name := r.URL.Query().Get("name") diff --git a/server/api/log.go b/server/api/log.go index 793338aba4c..ed7a07e5279 100644 --- a/server/api/log.go +++ b/server/api/log.go @@ -37,16 +37,16 @@ func newLogHandler(svr *server.Server, rd *render.Render) *logHandler { } } -// @Tags admin -// @Summary Set log level. -// @Accept json -// @Param level body string true "json params" -// @Produce json -// @Success 200 {string} string "The log level is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Failure 503 {string} string "PD server has no leader." -// @Router /admin/log [post] +// @Tags admin +// @Summary Set log level. 
+// @Accept json +// @Param level body string true "json params" +// @Produce json +// @Success 200 {string} string "The log level is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Failure 503 {string} string "PD server has no leader." +// @Router /admin/log [post] func (h *logHandler) SetLogLevel(w http.ResponseWriter, r *http.Request) { var level string data, err := io.ReadAll(r.Body) diff --git a/server/api/member.go b/server/api/member.go index a6c5b7156f3..eaf743c0493 100644 --- a/server/api/member.go +++ b/server/api/member.go @@ -45,12 +45,12 @@ func newMemberHandler(svr *server.Server, rd *render.Render) *memberHandler { } } -// @Tags member -// @Summary List all PD servers in the cluster. -// @Produce json -// @Success 200 {object} pdpb.GetMembersResponse -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /members [get] +// @Tags member +// @Summary List all PD servers in the cluster. +// @Produce json +// @Success 200 {object} pdpb.GetMembersResponse +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /members [get] func (h *memberHandler) GetMembers(w http.ResponseWriter, r *http.Request) { members, err := getMembers(h.svr) if err != nil { @@ -107,15 +107,15 @@ func getMembers(svr *server.Server) (*pdpb.GetMembersResponse, error) { return members, nil } -// @Tags member -// @Summary Remove a PD server from the cluster. -// @Param name path string true "PD server name" -// @Produce json -// @Success 200 {string} string "The PD server is successfully removed." -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The member does not exist." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /members/name/{name} [delete] +// @Tags member +// @Summary Remove a PD server from the cluster. +// @Param name path string true "PD server name" +// @Produce json +// @Success 200 {string} string "The PD server is successfully removed." +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The member does not exist." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /members/name/{name} [delete] func (h *memberHandler) DeleteMemberByName(w http.ResponseWriter, r *http.Request) { client := h.svr.GetClient() @@ -161,14 +161,14 @@ func (h *memberHandler) DeleteMemberByName(w http.ResponseWriter, r *http.Reques h.rd.JSON(w, http.StatusOK, fmt.Sprintf("removed, pd: %s", name)) } -// @Tags member -// @Summary Remove a PD server from the cluster. -// @Param id path integer true "PD server Id" -// @Produce json -// @Success 200 {string} string "The PD server is successfully removed." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /members/id/{id} [delete] +// @Tags member +// @Summary Remove a PD server from the cluster. +// @Param id path integer true "PD server Id" +// @Produce json +// @Success 200 {string} string "The PD server is successfully removed." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." 
+// @Router /members/id/{id} [delete] func (h *memberHandler) DeleteMemberByID(w http.ResponseWriter, r *http.Request) { idStr := mux.Vars(r)["id"] id, err := strconv.ParseUint(idStr, 10, 64) @@ -201,17 +201,17 @@ func (h *memberHandler) DeleteMemberByID(w http.ResponseWriter, r *http.Request) } // FIXME: details of input json body params -// @Tags member -// @Summary Set leader priority of a PD member. -// @Accept json -// @Param name path string true "PD server name" -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "The leader priority is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The member does not exist." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /members/name/{name} [post] +// @Tags member +// @Summary Set leader priority of a PD member. +// @Accept json +// @Param name path string true "PD server name" +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "The leader priority is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The member does not exist." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /members/name/{name} [post] func (h *memberHandler) SetMemberPropertyByName(w http.ResponseWriter, r *http.Request) { members, membersErr := getMembers(h.svr) if membersErr != nil { @@ -265,21 +265,21 @@ func newLeaderHandler(svr *server.Server, rd *render.Render) *leaderHandler { } } -// @Tags leader -// @Summary Get the leader PD server of the cluster. -// @Produce json -// @Success 200 {object} pdpb.Member -// @Router /leader [get] +// @Tags leader +// @Summary Get the leader PD server of the cluster. +// @Produce json +// @Success 200 {object} pdpb.Member +// @Router /leader [get] func (h *leaderHandler) GetLeader(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetLeader()) } -// @Tags leader -// @Summary Transfer etcd leadership to another PD server. -// @Produce json -// @Success 200 {string} string "The resign command is submitted." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /leader/resign [post] +// @Tags leader +// @Summary Transfer etcd leadership to another PD server. +// @Produce json +// @Success 200 {string} string "The resign command is submitted." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /leader/resign [post] func (h *leaderHandler) ResignLeader(w http.ResponseWriter, r *http.Request) { err := h.svr.GetMember().ResignEtcdLeader(h.svr.Context(), h.svr.Name(), "") if err != nil { @@ -290,13 +290,13 @@ func (h *leaderHandler) ResignLeader(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, "The resign command is submitted.") } -// @Tags leader -// @Summary Transfer etcd leadership to the specific PD server. -// @Param nextLeader path string true "PD server that transfer leader to" -// @Produce json -// @Success 200 {string} string "The transfer command is submitted." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /leader/transfer/{nextLeader} [post] +// @Tags leader +// @Summary Transfer etcd leadership to the specific PD server. +// @Param nextLeader path string true "PD server that transfer leader to" +// @Produce json +// @Success 200 {string} string "The transfer command is submitted." 
+// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /leader/transfer/{nextLeader} [post] func (h *leaderHandler) TransferLeader(w http.ResponseWriter, r *http.Request) { err := h.svr.GetMember().ResignEtcdLeader(h.svr.Context(), h.svr.Name(), mux.Vars(r)["next_leader"]) if err != nil { diff --git a/server/api/min_resolved_ts.go b/server/api/min_resolved_ts.go index c717f0a3b42..c367aabdd1f 100644 --- a/server/api/min_resolved_ts.go +++ b/server/api/min_resolved_ts.go @@ -41,12 +41,12 @@ type minResolvedTS struct { PersistInterval typeutil.Duration `json:"persist_interval,omitempty"` } -// @Tags min_resolved_ts -// @Summary Get cluster-level min resolved ts. -// @Produce json -// @Success 200 {array} minResolvedTS -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /min-resolved-ts [get] +// @Tags min_resolved_ts +// @Summary Get cluster-level min resolved ts. +// @Produce json +// @Success 200 {array} minResolvedTS +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /min-resolved-ts [get] func (h *minResolvedTSHandler) GetMinResolvedTS(w http.ResponseWriter, r *http.Request) { c := h.svr.GetRaftCluster() value := c.GetMinResolvedTS() diff --git a/server/api/operator.go b/server/api/operator.go index d41c04f8292..dc0af9f8eda 100644 --- a/server/api/operator.go +++ b/server/api/operator.go @@ -40,14 +40,14 @@ func newOperatorHandler(handler *server.Handler, r *render.Render) *operatorHand } } -// @Tags operator -// @Summary Get a Region's pending operator. -// @Param region_id path int true "A Region's Id" -// @Produce json -// @Success 200 {object} schedule.OperatorWithStatus -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /operators/{region_id} [get] +// @Tags operator +// @Summary Get a Region's pending operator. +// @Param region_id path int true "A Region's Id" +// @Produce json +// @Success 200 {object} schedule.OperatorWithStatus +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /operators/{region_id} [get] func (h *operatorHandler) GetOperatorsByRegion(w http.ResponseWriter, r *http.Request) { id := mux.Vars(r)["region_id"] @@ -66,13 +66,13 @@ func (h *operatorHandler) GetOperatorsByRegion(w http.ResponseWriter, r *http.Re h.r.JSON(w, http.StatusOK, op) } -// @Tags operator -// @Summary List pending operators. -// @Param kind query string false "Specify the operator kind." Enums(admin, leader, region) -// @Produce json -// @Success 200 {array} operator.Operator -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /operators [get] +// @Tags operator +// @Summary List pending operators. +// @Param kind query string false "Specify the operator kind." Enums(admin, leader, region) +// @Produce json +// @Success 200 {array} operator.Operator +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /operators [get] func (h *operatorHandler) GetOperators(w http.ResponseWriter, r *http.Request) { var ( results []*operator.Operator @@ -111,15 +111,15 @@ func (h *operatorHandler) GetOperators(w http.ResponseWriter, r *http.Request) { } // FIXME: details of input json body params -// @Tags operator -// @Summary Create an operator. 
-// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "The operator is created." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /operators [post] +// @Tags operator +// @Summary Create an operator. +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "The operator is created." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /operators [post] func (h *operatorHandler) CreateOperator(w http.ResponseWriter, r *http.Request) { var input map[string]interface{} if err := apiutil.ReadJSONRespondError(h.r, w, r.Body, &input); err != nil { @@ -320,14 +320,14 @@ func (h *operatorHandler) CreateOperator(w http.ResponseWriter, r *http.Request) h.r.JSON(w, http.StatusOK, "The operator is created.") } -// @Tags operator -// @Summary Cancel a Region's pending operator. -// @Param region_id path int true "A Region's Id" -// @Produce json -// @Success 200 {string} string "The pending operator is canceled." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /operators/{region_id} [delete] +// @Tags operator +// @Summary Cancel a Region's pending operator. +// @Param region_id path int true "A Region's Id" +// @Produce json +// @Success 200 {string} string "The pending operator is canceled." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /operators/{region_id} [delete] func (h *operatorHandler) DeleteOperatorByRegion(w http.ResponseWriter, r *http.Request) { id := mux.Vars(r)["region_id"] @@ -345,14 +345,14 @@ func (h *operatorHandler) DeleteOperatorByRegion(w http.ResponseWriter, r *http. h.r.JSON(w, http.StatusOK, "The pending operator is canceled.") } -// @Tags operator -// @Summary lists the finished operators since the given timestamp in second. -// @Param from query integer false "From Unix timestamp" -// @Produce json -// @Success 200 {object} []operator.OpRecord -// @Failure 400 {string} string "The request is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /operators/records [get] +// @Tags operator +// @Summary lists the finished operators since the given timestamp in second. +// @Param from query integer false "From Unix timestamp" +// @Produce json +// @Success 200 {object} []operator.OpRecord +// @Failure 400 {string} string "The request is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /operators/records [get] func (h *operatorHandler) GetOperatorRecords(w http.ResponseWriter, r *http.Request) { var from time.Time if fromStr := r.URL.Query()["from"]; len(fromStr) > 0 { diff --git a/server/api/plugin.go b/server/api/plugin.go index 3e1372ba8f5..16894304e9b 100644 --- a/server/api/plugin.go +++ b/server/api/plugin.go @@ -38,29 +38,29 @@ func newPluginHandler(handler *server.Handler, rd *render.Render) *pluginHandler } // FIXME: details of input json body params -// @Tags plugin -// @Summary Load plugin. -// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Load plugin success." -// @Failure 400 {string} string "The input is invalid." 
-// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /plugin [post] +// @Tags plugin +// @Summary Load plugin. +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Load plugin success." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /plugin [post] func (h *pluginHandler) LoadPlugin(w http.ResponseWriter, r *http.Request) { h.processPluginCommand(w, r, cluster.PluginLoad) } // FIXME: details of input json body params -// @Tags plugin -// @Summary Unload plugin. -// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Load/Unload plugin successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /plugin [delete] +// @Tags plugin +// @Summary Unload plugin. +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Load/Unload plugin successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /plugin [delete] func (h *pluginHandler) UnloadPlugin(w http.ResponseWriter, r *http.Request) { h.processPluginCommand(w, r, cluster.PluginUnload) } diff --git a/server/api/pprof.go b/server/api/pprof.go index 9dd371badb1..0c180dda24c 100644 --- a/server/api/pprof.go +++ b/server/api/pprof.go @@ -47,10 +47,10 @@ func newPprofHandler(svr *server.Server, rd *render.Render) *pprofHandler { } } -// @Tags debug -// @Summary debug zip of PD servers. -// @Produce application/octet-stream -// @Router /debug/pprof/zip [get] +// @Tags debug +// @Summary debug zip of PD servers. +// @Produce application/octet-stream +// @Router /debug/pprof/zip [get] func (h *pprofHandler) PProfZip(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="pd_debug"`+time.Now().Format("20060102_150405")+".zip")) @@ -145,65 +145,65 @@ func (h *pprofHandler) PProfZip(w http.ResponseWriter, r *http.Request) { } } -// @Tags debug -// @Summary debug profile of PD servers. -// @Router /debug/pprof/profile [get] +// @Tags debug +// @Summary debug profile of PD servers. +// @Router /debug/pprof/profile [get] func (h *pprofHandler) PProfProfile(w http.ResponseWriter, r *http.Request) { pp.Profile(w, r) } -// @Tags debug -// @Summary debug trace of PD servers. -// @Router /debug/pprof/trace [get] +// @Tags debug +// @Summary debug trace of PD servers. +// @Router /debug/pprof/trace [get] func (h *pprofHandler) PProfTrace(w http.ResponseWriter, r *http.Request) { pp.Trace(w, r) } -// @Tags debug -// @Summary debug symbol of PD servers. -// @Router /debug/pprof/symbol [get] +// @Tags debug +// @Summary debug symbol of PD servers. +// @Router /debug/pprof/symbol [get] func (h *pprofHandler) PProfSymbol(w http.ResponseWriter, r *http.Request) { pp.Symbol(w, r) } -// @Tags debug -// @Summary debug heap of PD servers. -// @Router /debug/pprof/heap [get] +// @Tags debug +// @Summary debug heap of PD servers. +// @Router /debug/pprof/heap [get] func (h *pprofHandler) PProfHeap(w http.ResponseWriter, r *http.Request) { pp.Handler("heap").ServeHTTP(w, r) } -// @Tags debug -// @Summary debug mutex of PD servers. 
-// @Router /debug/pprof/mutex [get] +// @Tags debug +// @Summary debug mutex of PD servers. +// @Router /debug/pprof/mutex [get] func (h *pprofHandler) PProfMutex(w http.ResponseWriter, r *http.Request) { pp.Handler("mutex").ServeHTTP(w, r) } -// @Tags debug -// @Summary debug allocs of PD servers. -// @Router /debug/pprof/allocs [get] +// @Tags debug +// @Summary debug allocs of PD servers. +// @Router /debug/pprof/allocs [get] func (h *pprofHandler) PProfAllocs(w http.ResponseWriter, r *http.Request) { pp.Handler("allocs").ServeHTTP(w, r) } -// @Tags debug -// @Summary debug block of PD servers. -// @Router /debug/pprof/block [get] +// @Tags debug +// @Summary debug block of PD servers. +// @Router /debug/pprof/block [get] func (h *pprofHandler) PProfBlock(w http.ResponseWriter, r *http.Request) { pp.Handler("block").ServeHTTP(w, r) } -// @Tags debug -// @Summary debug goroutine of PD servers. -// @Router /debug/pprof/goroutine [get] +// @Tags debug +// @Summary debug goroutine of PD servers. +// @Router /debug/pprof/goroutine [get] func (h *pprofHandler) PProfGoroutine(w http.ResponseWriter, r *http.Request) { pp.Handler("goroutine").ServeHTTP(w, r) } -// @Tags debug -// @Summary debug threadcreate of PD servers. -// @Router /debug/pprof/threadcreate [get] +// @Tags debug +// @Summary debug threadcreate of PD servers. +// @Router /debug/pprof/threadcreate [get] func (h *pprofHandler) PProfThreadcreate(w http.ResponseWriter, r *http.Request) { pp.Handler("threadcreate").ServeHTTP(w, r) } diff --git a/server/api/region.go b/server/api/region.go index fa25ca1bd17..a4ae3fe0df9 100644 --- a/server/api/region.go +++ b/server/api/region.go @@ -209,13 +209,13 @@ func newRegionHandler(svr *server.Server, rd *render.Render) *regionHandler { } } -// @Tags region -// @Summary Search for a region by region ID. -// @Param id path integer true "Region Id" -// @Produce json -// @Success 200 {object} RegionInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /region/id/{id} [get] +// @Tags region +// @Summary Search for a region by region ID. +// @Param id path integer true "Region Id" +// @Produce json +// @Success 200 {object} RegionInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /region/id/{id} [get] func (h *regionHandler) GetRegionByID(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) @@ -231,12 +231,12 @@ func (h *regionHandler) GetRegionByID(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, NewRegionInfo(regionInfo)) } -// @Tags region -// @Summary Search for a region by a key. GetRegion is named to be consistent with gRPC -// @Param key path string true "Region key" -// @Produce json -// @Success 200 {object} RegionInfo -// @Router /region/key/{key} [get] +// @Tags region +// @Summary Search for a region by a key. GetRegion is named to be consistent with gRPC +// @Param key path string true "Region key" +// @Produce json +// @Success 200 {object} RegionInfo +// @Router /region/key/{key} [get] func (h *regionHandler) GetRegion(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -250,14 +250,14 @@ func (h *regionHandler) GetRegion(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, NewRegionInfo(regionInfo)) } -// @Tags region -// @Summary Check if regions in the given key ranges are replicated. Returns 'REPLICATED', 'INPROGRESS', or 'PENDING'. 'PENDING' means that there is at least one region pending for scheduling. 
Similarly, 'INPROGRESS' means there is at least one region in scheduling. -// @Param startKey query string true "Regions start key, hex encoded" -// @Param endKey query string true "Regions end key, hex encoded" -// @Produce plain -// @Success 200 {string} string "INPROGRESS" -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/replicated [get] +// @Tags region +// @Summary Check if regions in the given key ranges are replicated. Returns 'REPLICATED', 'INPROGRESS', or 'PENDING'. 'PENDING' means that there is at least one region pending for scheduling. Similarly, 'INPROGRESS' means there is at least one region in scheduling. +// @Param startKey query string true "Regions start key, hex encoded" +// @Param endKey query string true "Regions end key, hex encoded" +// @Produce plain +// @Success 200 {string} string "INPROGRESS" +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/replicated [get] func (h *regionsHandler) CheckRegionsReplicated(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) @@ -318,11 +318,11 @@ func convertToAPIRegions(regions []*core.RegionInfo) *RegionsInfo { } } -// @Tags region -// @Summary List all regions in the cluster. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Router /regions [get] +// @Tags region +// @Summary List all regions in the cluster. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Router /regions [get] func (h *regionsHandler) GetRegions(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) regions := rc.GetRegions() @@ -330,15 +330,15 @@ func (h *regionsHandler) GetRegions(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List regions in a given range [startKey, endKey). -// @Param key query string true "Region range start key" -// @Param endkey query string true "Region range end key" -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/key [get] +// @Tags region +// @Summary List regions in a given range [startKey, endKey). +// @Param key query string true "Region range start key" +// @Param endkey query string true "Region range end key" +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/key [get] func (h *regionsHandler) ScanRegions(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) startKey := r.URL.Query().Get("key") @@ -361,24 +361,24 @@ func (h *regionsHandler) ScanRegions(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary Get count of regions. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Router /regions/count [get] +// @Tags region +// @Summary Get count of regions. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Router /regions/count [get] func (h *regionsHandler) GetRegionCount(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) count := rc.GetRegionCount() h.rd.JSON(w, http.StatusOK, &RegionsInfo{Count: count}) } -// @Tags region -// @Summary List all regions of a specific store. -// @Param id path integer true "Store Id" -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." 
-// @Router /regions/store/{id} [get] +// @Tags region +// @Summary List all regions of a specific store. +// @Param id path integer true "Store Id" +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/store/{id} [get] func (h *regionsHandler) GetStoreRegions(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) @@ -393,12 +393,12 @@ func (h *regionsHandler) GetStoreRegions(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that miss peer. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/miss-peer [get] +// @Tags region +// @Summary List all regions that miss peer. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/miss-peer [get] func (h *regionsHandler) GetMissPeerRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.MissPeer) @@ -410,12 +410,12 @@ func (h *regionsHandler) GetMissPeerRegions(w http.ResponseWriter, r *http.Reque h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that has extra peer. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/extra-peer [get] +// @Tags region +// @Summary List all regions that has extra peer. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/extra-peer [get] func (h *regionsHandler) GetExtraPeerRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.ExtraPeer) @@ -427,12 +427,12 @@ func (h *regionsHandler) GetExtraPeerRegions(w http.ResponseWriter, r *http.Requ h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that has pending peer. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/pending-peer [get] +// @Tags region +// @Summary List all regions that has pending peer. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/pending-peer [get] func (h *regionsHandler) GetPendingPeerRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.PendingPeer) @@ -444,12 +444,12 @@ func (h *regionsHandler) GetPendingPeerRegions(w http.ResponseWriter, r *http.Re h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that has down peer. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/down-peer [get] +// @Tags region +// @Summary List all regions that has down peer. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." 
+// @Router /regions/check/down-peer [get] func (h *regionsHandler) GetDownPeerRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.DownPeer) @@ -461,12 +461,12 @@ func (h *regionsHandler) GetDownPeerRegions(w http.ResponseWriter, r *http.Reque h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that has learner peer. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/learner-peer [get] +// @Tags region +// @Summary List all regions that has learner peer. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/learner-peer [get] func (h *regionsHandler) GetLearnerPeerRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.LearnerPeer) @@ -478,12 +478,12 @@ func (h *regionsHandler) GetLearnerPeerRegions(w http.ResponseWriter, r *http.Re h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that has offline peer. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/offline-peer [get] +// @Tags region +// @Summary List all regions that has offline peer. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/offline-peer [get] func (h *regionsHandler) GetOfflinePeerRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetOfflinePeer(statistics.OfflinePeer) @@ -495,12 +495,12 @@ func (h *regionsHandler) GetOfflinePeerRegions(w http.ResponseWriter, r *http.Re h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that are oversized. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/oversized-region [get] +// @Tags region +// @Summary List all regions that are oversized. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/oversized-region [get] func (h *regionsHandler) GetOverSizedRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.OversizedRegion) @@ -512,12 +512,12 @@ func (h *regionsHandler) GetOverSizedRegions(w http.ResponseWriter, r *http.Requ h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all regions that are undersized. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/undersized-region [get] +// @Tags region +// @Summary List all regions that are undersized. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." 
+// @Router /regions/check/undersized-region [get] func (h *regionsHandler) GetUndersizedRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.UndersizedRegion) @@ -529,12 +529,12 @@ func (h *regionsHandler) GetUndersizedRegions(w http.ResponseWriter, r *http.Req h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary List all empty regions. -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /regions/check/empty-region [get] +// @Tags region +// @Summary List all empty regions. +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /regions/check/empty-region [get] func (h *regionsHandler) GetEmptyRegions(w http.ResponseWriter, r *http.Request) { handler := h.svr.GetHandler() regions, err := handler.GetRegionsByType(statistics.EmptyRegion) @@ -566,13 +566,13 @@ func (hist histSlice) Less(i, j int) bool { return hist[i].Start < hist[j].Start } -// @Tags region -// @Summary Get size of histogram. -// @Param bound query integer false "Size bound of region histogram" minimum(1) -// @Produce json -// @Success 200 {array} histItem -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/check/hist-size [get] +// @Tags region +// @Summary Get size of histogram. +// @Param bound query integer false "Size bound of region histogram" minimum(1) +// @Produce json +// @Success 200 {array} histItem +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/check/hist-size [get] func (h *regionsHandler) GetSizeHistogram(w http.ResponseWriter, r *http.Request) { bound := minRegionHistogramSize bound, err := calBound(bound, r) @@ -590,13 +590,13 @@ func (h *regionsHandler) GetSizeHistogram(w http.ResponseWriter, r *http.Request h.rd.JSON(w, http.StatusOK, histItems) } -// @Tags region -// @Summary Get keys of histogram. -// @Param bound query integer false "Key bound of region histogram" minimum(1000) -// @Produce json -// @Success 200 {array} histItem -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/check/hist-keys [get] +// @Tags region +// @Summary Get keys of histogram. +// @Param bound query integer false "Key bound of region histogram" minimum(1000) +// @Produce json +// @Success 200 {array} histItem +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/check/hist-keys [get] func (h *regionsHandler) GetKeysHistogram(w http.ResponseWriter, r *http.Request) { bound := minRegionHistogramKeys bound, err := calBound(bound, r) @@ -649,24 +649,24 @@ func calHist(bound int, list *[]int64) *[]*histItem { return &histItems } -// @Tags region -// @Summary List all range holes whitout any region info. -// @Produce json -// @Success 200 {object} [][]string -// @Router /regions/range-holes [get] +// @Tags region +// @Summary List all range holes whitout any region info. +// @Produce json +// @Success 200 {object} [][]string +// @Router /regions/range-holes [get] func (h *regionsHandler) GetRangeHoles(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) h.rd.JSON(w, http.StatusOK, rc.GetRangeHoles()) } -// @Tags region -// @Summary List sibling regions of a specific region. -// @Param id path integer true "Region Id" -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." 
-// @Failure 404 {string} string "The region does not exist." -// @Router /regions/sibling/{id} [get] +// @Tags region +// @Summary List sibling regions of a specific region. +// @Param id path integer true "Region Id" +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The region does not exist." +// @Router /regions/sibling/{id} [get] func (h *regionsHandler) GetRegionSiblings(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) @@ -694,89 +694,89 @@ const ( minRegionHistogramKeys = 1000 ) -// @Tags region -// @Summary List regions with the highest write flow. -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/writeflow [get] +// @Tags region +// @Summary List regions with the highest write flow. +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/writeflow [get] func (h *regionsHandler) GetTopWriteFlowRegions(w http.ResponseWriter, r *http.Request) { h.GetTopNRegions(w, r, func(a, b *core.RegionInfo) bool { return a.GetBytesWritten() < b.GetBytesWritten() }) } -// @Tags region -// @Summary List regions with the highest read flow. -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/readflow [get] +// @Tags region +// @Summary List regions with the highest read flow. +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/readflow [get] func (h *regionsHandler) GetTopReadFlowRegions(w http.ResponseWriter, r *http.Request) { h.GetTopNRegions(w, r, func(a, b *core.RegionInfo) bool { return a.GetBytesRead() < b.GetBytesRead() }) } -// @Tags region -// @Summary List regions with the largest conf version. -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/confver [get] +// @Tags region +// @Summary List regions with the largest conf version. +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/confver [get] func (h *regionsHandler) GetTopConfVerRegions(w http.ResponseWriter, r *http.Request) { h.GetTopNRegions(w, r, func(a, b *core.RegionInfo) bool { return a.GetMeta().GetRegionEpoch().GetConfVer() < b.GetMeta().GetRegionEpoch().GetConfVer() }) } -// @Tags region -// @Summary List regions with the largest version. -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/version [get] +// @Tags region +// @Summary List regions with the largest version. +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." 
+// @Router /regions/version [get] func (h *regionsHandler) GetTopVersionRegions(w http.ResponseWriter, r *http.Request) { h.GetTopNRegions(w, r, func(a, b *core.RegionInfo) bool { return a.GetMeta().GetRegionEpoch().GetVersion() < b.GetMeta().GetRegionEpoch().GetVersion() }) } -// @Tags region -// @Summary List regions with the largest size. -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/size [get] +// @Tags region +// @Summary List regions with the largest size. +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/size [get] func (h *regionsHandler) GetTopSizeRegions(w http.ResponseWriter, r *http.Request) { h.GetTopNRegions(w, r, func(a, b *core.RegionInfo) bool { return a.GetApproximateSize() < b.GetApproximateSize() }) } -// @Tags region -// @Summary List regions with the largest keys. -// @Param limit query integer false "Limit count" default(16) -// @Produce json -// @Success 200 {object} RegionsInfo -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/keys [get] +// @Tags region +// @Summary List regions with the largest keys. +// @Param limit query integer false "Limit count" default(16) +// @Produce json +// @Success 200 {object} RegionsInfo +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/keys [get] func (h *regionsHandler) GetTopKeysRegions(w http.ResponseWriter, r *http.Request) { h.GetTopNRegions(w, r, func(a, b *core.RegionInfo) bool { return a.GetApproximateKeys() < b.GetApproximateKeys() }) } -// @Tags region -// @Summary Accelerate regions scheduling a in given range, only receive hex format for keys -// @Accept json -// @Param body body object true "json params" -// @Param limit query integer false "Limit count" default(256) -// @Produce json -// @Success 200 {string} string "Accelerate regions scheduling in a given range [startKey, endKey)" -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/accelerate-schedule [post] +// @Tags region +// @Summary Accelerate regions scheduling a in given range, only receive hex format for keys +// @Accept json +// @Param body body object true "json params" +// @Param limit query integer false "Limit count" default(256) +// @Produce json +// @Success 200 {string} string "Accelerate regions scheduling in a given range [startKey, endKey)" +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/accelerate-schedule [post] func (h *regionsHandler) AccelerateRegionsScheduleInRange(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) var input map[string]interface{} @@ -838,14 +838,14 @@ func (h *regionsHandler) GetTopNRegions(w http.ResponseWriter, r *http.Request, h.rd.JSON(w, http.StatusOK, regionsInfo) } -// @Tags region -// @Summary Scatter regions by given key ranges or regions id distributed by given group with given retry limit -// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Scatter regions by given key ranges or regions id distributed by given group with given retry limit" -// @Failure 400 {string} string "The input is invalid." 
-// @Router /regions/scatter [post] +// @Tags region +// @Summary Scatter regions by given key ranges or regions id distributed by given group with given retry limit +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Scatter regions by given key ranges or regions id distributed by given group with given retry limit" +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/scatter [post] func (h *regionsHandler) ScatterRegions(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) var input map[string]interface{} @@ -919,14 +919,14 @@ func (h *regionsHandler) ScatterRegions(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, &s) } -// @Tags region -// @Summary Split regions with given split keys -// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Split regions with given split keys" -// @Failure 400 {string} string "The input is invalid." -// @Router /regions/split [post] +// @Tags region +// @Summary Split regions with given split keys +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Split regions with given split keys" +// @Failure 400 {string} string "The input is invalid." +// @Router /regions/split [post] func (h *regionsHandler) SplitRegions(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) var input map[string]interface{} diff --git a/server/api/region_label.go b/server/api/region_label.go index 6eae4206914..539f4126c1f 100644 --- a/server/api/region_label.go +++ b/server/api/region_label.go @@ -39,26 +39,26 @@ func newRegionLabelHandler(s *server.Server, rd *render.Render) *regionLabelHand } } -// @Tags region_label -// @Summary List all label rules of cluster. -// @Produce json -// @Success 200 {array} labeler.LabelRule -// @Router /config/region-label/rules [get] +// @Tags region_label +// @Summary List all label rules of cluster. +// @Produce json +// @Success 200 {array} labeler.LabelRule +// @Router /config/region-label/rules [get] func (h *regionLabelHandler) GetAllRegionLabelRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) rules := cluster.GetRegionLabeler().GetAllLabelRules() h.rd.JSON(w, http.StatusOK, rules) } -// @Tags region_label -// @Summary Update region label rules in batch. -// @Accept json -// @Param patch body labeler.LabelRulePatch true "Patch to update rules" -// @Produce json -// @Success 200 {string} string "Update region label rules successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/region-label/rules [patch] +// @Tags region_label +// @Summary Update region label rules in batch. +// @Accept json +// @Param patch body labeler.LabelRulePatch true "Patch to update rules" +// @Produce json +// @Success 200 {string} string "Update region label rules successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." 
+// @Router /config/region-label/rules [patch] func (h *regionLabelHandler) PatchRegionLabelRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) var patch labeler.LabelRulePatch @@ -76,14 +76,14 @@ func (h *regionLabelHandler) PatchRegionLabelRules(w http.ResponseWriter, r *htt h.rd.JSON(w, http.StatusOK, "Update region label rules successfully.") } -// @Tags region_label -// @Summary Get label rules of cluster by ids. -// @Param body body []string true "IDs of query rules" -// @Produce json -// @Success 200 {array} labeler.LabelRule -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/region-label/rule/ids [get] +// @Tags region_label +// @Summary Get label rules of cluster by ids. +// @Param body body []string true "IDs of query rules" +// @Produce json +// @Success 200 {array} labeler.LabelRule +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/region-label/rule/ids [get] func (h *regionLabelHandler) GetRegionLabelRulesByIDs(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) var ids []string @@ -98,13 +98,13 @@ func (h *regionLabelHandler) GetRegionLabelRulesByIDs(w http.ResponseWriter, r * h.rd.JSON(w, http.StatusOK, rules) } -// @Tags region_label -// @Summary Get label rule of cluster by id. -// @Param id path string true "Rule Id" -// @Produce json -// @Success 200 {object} labeler.LabelRule -// @Failure 404 {string} string "The rule does not exist." -// @Router /config/region-label/rule/{id} [get] +// @Tags region_label +// @Summary Get label rule of cluster by id. +// @Param id path string true "Rule Id" +// @Produce json +// @Success 200 {object} labeler.LabelRule +// @Failure 404 {string} string "The rule does not exist." +// @Router /config/region-label/rule/{id} [get] func (h *regionLabelHandler) GetRegionLabelRuleByID(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) id, err := url.PathUnescape(mux.Vars(r)["id"]) @@ -120,14 +120,14 @@ func (h *regionLabelHandler) GetRegionLabelRuleByID(w http.ResponseWriter, r *ht h.rd.JSON(w, http.StatusOK, rule) } -// @Tags region_label -// @Summary Delete label rule of cluster by id. -// @Param id path string true "Rule Id" -// @Produce json -// @Success 200 {string} string "Delete rule successfully." -// @Failure 404 {string} string "The rule does not exist." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/region-label/rule/{id} [delete] +// @Tags region_label +// @Summary Delete label rule of cluster by id. +// @Param id path string true "Rule Id" +// @Produce json +// @Success 200 {string} string "Delete rule successfully." +// @Failure 404 {string} string "The rule does not exist." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/region-label/rule/{id} [delete] func (h *regionLabelHandler) DeleteRegionLabelRule(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) id, err := url.PathUnescape(mux.Vars(r)["id"]) @@ -147,15 +147,15 @@ func (h *regionLabelHandler) DeleteRegionLabelRule(w http.ResponseWriter, r *htt h.rd.Text(w, http.StatusOK, "Delete rule successfully.") } -// @Tags region_label -// @Summary Update region label rule of cluster. 
-// @Accept json -// @Param rule body labeler.LabelRule true "Parameters of label rule" -// @Produce json -// @Success 200 {string} string "Update rule successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/region-label/rule [post] +// @Tags region_label +// @Summary Update region label rule of cluster. +// @Accept json +// @Param rule body labeler.LabelRule true "Parameters of label rule" +// @Produce json +// @Success 200 {string} string "Update rule successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/region-label/rule [post] func (h *regionLabelHandler) SetRegionLabelRule(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) var rule labeler.LabelRule @@ -173,15 +173,15 @@ func (h *regionLabelHandler) SetRegionLabelRule(w http.ResponseWriter, r *http.R h.rd.JSON(w, http.StatusOK, "Update region label rule successfully.") } -// @Tags region_label -// @Summary Get label of a region. -// @Param id path integer true "Region Id" -// @Param key path string true "Label key" -// @Produce json -// @Success 200 {string} string -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The region does not exist." -// @Router /region/id/{id}/label/{key} [get] +// @Tags region_label +// @Summary Get label of a region. +// @Param id path integer true "Region Id" +// @Param key path string true "Label key" +// @Produce json +// @Success 200 {string} string +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The region does not exist." +// @Router /region/id/{id}/label/{key} [get] func (h *regionLabelHandler) GetRegionLabelByKey(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) regionID, labelKey := mux.Vars(r)["id"], mux.Vars(r)["key"] @@ -199,14 +199,14 @@ func (h *regionLabelHandler) GetRegionLabelByKey(w http.ResponseWriter, r *http. h.rd.JSON(w, http.StatusOK, labelValue) } -// @Tags region_label -// @Summary Get labels of a region. -// @Param id path integer true "Region Id" -// @Produce json -// @Success 200 {string} string -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The region does not exist." -// @Router /region/id/{id}/labels [get] +// @Tags region_label +// @Summary Get labels of a region. +// @Param id path integer true "Region Id" +// @Produce json +// @Success 200 {string} string +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The region does not exist." 
+// @Router /region/id/{id}/labels [get] func (h *regionLabelHandler) GetRegionLabels(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) regionID, err := strconv.ParseUint(mux.Vars(r)["id"], 10, 64) diff --git a/server/api/replication_mode.go b/server/api/replication_mode.go index 9ba7050dc24..4fe2ef5da09 100644 --- a/server/api/replication_mode.go +++ b/server/api/replication_mode.go @@ -33,11 +33,11 @@ func newReplicationModeHandler(svr *server.Server, rd *render.Render) *replicati } } -// @Tags replication_mode -// @Summary Get status of replication mode -// @Produce json -// @Success 200 {object} replication.HTTPReplicationStatus -// @Router /replication_mode/status [get] +// @Tags replication_mode +// @Summary Get status of replication mode +// @Produce json +// @Success 200 {object} replication.HTTPReplicationStatus +// @Router /replication_mode/status [get] func (h *replicationModeHandler) GetReplicationModeStatus(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, getCluster(r).GetReplicationMode().GetReplicationStatusHTTP()) } diff --git a/server/api/router.go b/server/api/router.go index 3e8061fd74e..155a33845f2 100644 --- a/server/api/router.go +++ b/server/api/router.go @@ -72,15 +72,15 @@ func getFunctionName(f interface{}) string { } // The returned function is used as a lazy router to avoid the data race problem. -// @title Placement Driver Core API -// @version 1.0 -// @description This is placement driver. -// @contact.name Placement Driver Support -// @contact.url https://github.com/tikv/pd/issues -// @contact.email info@pingcap.com -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html -// @BasePath /pd/api/v1 +// @title Placement Driver Core API +// @version 1.0 +// @description This is placement driver. +// @contact.name Placement Driver Support +// @contact.url https://github.com/tikv/pd/issues +// @contact.email info@pingcap.com +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +// @BasePath /pd/api/v1 func createRouter(prefix string, svr *server.Server) *mux.Router { serviceMiddle := newServiceMiddlewareBuilder(svr) registerPrefix := func(router *mux.Router, prefixPath string, diff --git a/server/api/rule.go b/server/api/rule.go index f0eb43128f2..4148cfcb668 100644 --- a/server/api/rule.go +++ b/server/api/rule.go @@ -44,12 +44,12 @@ func newRulesHandler(svr *server.Server, rd *render.Render) *ruleHandler { } } -// @Tags rule -// @Summary List all rules of cluster. -// @Produce json -// @Success 200 {array} placement.Rule -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rules [get] +// @Tags rule +// @Summary List all rules of cluster. +// @Produce json +// @Success 200 {array} placement.Rule +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rules [get] func (h *ruleHandler) GetAllRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -60,15 +60,15 @@ func (h *ruleHandler) GetAllRules(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, rules) } -// @Tags rule -// @Summary Set all rules for the cluster. If there is an error, modifications are promised to be rollback in memory, but may fail to rollback disk. You probably want to request again to make rules in memory/disk consistent. 
-// @Produce json -// @Param rules body []placement.Rule true "Parameters of rules" -// @Success 200 {string} string "Update rules successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/rules [get] +// @Tags rule +// @Summary Set all rules for the cluster. If there is an error, modifications are promised to be rollback in memory, but may fail to rollback disk. You probably want to request again to make rules in memory/disk consistent. +// @Produce json +// @Param rules body []placement.Rule true "Parameters of rules" +// @Success 200 {string} string "Update rules successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/rules [get] func (h *ruleHandler) SetAllRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -97,13 +97,13 @@ func (h *ruleHandler) SetAllRules(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, "Update rules successfully.") } -// @Tags rule -// @Summary List all rules of cluster by group. -// @Param group path string true "The name of group" -// @Produce json -// @Success 200 {array} placement.Rule -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rules/group/{group} [get] +// @Tags rule +// @Summary List all rules of cluster by group. +// @Param group path string true "The name of group" +// @Produce json +// @Success 200 {array} placement.Rule +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rules/group/{group} [get] func (h *ruleHandler) GetRuleByGroup(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -115,15 +115,15 @@ func (h *ruleHandler) GetRuleByGroup(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, rules) } -// @Tags rule -// @Summary List all rules of cluster by region. -// @Param region path string true "The name of region" -// @Produce json -// @Success 200 {array} placement.Rule -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The region does not exist." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rules/region/{region} [get] +// @Tags rule +// @Summary List all rules of cluster by region. +// @Param region path string true "The name of region" +// @Produce json +// @Success 200 {array} placement.Rule +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The region does not exist." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rules/region/{region} [get] func (h *ruleHandler) GetRulesByRegion(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -145,14 +145,14 @@ func (h *ruleHandler) GetRulesByRegion(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, rules) } -// @Tags rule -// @Summary List all rules of cluster by key. -// @Param key path string true "The name of key" -// @Produce json -// @Success 200 {array} placement.Rule -// @Failure 400 {string} string "The input is invalid." 
-// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rules/key/{key} [get] +// @Tags rule +// @Summary List all rules of cluster by key. +// @Param key path string true "The name of key" +// @Produce json +// @Success 200 {array} placement.Rule +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rules/key/{key} [get] func (h *ruleHandler) GetRulesByKey(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -169,15 +169,15 @@ func (h *ruleHandler) GetRulesByKey(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, rules) } -// @Tags rule -// @Summary Get rule of cluster by group and id. -// @Param group path string true "The name of group" -// @Param id path string true "Rule Id" -// @Produce json -// @Success 200 {object} placement.Rule -// @Failure 404 {string} string "The rule does not exist." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rule/{group}/{id} [get] +// @Tags rule +// @Summary Get rule of cluster by group and id. +// @Param group path string true "The name of group" +// @Param id path string true "Rule Id" +// @Produce json +// @Success 200 {object} placement.Rule +// @Failure 404 {string} string "The rule does not exist." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rule/{group}/{id} [get] func (h *ruleHandler) GetRuleByGroupAndID(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -193,16 +193,16 @@ func (h *ruleHandler) GetRuleByGroupAndID(w http.ResponseWriter, r *http.Request h.rd.JSON(w, http.StatusOK, rule) } -// @Tags rule -// @Summary Update rule of cluster. -// @Accept json -// @Param rule body placement.Rule true "Parameters of rule" -// @Produce json -// @Success 200 {string} string "Update rule successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/rule [post] +// @Tags rule +// @Summary Update rule of cluster. +// @Accept json +// @Param rule body placement.Rule true "Parameters of rule" +// @Produce json +// @Success 200 {string} string "Update rule successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/rule [post] func (h *ruleHandler) SetRule(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -247,15 +247,15 @@ func (h *ruleHandler) syncReplicateConfigWithDefaultRule(rule *placement.Rule) e return nil } -// @Tags rule -// @Summary Delete rule of cluster. -// @Param group path string true "The name of group" -// @Param id path string true "Rule Id" -// @Produce json -// @Success 200 {string} string "Delete rule successfully." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/rule/{group}/{id} [delete] +// @Tags rule +// @Summary Delete rule of cluster. 
+// @Param group path string true "The name of group" +// @Param id path string true "Rule Id" +// @Produce json +// @Success 200 {string} string "Delete rule successfully." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/rule/{group}/{id} [delete] func (h *ruleHandler) DeleteRuleByGroup(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -275,15 +275,15 @@ func (h *ruleHandler) DeleteRuleByGroup(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, "Delete rule successfully.") } -// @Tags rule -// @Summary Batch operations for the cluster. Operations should be independent(different ID). If there is an error, modifications are promised to be rollback in memory, but may fail to rollback disk. You probably want to request again to make rules in memory/disk consistent. -// @Produce json -// @Param operations body []placement.RuleOp true "Parameters of rule operations" -// @Success 200 {string} string "Batch operations successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/rules/batch [post] +// @Tags rule +// @Summary Batch operations for the cluster. Operations should be independent(different ID). If there is an error, modifications are promised to be rollback in memory, but may fail to rollback disk. You probably want to request again to make rules in memory/disk consistent. +// @Produce json +// @Param operations body []placement.RuleOp true "Parameters of rule operations" +// @Success 200 {string} string "Batch operations successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/rules/batch [post] func (h *ruleHandler) BatchRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -306,14 +306,14 @@ func (h *ruleHandler) BatchRules(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, "Batch operations successfully.") } -// @Tags rule -// @Summary Get rule group config by group id. -// @Param id path string true "Group Id" -// @Produce json -// @Success 200 {object} placement.RuleGroup -// @Failure 404 {string} string "The RuleGroup does not exist." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rule_group/{id} [get] +// @Tags rule +// @Summary Get rule group config by group id. +// @Param id path string true "Group Id" +// @Produce json +// @Success 200 {object} placement.RuleGroup +// @Failure 404 {string} string "The RuleGroup does not exist." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rule_group/{id} [get] func (h *ruleHandler) GetGroupConfig(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -329,16 +329,16 @@ func (h *ruleHandler) GetGroupConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, group) } -// @Tags rule -// @Summary Update rule group config. 
-// @Accept json -// @Param rule body placement.RuleGroup true "Parameters of rule group" -// @Produce json -// @Success 200 {string} string "Update rule group config successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/rule_group [post] +// @Tags rule +// @Summary Update rule group config. +// @Accept json +// @Param rule body placement.RuleGroup true "Parameters of rule group" +// @Produce json +// @Success 200 {string} string "Update rule group config successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/rule_group [post] func (h *ruleHandler) SetGroupConfig(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -359,14 +359,14 @@ func (h *ruleHandler) SetGroupConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, "Update rule group successfully.") } -// @Tags rule -// @Summary Delete rule group config. -// @Param id path string true "Group Id" -// @Produce json -// @Success 200 {string} string "Delete rule group config successfully." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/rule_group/{id} [delete] +// @Tags rule +// @Summary Delete rule group config. +// @Param id path string true "Group Id" +// @Produce json +// @Success 200 {string} string "Delete rule group config successfully." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/rule_group/{id} [delete] func (h *ruleHandler) DeleteGroupConfig(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -385,12 +385,12 @@ func (h *ruleHandler) DeleteGroupConfig(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, "Delete rule group successfully.") } -// @Tags rule -// @Summary List all rule group configs. -// @Produce json -// @Success 200 {array} placement.RuleGroup -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/rule_groups [get] +// @Tags rule +// @Summary List all rule group configs. +// @Produce json +// @Success 200 {array} placement.RuleGroup +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/rule_groups [get] func (h *ruleHandler) GetAllGroupConfigs(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -401,12 +401,12 @@ func (h *ruleHandler) GetAllGroupConfigs(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, ruleGroups) } -// @Tags rule -// @Summary List all rules and groups configuration. -// @Produce json -// @Success 200 {array} placement.GroupBundle -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/placement-rule [get] +// @Tags rule +// @Summary List all rules and groups configuration. +// @Produce json +// @Success 200 {array} placement.GroupBundle +// @Failure 412 {string} string "Placement rules feature is disabled." 
+// @Router /config/placement-rule [get] func (h *ruleHandler) GetPlacementRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -417,15 +417,15 @@ func (h *ruleHandler) GetPlacementRules(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, bundles) } -// @Tags rule -// @Summary Update all rules and groups configuration. -// @Param partial query bool false "if partially update rules" default(false) -// @Produce json -// @Success 200 {string} string "Update rules and groups successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/placement-rule [post] +// @Tags rule +// @Summary Update all rules and groups configuration. +// @Param partial query bool false "if partially update rules" default(false) +// @Produce json +// @Success 200 {string} string "Update rules and groups successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/placement-rule [post] func (h *ruleHandler) SetPlacementRules(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -449,13 +449,13 @@ func (h *ruleHandler) SetPlacementRules(w http.ResponseWriter, r *http.Request) h.rd.JSON(w, http.StatusOK, "Update rules and groups successfully.") } -// @Tags rule -// @Summary Get group config and all rules belong to the group. -// @Param group path string true "The name of group" -// @Produce json -// @Success 200 {object} placement.GroupBundle -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/placement-rule/{group} [get] +// @Tags rule +// @Summary Get group config and all rules belong to the group. +// @Param group path string true "The name of group" +// @Produce json +// @Success 200 {object} placement.GroupBundle +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Router /config/placement-rule/{group} [get] func (h *ruleHandler) GetPlacementRuleByGroup(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -466,15 +466,15 @@ func (h *ruleHandler) GetPlacementRuleByGroup(w http.ResponseWriter, r *http.Req h.rd.JSON(w, http.StatusOK, group) } -// @Tags rule -// @Summary Get group config and all rules belong to the group. -// @Param group path string true "The name or name pattern of group" -// @Param regexp query bool false "Use regular expression" default(false) -// @Produce plain -// @Success 200 {string} string "Delete group and rules successfully." -// @Failure 400 {string} string "Bad request." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Router /config/placement-rule [delete] +// @Tags rule +// @Summary Get group config and all rules belong to the group. +// @Param group path string true "The name or name pattern of group" +// @Param regexp query bool false "Use regular expression" default(false) +// @Produce plain +// @Success 200 {string} string "Delete group and rules successfully." +// @Failure 400 {string} string "Bad request." +// @Failure 412 {string} string "Placement rules feature is disabled." 
+// @Router /config/placement-rule [delete] func (h *ruleHandler) DeletePlacementRuleByGroup(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { @@ -495,14 +495,14 @@ func (h *ruleHandler) DeletePlacementRuleByGroup(w http.ResponseWriter, r *http. h.rd.JSON(w, http.StatusOK, "Delete group and rules successfully.") } -// @Tags rule -// @Summary Update group and all rules belong to it. -// @Produce json -// @Success 200 {string} string "Update group and rules successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 412 {string} string "Placement rules feature is disabled." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /config/placement-rule/{group} [post] +// @Tags rule +// @Summary Update group and all rules belong to it. +// @Produce json +// @Success 200 {string} string "Update group and rules successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 412 {string} string "Placement rules feature is disabled." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /config/placement-rule/{group} [post] func (h *ruleHandler) SetPlacementRuleByGroup(w http.ResponseWriter, r *http.Request) { cluster := getCluster(r) if !cluster.GetOpts().IsPlacementRulesEnabled() { diff --git a/server/api/scheduler.go b/server/api/scheduler.go index 5faa01c764b..9b690a93249 100644 --- a/server/api/scheduler.go +++ b/server/api/scheduler.go @@ -51,12 +51,12 @@ type schedulerPausedPeriod struct { ResumeAt time.Time `json:"resume_at"` } -// @Tags scheduler -// @Summary List all created schedulers by status. -// @Produce json -// @Success 200 {array} string -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /schedulers [get] +// @Tags scheduler +// @Summary List all created schedulers by status. +// @Produce json +// @Success 200 {array} string +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /schedulers [get] func (h *schedulerHandler) GetSchedulers(w http.ResponseWriter, r *http.Request) { schedulers, err := h.Handler.GetSchedulers() if err != nil { @@ -128,15 +128,15 @@ func (h *schedulerHandler) GetSchedulers(w http.ResponseWriter, r *http.Request) } // FIXME: details of input json body params -// @Tags scheduler -// @Summary Create a scheduler. -// @Accept json -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "The scheduler is created." -// @Failure 400 {string} string "Bad format request." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /schedulers [post] +// @Tags scheduler +// @Summary Create a scheduler. +// @Accept json +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "The scheduler is created." +// @Failure 400 {string} string "Bad format request." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /schedulers [post] func (h *schedulerHandler) CreateScheduler(w http.ResponseWriter, r *http.Request) { var input map[string]interface{} if err := apiutil.ReadJSONRespondError(h.r, w, r.Body, &input); err != nil { @@ -269,14 +269,14 @@ func (h *schedulerHandler) addEvictOrGrant(w http.ResponseWriter, input map[stri } } -// @Tags scheduler -// @Summary Delete a scheduler. -// @Param name path string true "The name of the scheduler." 
-// @Produce json -// @Success 200 {string} string "The scheduler is removed." -// @Failure 404 {string} string "The scheduler is not found." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /schedulers/{name} [delete] +// @Tags scheduler +// @Summary Delete a scheduler. +// @Param name path string true "The name of the scheduler." +// @Produce json +// @Success 200 {string} string "The scheduler is removed." +// @Failure 404 {string} string "The scheduler is not found." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /schedulers/{name} [delete] func (h *schedulerHandler) DeleteScheduler(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] switch { @@ -316,16 +316,16 @@ func (h *schedulerHandler) redirectSchedulerDelete(w http.ResponseWriter, name, } // FIXME: details of input json body params -// @Tags scheduler -// @Summary Pause or resume a scheduler. -// @Accept json -// @Param name path string true "The name of the scheduler." -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Pause or resume the scheduler successfully." -// @Failure 400 {string} string "Bad format request." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /schedulers/{name} [post] +// @Tags scheduler +// @Summary Pause or resume a scheduler. +// @Accept json +// @Param name path string true "The name of the scheduler." +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Pause or resume the scheduler successfully." +// @Failure 400 {string} string "Bad format request." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /schedulers/{name} [post] func (h *schedulerHandler) PauseOrResumeScheduler(w http.ResponseWriter, r *http.Request) { var input map[string]int64 if err := apiutil.ReadJSONRespondError(h.r, w, r.Body, &input); err != nil { diff --git a/server/api/service_gc_safepoint.go b/server/api/service_gc_safepoint.go index 40c3aff1076..9df3700a30b 100644 --- a/server/api/service_gc_safepoint.go +++ b/server/api/service_gc_safepoint.go @@ -41,12 +41,12 @@ type listServiceGCSafepoint struct { GCSafePoint uint64 `json:"gc_safe_point"` } -// @Tags service_gc_safepoint -// @Summary Get all service GC safepoint. -// @Produce json -// @Success 200 {array} listServiceGCSafepoint -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /gc/safepoint [get] +// @Tags service_gc_safepoint +// @Summary Get all service GC safepoint. +// @Produce json +// @Success 200 {array} listServiceGCSafepoint +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /gc/safepoint [get] func (h *serviceGCSafepointHandler) GetGCSafePoint(w http.ResponseWriter, r *http.Request) { storage := h.svr.GetStorage() gcSafepoint, err := storage.LoadGCSafePoint() @@ -66,14 +66,14 @@ func (h *serviceGCSafepointHandler) GetGCSafePoint(w http.ResponseWriter, r *htt h.rd.JSON(w, http.StatusOK, list) } -// @Tags service_gc_safepoint -// @Summary Delete a service GC safepoint. -// @Param service_id path string true "Service ID" -// @Produce json -// @Success 200 {string} string "Delete service GC safepoint successfully." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /gc/safepoint/{service_id} [delete] -// @Tags rule +// @Tags service_gc_safepoint +// @Summary Delete a service GC safepoint. 
+// @Param service_id path string true "Service ID" +// @Produce json +// @Success 200 {string} string "Delete service GC safepoint successfully." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /gc/safepoint/{service_id} [delete] +// @Tags rule func (h *serviceGCSafepointHandler) DeleteGCSafePoint(w http.ResponseWriter, r *http.Request) { storage := h.svr.GetStorage() serviceID := mux.Vars(r)["service_id"] diff --git a/server/api/service_middleware.go b/server/api/service_middleware.go index 426399a1d6e..c4489e93fa1 100644 --- a/server/api/service_middleware.go +++ b/server/api/service_middleware.go @@ -45,24 +45,24 @@ func newServiceMiddlewareHandler(svr *server.Server, rd *render.Render) *service } } -// @Tags service_middleware -// @Summary Get Service Middleware config. -// @Produce json -// @Success 200 {object} config.Config -// @Router /service-middleware/config [get] +// @Tags service_middleware +// @Summary Get Service Middleware config. +// @Produce json +// @Success 200 {object} config.Config +// @Router /service-middleware/config [get] func (h *serviceMiddlewareHandler) GetServiceMiddlewareConfig(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, h.svr.GetServiceMiddlewareConfig()) } -// @Tags service_middleware -// @Summary Update some service-middleware's config items. -// @Accept json -// @Param body body object false "json params" -// @Produce json -// @Success 200 {string} string "The config is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /service-middleware/config [post] +// @Tags service_middleware +// @Summary Update some service-middleware's config items. +// @Accept json +// @Param body body object false "json params" +// @Produce json +// @Success 200 {string} string "The config is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /service-middleware/config [post] func (h *serviceMiddlewareHandler) SetServiceMiddlewareConfig(w http.ResponseWriter, r *http.Request) { cfg := h.svr.GetServiceMiddlewareConfig() data, err := io.ReadAll(r.Body) @@ -131,14 +131,14 @@ func (h *serviceMiddlewareHandler) updateAudit(config *config.ServiceMiddlewareC return err } -// @Tags service_middleware -// @Summary update ratelimit config -// @Param body body object string "json params" -// @Produce json -// @Success 200 {string} string -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "config item not found" -// @Router /service-middleware/config/rate-limit [POST] +// @Tags service_middleware +// @Summary update ratelimit config +// @Param body body object string "json params" +// @Produce json +// @Success 200 {string} string +// @Failure 400 {string} string "The input is invalid." 
+// @Failure 500 {string} string "config item not found" +// @Router /service-middleware/config/rate-limit [POST] func (h *serviceMiddlewareHandler) SetRatelimitConfig(w http.ResponseWriter, r *http.Request) { var input map[string]interface{} if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { diff --git a/server/api/stats.go b/server/api/stats.go index d0a14e9a051..9a5983f43ba 100644 --- a/server/api/stats.go +++ b/server/api/stats.go @@ -33,13 +33,13 @@ func newStatsHandler(svr *server.Server, rd *render.Render) *statsHandler { } } -// @Tags stats -// @Summary Get region statistics of a specified range. -// @Param start_key query string true "Start key" -// @Param end_key query string true "End key" -// @Produce json -// @Success 200 {object} statistics.RegionStats -// @Router /stats/region [get] +// @Tags stats +// @Summary Get region statistics of a specified range. +// @Param start_key query string true "Start key" +// @Param end_key query string true "End key" +// @Produce json +// @Success 200 {object} statistics.RegionStats +// @Router /stats/region [get] func (h *statsHandler) GetRegionStatus(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) startKey, endKey := r.URL.Query().Get("start_key"), r.URL.Query().Get("end_key") diff --git a/server/api/status.go b/server/api/status.go index 1f318fbb546..12a25554f46 100644 --- a/server/api/status.go +++ b/server/api/status.go @@ -42,10 +42,10 @@ func newStatusHandler(svr *server.Server, rd *render.Render) *statusHandler { } } -// @Summary Get the build info of PD server. -// @Produce json -// @Success 200 {object} status -// @Router /status [get] +// @Summary Get the build info of PD server. +// @Produce json +// @Success 200 {object} status +// @Router /status [get] func (h *statusHandler) GetPDStatus(w http.ResponseWriter, r *http.Request) { version := status{ BuildTS: versioninfo.PDBuildTS, diff --git a/server/api/store.go b/server/api/store.go index 27aa7b59655..9ee784eaa29 100644 --- a/server/api/store.go +++ b/server/api/store.go @@ -140,15 +140,15 @@ func newStoreHandler(handler *server.Handler, rd *render.Render) *storeHandler { } } -// @Tags store -// @Summary Get a store's information. -// @Param id path integer true "Store Id" -// @Produce json -// @Success 200 {object} StoreInfo -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The store does not exist." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /store/{id} [get] +// @Tags store +// @Summary Get a store's information. +// @Param id path integer true "Store Id" +// @Produce json +// @Success 200 {object} StoreInfo +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The store does not exist." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /store/{id} [get] func (h *storeHandler) GetStore(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -168,17 +168,17 @@ func (h *storeHandler) GetStore(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, storeInfo) } -// @Tags store -// @Summary Take down a store from the cluster. -// @Param id path integer true "Store Id" -// @Param force query string true "force" Enums(true, false), when force is true it means the store is physically destroyed and can never up gain -// @Produce json -// @Success 200 {string} string "The store is set as Offline." 
-// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The store does not exist." -// @Failure 410 {string} string "The store has already been removed." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /store/{id} [delete] +// @Tags store +// @Summary Take down a store from the cluster. +// @Param id path integer true "Store Id" +// @Param force query string true "force" Enums(true, false) +// @Produce json +// @Success 200 {string} string "The store is set as Offline." +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The store does not exist." +// @Failure 410 {string} string "The store has already been removed." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /store/{id} [delete] func (h *storeHandler) DeleteStore(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -199,16 +199,16 @@ func (h *storeHandler) DeleteStore(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, "The store is set as Offline.") } -// @Tags store -// @Summary Set the store's state. -// @Param id path integer true "Store Id" -// @Param state query string true "state" Enums(Up, Offline) -// @Produce json -// @Success 200 {string} string "The store's state is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The store does not exist." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /store/{id}/state [post] +// @Tags store +// @Summary Set the store's state. +// @Param id path integer true "Store Id" +// @Param state query string true "state" Enums(Up, Offline) +// @Produce json +// @Success 200 {string} string "The store's state is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The store does not exist." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /store/{id}/state [post] func (h *storeHandler) SetStoreState(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -252,15 +252,15 @@ func (h *storeHandler) responseStoreErr(w http.ResponseWriter, err error, storeI } // FIXME: details of input json body params -// @Tags store -// @Summary Set the store's label. -// @Param id path integer true "Store Id" -// @Param body body object true "Labels in json format" -// @Produce json -// @Success 200 {string} string "The store's label is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /store/{id}/label [post] +// @Tags store +// @Summary Set the store's label. +// @Param id path integer true "Store Id" +// @Param body body object true "Labels in json format" +// @Produce json +// @Success 200 {string} string "The store's label is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /store/{id}/label [post] func (h *storeHandler) SetStoreLabel(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -298,15 +298,15 @@ func (h *storeHandler) SetStoreLabel(w http.ResponseWriter, r *http.Request) { } // FIXME: details of input json body params -// @Tags store -// @Summary Set the store's leader/region weight. 
-// @Param id path integer true "Store Id" -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "The store's label is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /store/{id}/weight [post] +// @Tags store +// @Summary Set the store's leader/region weight. +// @Param id path integer true "Store Id" +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "The store's label is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /store/{id}/weight [post] func (h *storeHandler) SetStoreWeight(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -351,16 +351,16 @@ func (h *storeHandler) SetStoreWeight(w http.ResponseWriter, r *http.Request) { } // FIXME: details of input json body params -// @Tags store -// @Summary Set the store's limit. -// @Param ttlSecond query integer false "ttl". ttl param is only for BR and lightning now. Don't use it. -// @Param id path integer true "Store Id" -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "The store's label is updated." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /store/{id}/limit [post] +// @Tags store +// @Summary Set the store's limit. +// @Param ttlSecond query integer false "ttl param is only for BR and lightning now. Don't use it." +// @Param id path integer true "Store Id" +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "The store's label is updated." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /store/{id}/limit [post] func (h *storeHandler) SetStoreLimit(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) vars := mux.Vars(r) @@ -435,12 +435,12 @@ func newStoresHandler(handler *server.Handler, rd *render.Render) *storesHandler } } -// @Tags store -// @Summary Remove tombstone records in the cluster. -// @Produce json -// @Success 200 {string} string "Remove tombstone successfully." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /stores/remove-tombstone [delete] +// @Tags store +// @Summary Remove tombstone records in the cluster. +// @Produce json +// @Success 200 {string} string "Remove tombstone successfully." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /stores/remove-tombstone [delete] func (h *storesHandler) RemoveTombStone(w http.ResponseWriter, r *http.Request) { err := getCluster(r).RemoveTombStoneRecords() if err != nil { @@ -452,16 +452,16 @@ func (h *storesHandler) RemoveTombStone(w http.ResponseWriter, r *http.Request) } // FIXME: details of input json body params -// @Tags store -// @Summary Set limit of all stores in the cluster. -// @Accept json -// @Param ttlSecond query integer false "ttl". ttl param is only for BR and lightning now. Don't use it. -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "Set store limit successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." 
-// @Router /stores/limit [post] +// @Tags store +// @Summary Set limit of all stores in the cluster. +// @Accept json +// @Param ttlSecond query integer false "ttl param is only for BR and lightning now. Don't use it." +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "Set store limit successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /stores/limit [post] func (h *storesHandler) SetAllStoresLimit(w http.ResponseWriter, r *http.Request) { var input map[string]interface{} if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &input); err != nil { @@ -535,13 +535,13 @@ func (h *storesHandler) SetAllStoresLimit(w http.ResponseWriter, r *http.Request } // FIXME: details of output json body -// @Tags store -// @Summary Get limit of all stores in the cluster. -// @Param include_tombstone query bool false "include Tombstone" default(false) -// @Produce json -// @Success 200 {object} string -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /stores/limit [get] +// @Tags store +// @Summary Get limit of all stores in the cluster. +// @Param include_tombstone query bool false "include Tombstone" default(false) +// @Produce json +// @Success 200 {object} string +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /stores/limit [get] func (h *storesHandler) GetAllStoresLimit(w http.ResponseWriter, r *http.Request) { limits := h.GetScheduleConfig().StoreLimit includeTombstone := false @@ -569,15 +569,15 @@ func (h *storesHandler) GetAllStoresLimit(w http.ResponseWriter, r *http.Request h.rd.JSON(w, http.StatusOK, limits) } -// @Tags store -// @Summary Set limit scene in the cluster. -// @Accept json -// @Param body body storelimit.Scene true "Store limit scene" -// @Produce json -// @Success 200 {string} string "Set store limit scene successfully." -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /stores/limit/scene [post] +// @Tags store +// @Summary Set limit scene in the cluster. +// @Accept json +// @Param body body storelimit.Scene true "Store limit scene" +// @Produce json +// @Success 200 {string} string "Set store limit scene successfully." +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /stores/limit/scene [post] func (h *storesHandler) SetStoreLimitScene(w http.ResponseWriter, r *http.Request) { typeName := r.URL.Query().Get("type") typeValue, err := parseStoreLimitType(typeName) @@ -593,11 +593,11 @@ func (h *storesHandler) SetStoreLimitScene(w http.ResponseWriter, r *http.Reques h.rd.JSON(w, http.StatusOK, "Set store limit scene successfully.") } -// @Tags store -// @Summary Get limit scene in the cluster. -// @Produce json -// @Success 200 {string} string "Get store limit scene successfully." -// @Router /stores/limit/scene [get] +// @Tags store +// @Summary Get limit scene in the cluster. +// @Produce json +// @Success 200 {string} string "Get store limit scene successfully." 
+// @Router /stores/limit/scene [get] func (h *storesHandler) GetStoreLimitScene(w http.ResponseWriter, r *http.Request) { typeName := r.URL.Query().Get("type") typeValue, err := parseStoreLimitType(typeName) @@ -618,13 +618,13 @@ type Progress struct { LeftSeconds float64 `json:"left_seconds"` } -// @Tags stores -// @Summary Get store progress in the cluster. -// @Produce json -// @Success 200 {object} Progress -// @Failure 400 {string} string "The input is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /stores/progress [get] +// @Tags stores +// @Summary Get store progress in the cluster. +// @Produce json +// @Success 200 {object} Progress +// @Failure 400 {string} string "The input is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /stores/progress [get] func (h *storesHandler) GetStoresProgress(w http.ResponseWriter, r *http.Request) { if v := r.URL.Query().Get("id"); v != "" { storeID, err := strconv.ParseUint(v, 10, 64) @@ -668,13 +668,13 @@ func (h *storesHandler) GetStoresProgress(w http.ResponseWriter, r *http.Request h.rd.JSON(w, http.StatusBadRequest, "need query parameters") } -// @Tags store -// @Summary Get stores in the cluster. -// @Param state query array true "Specify accepted store states." -// @Produce json -// @Success 200 {object} StoresInfo -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /stores [get] +// @Tags store +// @Summary Get stores in the cluster. +// @Param state query array true "Specify accepted store states." +// @Produce json +// @Success 200 {object} StoresInfo +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /stores [get] func (h *storesHandler) GetStores(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) stores := rc.GetMetaStores() diff --git a/server/api/trend.go b/server/api/trend.go index 537167705fc..e100b31cbd9 100644 --- a/server/api/trend.go +++ b/server/api/trend.go @@ -80,14 +80,14 @@ func newTrendHandler(s *server.Server, rd *render.Render) *trendHandler { } } -// @Tags trend -// @Summary Get the growth and changes of data in the most recent period of time. -// @Param from query integer false "From Unix timestamp" -// @Produce json -// @Success 200 {object} Trend -// @Failure 400 {string} string "The request is invalid." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /trend [get] +// @Tags trend +// @Summary Get the growth and changes of data in the most recent period of time. +// @Param from query integer false "From Unix timestamp" +// @Produce json +// @Success 200 {object} Trend +// @Failure 400 {string} string "The request is invalid." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /trend [get] func (h *trendHandler) GetTrend(w http.ResponseWriter, r *http.Request) { var from time.Time if fromStr := r.URL.Query()["from"]; len(fromStr) > 0 { diff --git a/server/api/tso.go b/server/api/tso.go index df7db500899..cbe30887b73 100644 --- a/server/api/tso.go +++ b/server/api/tso.go @@ -35,17 +35,17 @@ func newTSOHandler(svr *server.Server, rd *render.Render) *tsoHandler { } } -// @Tags tso -// @Summary Transfer Local TSO Allocator -// @Accept json -// @Param name path string true "PD server name" -// @Param body body object true "json params" -// @Produce json -// @Success 200 {string} string "The transfer command is submitted." 
-// @Failure 400 {string} string "The input is invalid." -// @Failure 404 {string} string "The member does not exist." -// @Failure 500 {string} string "PD server failed to proceed the request." -// @Router /tso/allocator/transfer/{name} [post] +// @Tags tso +// @Summary Transfer Local TSO Allocator +// @Accept json +// @Param name path string true "PD server name" +// @Param body body object true "json params" +// @Produce json +// @Success 200 {string} string "The transfer command is submitted." +// @Failure 400 {string} string "The input is invalid." +// @Failure 404 {string} string "The member does not exist." +// @Failure 500 {string} string "PD server failed to proceed the request." +// @Router /tso/allocator/transfer/{name} [post] func (h *tsoHandler) TransferLocalTSOAllocator(w http.ResponseWriter, r *http.Request) { members, membersErr := getMembers(h.svr) if membersErr != nil { diff --git a/server/api/unsafe_operation.go b/server/api/unsafe_operation.go index 83912c120a0..c45771619b0 100644 --- a/server/api/unsafe_operation.go +++ b/server/api/unsafe_operation.go @@ -35,15 +35,15 @@ func newUnsafeOperationHandler(svr *server.Server, rd *render.Render) *unsafeOpe } } -// @Tags unsafe -// @Summary Remove failed stores unsafely. -// @Accept json -// @Param body body object true "json params" -// @Produce json +// @Tags unsafe +// @Summary Remove failed stores unsafely. +// @Accept json +// @Param body body object true "json params" +// @Produce json // Success 200 {string} string "Request has been accepted." // Failure 400 {string} string "The input is invalid." // Failure 500 {string} string "PD server failed to proceed the request." -// @Router /admin/unsafe/remove-failed-stores [POST] +// @Router /admin/unsafe/remove-failed-stores [POST] func (h *unsafeOperationHandler) RemoveFailedStores(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) var input map[string]interface{} @@ -72,11 +72,11 @@ func (h *unsafeOperationHandler) RemoveFailedStores(w http.ResponseWriter, r *ht h.rd.JSON(w, http.StatusOK, "Request has been accepted.") } -// @Tags unsafe -// @Summary Show the current status of failed stores removal. -// @Produce json +// @Tags unsafe +// @Summary Show the current status of failed stores removal. +// @Produce json // Success 200 {object} []StageOutput -// @Router /admin/unsafe/remove-failed-stores/show [GET] +// @Router /admin/unsafe/remove-failed-stores/show [GET] func (h *unsafeOperationHandler) GetFailedStoresRemovalStatus(w http.ResponseWriter, r *http.Request) { rc := getCluster(r) h.rd.JSON(w, http.StatusOK, rc.GetUnsafeRecoveryController().Show()) diff --git a/server/api/version.go b/server/api/version.go index 2a1d7e36fbd..38e5a12d8c2 100644 --- a/server/api/version.go +++ b/server/api/version.go @@ -39,10 +39,10 @@ func newVersionHandler(rd *render.Render) *versionHandler { } } -// @Summary Get the version of PD server. -// @Produce json -// @Success 200 {object} version -// @Router /version [get] +// @Summary Get the version of PD server. 
+// @Produce json +// @Success 200 {object} version +// @Router /version [get] func (h *versionHandler) GetVersion(w http.ResponseWriter, r *http.Request) { version := &version{ Version: versioninfo.PDReleaseVersion, diff --git a/tests/client/go.sum b/tests/client/go.sum index b9d76d704a8..ad49f3bc358 100644 --- a/tests/client/go.sum +++ b/tests/client/go.sum @@ -79,6 +79,7 @@ github.com/corona10/goimagehash v1.0.2/go.mod h1:/l9umBhvcHQXVtQO1V6Gp1yD20STawk github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -133,20 +134,24 @@ github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= 
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= @@ -287,6 +292,8 @@ github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9q github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/joomcode/errorx v1.0.1 h1:CalpDWz14ZHd68fIqluJasJosAewpz2TFaJALrUxjrk= github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -308,13 +315,13 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -326,8 +333,9 @@ github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -371,6 +379,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= @@ -515,8 +525,9 @@ github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba h1:lUPlXKqgbqT github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= -github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476 h1:UjnSXdNPIG+5FJ6xLQODEdk7gSnJlMldu3sPAxxCO+4= github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc= +github.com/swaggo/swag v1.8.3 h1:3pZSSCQ//gAH88lfmxM3Cd1+JCsxV8Md6f36b9hrZ5s= +github.com/swaggo/swag v1.8.3/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/thoas/go-funk v0.8.0 h1:JP9tKSvnpFVclYgDM0Is7FD9M4fhPvqA0s0BsXmzSRQ= @@ -549,6 +560,7 @@ github.com/unrolled/render v1.0.1 h1:VDDnQQVfBMsOsp3VaCJszSO0nkBIVEYoPWeRThk9spY github.com/unrolled/render v1.0.1/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= @@ -561,6 +573,7 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -614,8 +627,9 @@ golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg= golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -629,8 +643,9 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -653,8 +668,12 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 
h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -700,16 +719,23 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= @@ -743,8 +769,9 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -779,8 +806,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -804,6 +832,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.0.6 h1:mA0XRPjIKi4bkE9nv+NKs6qj6QWOchqUSdWOcpd3x1E= From 3d53b06e1664ae176f33e4ee7636f077b8447da8 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Tue, 21 Jun 2022 13:30:36 +0800 Subject: [PATCH 33/35] Makefile: increase the golangci-lint timeout (#5192) close tikv/pd#5078 Increase the golangci-lint timeout. Signed-off-by: JmPotato Co-authored-by: Ti Chi Robot --- .golangci.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 778b8d2b047..bfb9954fff9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ run: - deadline: 120s + timeout: 3m linters: enable: - misspell diff --git a/Makefile b/Makefile index 7ac146e39eb..e76488e2b2d 100644 --- a/Makefile +++ b/Makefile @@ -148,7 +148,7 @@ static: install-tools @ echo "gofmt ..." @ gofmt -s -l -d $(PACKAGE_DIRECTORIES) 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." - @ golangci-lint run $(PACKAGE_DIRECTORIES) + @ golangci-lint run --verbose $(PACKAGE_DIRECTORIES) @ echo "revive ..." 
@ revive -formatter friendly -config revive.toml $(PACKAGES) From 3d1e6c5336fecc6ae299b14aa0f805a01e557b26 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Tue, 21 Jun 2022 15:06:37 +0800 Subject: [PATCH 34/35] bucket: migrate test framework to testify (#5195) ref tikv/pd#4813 Signed-off-by: lhy1024 Co-authored-by: Ti Chi Robot --- .../buckets/hot_bucket_cache_test.go | 83 +++++++++---------- .../buckets/hot_bucket_task_test.go | 58 ++++++------- 2 files changed, 69 insertions(+), 72 deletions(-) diff --git a/server/statistics/buckets/hot_bucket_cache_test.go b/server/statistics/buckets/hot_bucket_cache_test.go index 7c8cc85e99c..a55a505957b 100644 --- a/server/statistics/buckets/hot_bucket_cache_test.go +++ b/server/statistics/buckets/hot_bucket_cache_test.go @@ -18,19 +18,12 @@ import ( "context" "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testHotBucketCache{}) - -type testHotBucketCache struct{} - -func (t *testHotBucketCache) TestPutItem(c *C) { +func TestPutItem(t *testing.T) { + re := require.New(t) cache := NewBucketsCache(context.Background()) testdata := []struct { regionID uint64 @@ -90,17 +83,18 @@ func (t *testHotBucketCache) TestPutItem(c *C) { }} for _, v := range testdata { bucket := convertToBucketTreeItem(newTestBuckets(v.regionID, v.version, v.keys, 10)) - c.Assert(bucket.GetStartKey(), BytesEquals, v.keys[0]) - c.Assert(bucket.GetEndKey(), BytesEquals, v.keys[len(v.keys)-1]) + re.Equal(v.keys[0], bucket.GetStartKey()) + re.Equal(v.keys[len(v.keys)-1], bucket.GetEndKey()) cache.putItem(bucket, cache.getBucketsByKeyRange(bucket.GetStartKey(), bucket.GetEndKey())) - c.Assert(cache.bucketsOfRegion, HasLen, v.regionCount) - c.Assert(cache.tree.Len(), Equals, v.treeLen) - c.Assert(cache.bucketsOfRegion[v.regionID], NotNil) - c.Assert(cache.getBucketsByKeyRange([]byte("10"), nil), NotNil) + re.Len(cache.bucketsOfRegion, v.regionCount) + re.Equal(v.treeLen, cache.tree.Len()) + re.NotNil(cache.bucketsOfRegion[v.regionID]) + re.NotNil(cache.getBucketsByKeyRange([]byte("10"), nil)) } } -func (t *testHotBucketCache) TestConvertToBucketTreeStat(c *C) { +func TestConvertToBucketTreeStat(t *testing.T) { + re := require.New(t) buckets := &metapb.Buckets{ RegionId: 1, Version: 0, @@ -116,14 +110,15 @@ func (t *testHotBucketCache) TestConvertToBucketTreeStat(c *C) { PeriodInMs: 1000, } item := convertToBucketTreeItem(buckets) - c.Assert(item.startKey, BytesEquals, []byte{'1'}) - c.Assert(item.endKey, BytesEquals, []byte{'5'}) - c.Assert(item.regionID, Equals, uint64(1)) - c.Assert(item.version, Equals, uint64(0)) - c.Assert(item.stats, HasLen, 4) + re.Equal([]byte{'1'}, item.startKey) + re.Equal([]byte{'5'}, item.endKey) + re.Equal(uint64(1), item.regionID) + re.Equal(uint64(0), item.version) + re.Len(item.stats, 4) } -func (t *testHotBucketCache) TestGetBucketsByKeyRange(c *C) { +func TestGetBucketsByKeyRange(t *testing.T) { + re := require.New(t) cache := NewBucketsCache(context.Background()) bucket1 := newTestBuckets(1, 1, [][]byte{[]byte(""), []byte("015")}, 0) bucket2 := newTestBuckets(2, 1, [][]byte{[]byte("015"), []byte("020")}, 0) @@ -131,15 +126,16 @@ func (t *testHotBucketCache) TestGetBucketsByKeyRange(c *C) { cache.putItem(cache.checkBucketsFlow(bucket1)) cache.putItem(cache.checkBucketsFlow(bucket2)) cache.putItem(cache.checkBucketsFlow(bucket3)) - c.Assert(cache.getBucketsByKeyRange([]byte(""), []byte("100")), HasLen, 3) - 
c.Assert(cache.getBucketsByKeyRange([]byte("030"), []byte("100")), HasLen, 1) - c.Assert(cache.getBucketsByKeyRange([]byte("010"), []byte("030")), HasLen, 3) - c.Assert(cache.getBucketsByKeyRange([]byte("015"), []byte("020")), HasLen, 1) - c.Assert(cache.getBucketsByKeyRange([]byte("001"), []byte("")), HasLen, 3) - c.Assert(cache.bucketsOfRegion, HasLen, 3) + re.Len(cache.getBucketsByKeyRange([]byte(""), []byte("100")), 3) + re.Len(cache.getBucketsByKeyRange([]byte("030"), []byte("100")), 1) + re.Len(cache.getBucketsByKeyRange([]byte("010"), []byte("030")), 3) + re.Len(cache.getBucketsByKeyRange([]byte("015"), []byte("020")), 1) + re.Len(cache.getBucketsByKeyRange([]byte("001"), []byte("")), 3) + re.Len(cache.bucketsOfRegion, 3) } -func (t *testHotBucketCache) TestInherit(c *C) { +func TestInherit(t *testing.T) { + re := require.New(t) originBucketItem := convertToBucketTreeItem(newTestBuckets(1, 1, [][]byte{[]byte(""), []byte("20"), []byte("50"), []byte("")}, 0)) originBucketItem.stats[0].HotDegree = 3 originBucketItem.stats[1].HotDegree = 2 @@ -173,15 +169,15 @@ func (t *testHotBucketCache) TestInherit(c *C) { for _, v := range testdata { buckets := convertToBucketTreeItem(v.buckets) buckets.inherit([]*BucketTreeItem{originBucketItem}) - c.Assert(buckets.stats, HasLen, len(v.expect)) + re.Len(buckets.stats, len(v.expect)) for k, v := range v.expect { - c.Assert(buckets.stats[k].HotDegree, Equals, v) + re.Equal(v, buckets.stats[k].HotDegree) } } } -func (t *testHotBucketCache) TestBucketTreeItemClone(c *C) { - // bucket range: [010,020][020,100] +func TestBucketTreeItemClone(t *testing.T) { + re := require.New(t) origin := convertToBucketTreeItem(newTestBuckets(1, 1, [][]byte{[]byte("010"), []byte("020"), []byte("100")}, uint64(0))) testdata := []struct { startKey []byte @@ -221,30 +217,31 @@ func (t *testHotBucketCache) TestBucketTreeItemClone(c *C) { }} for _, v := range testdata { copy := origin.cloneBucketItemByRange(v.startKey, v.endKey) - c.Assert(copy.startKey, BytesEquals, v.startKey) - c.Assert(copy.endKey, BytesEquals, v.endKey) - c.Assert(copy.stats, HasLen, v.count) + re.Equal(v.startKey, copy.startKey) + re.Equal(v.endKey, copy.endKey) + re.Len(copy.stats, v.count) if v.count > 0 && v.strict { - c.Assert(copy.stats[0].StartKey, BytesEquals, v.startKey) - c.Assert(copy.stats[len(copy.stats)-1].EndKey, BytesEquals, v.endKey) + re.Equal(v.startKey, copy.stats[0].StartKey) + re.Equal(v.endKey, copy.stats[len(copy.stats)-1].EndKey) } } } -func (t *testHotBucketCache) TestCalculateHotDegree(c *C) { +func TestCalculateHotDegree(t *testing.T) { + re := require.New(t) origin := convertToBucketTreeItem(newTestBuckets(1, 1, [][]byte{[]byte("010"), []byte("100")}, uint64(0))) origin.calculateHotDegree() - c.Assert(origin.stats[0].HotDegree, Equals, -1) + re.Equal(-1, origin.stats[0].HotDegree) // case1: the dimension of read will be hot origin.stats[0].Loads = []uint64{minHotThresholds[0] + 1, minHotThresholds[1] + 1, 0, 0, 0, 0} origin.calculateHotDegree() - c.Assert(origin.stats[0].HotDegree, Equals, 0) + re.Equal(0, origin.stats[0].HotDegree) // case1: the dimension of write will be hot origin.stats[0].Loads = []uint64{0, 0, 0, minHotThresholds[3] + 1, minHotThresholds[4] + 1, 0} origin.calculateHotDegree() - c.Assert(origin.stats[0].HotDegree, Equals, 1) + re.Equal(1, origin.stats[0].HotDegree) } func newTestBuckets(regionID uint64, version uint64, keys [][]byte, flow uint64) *metapb.Buckets { diff --git a/server/statistics/buckets/hot_bucket_task_test.go 
b/server/statistics/buckets/hot_bucket_task_test.go index a5fe0d7ad8c..49f60116c9d 100644 --- a/server/statistics/buckets/hot_bucket_task_test.go +++ b/server/statistics/buckets/hot_bucket_task_test.go @@ -18,24 +18,21 @@ import ( "context" "math" "strconv" + "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testHotBucketTaskCache{}) - -type testHotBucketTaskCache struct { -} - func getAllBucketStats(ctx context.Context, hotCache *HotBucketCache) map[uint64][]*BucketStat { task := NewCollectBucketStatsTask(minHotDegree) hotCache.CheckAsync(task) return task.WaitRet(ctx) } -func (s *testHotBucketTaskCache) TestColdHot(c *C) { +func TestColdHot(t *testing.T) { + re := require.New(t) ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() hotCache := NewBucketsCache(ctx) @@ -52,60 +49,63 @@ func (s *testHotBucketTaskCache) TestColdHot(c *C) { for _, v := range testdata { for i := 0; i < 20; i++ { task := NewCheckPeerTask(v.buckets) - c.Assert(hotCache.CheckAsync(task), IsTrue) + re.True(hotCache.CheckAsync(task)) hotBuckets := getAllBucketStats(ctx, hotCache) time.Sleep(time.Millisecond * 10) item := hotBuckets[v.buckets.RegionId] - c.Assert(item, NotNil) + re.NotNil(item) if v.isHot { - c.Assert(item[0].HotDegree, Equals, i+1) + re.Equal(i+1, item[0].HotDegree) } else { - c.Assert(item[0].HotDegree, Equals, -i-1) + re.Equal(-i-1, item[0].HotDegree) } } } } -func (s *testHotBucketTaskCache) TestCheckBucketsTask(c *C) { +func TestCheckBucketsTask(t *testing.T) { + re := require.New(t) ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() hotCache := NewBucketsCache(ctx) // case1: add bucket successfully buckets := newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20"), []byte("30")}, 0) task := NewCheckPeerTask(buckets) - c.Assert(hotCache.CheckAsync(task), IsTrue) + re.True(hotCache.CheckAsync(task)) time.Sleep(time.Millisecond * 10) hotBuckets := getAllBucketStats(ctx, hotCache) - c.Assert(hotBuckets, HasLen, 1) + re.Len(hotBuckets, 1) item := hotBuckets[uint64(1)] - c.Assert(item, NotNil) - c.Assert(item, HasLen, 2) - c.Assert(item[0].HotDegree, Equals, -1) - c.Assert(item[1].HotDegree, Equals, -1) + re.NotNil(item) + + re.Len(item, 2) + re.Equal(-1, item[0].HotDegree) + re.Equal(-1, item[1].HotDegree) // case2: add bucket successful and the hot degree should inherit from the old one. buckets = newTestBuckets(2, 1, [][]byte{[]byte("20"), []byte("30")}, 0) task = NewCheckPeerTask(buckets) - c.Assert(hotCache.CheckAsync(task), IsTrue) + re.True(hotCache.CheckAsync(task)) hotBuckets = getAllBucketStats(ctx, hotCache) time.Sleep(time.Millisecond * 10) item = hotBuckets[uint64(2)] - c.Assert(item, HasLen, 1) - c.Assert(item[0].HotDegree, Equals, -2) + re.Len(item, 1) + re.Equal(-2, item[0].HotDegree) // case3:add bucket successful and the hot degree should inherit from the old one. 
buckets = newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20")}, 0) task = NewCheckPeerTask(buckets) - c.Assert(hotCache.CheckAsync(task), IsTrue) + re.True(hotCache.CheckAsync(task)) hotBuckets = getAllBucketStats(ctx, hotCache) time.Sleep(time.Millisecond * 10) item = hotBuckets[uint64(1)] - c.Assert(item, HasLen, 1) - c.Assert(item[0].HotDegree, Equals, -2) + re.Len(item, 1) + re.Equal(-2, item[0].HotDegree) } -func (s *testHotBucketTaskCache) TestCollectBucketStatsTask(c *C) { +func TestCollectBucketStatsTask(t *testing.T) { + re := require.New(t) ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() hotCache := NewBucketsCache(ctx) @@ -117,11 +117,11 @@ func (s *testHotBucketTaskCache) TestCollectBucketStatsTask(c *C) { } time.Sleep(time.Millisecond * 10) task := NewCollectBucketStatsTask(-100) - c.Assert(hotCache.CheckAsync(task), IsTrue) + re.True(hotCache.CheckAsync(task)) stats := task.WaitRet(ctx) - c.Assert(stats, HasLen, 10) + re.Len(stats, 10) task = NewCollectBucketStatsTask(1) - c.Assert(hotCache.CheckAsync(task), IsTrue) + re.True(hotCache.CheckAsync(task)) stats = task.WaitRet(ctx) - c.Assert(stats, HasLen, 0) + re.Len(stats, 0) } From b100049b4548ac84c8da2566cb65b1f204c59a10 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Tue, 21 Jun 2022 21:14:37 +0800 Subject: [PATCH 35/35] *: replace `testcase` with `testCase` (#5206) ref tikv/pd#4399 Signed-off-by: lhy1024 --- server/core/region_test.go | 24 +++++++-------- server/schedule/checker/rule_checker_test.go | 12 ++++---- .../placement/region_rule_cache_test.go | 14 ++++----- server/schedule/placement/rule_test.go | 10 +++---- server/schedule/region_scatterer_test.go | 30 +++++++++---------- server/schedulers/hot_region_test.go | 24 +++++++-------- server/schedulers/scheduler_test.go | 10 +++---- 7 files changed, 62 insertions(+), 62 deletions(-) diff --git a/server/core/region_test.go b/server/core/region_test.go index edf55c8ac7b..a86a8490bd8 100644 --- a/server/core/region_test.go +++ b/server/core/region_test.go @@ -67,7 +67,7 @@ func TestNeedMerge(t *testing.T) { func TestSortedEqual(t *testing.T) { re := require.New(t) - testcases := []struct { + testCases := []struct { idsA []int idsB []int isEqual bool @@ -146,7 +146,7 @@ func TestSortedEqual(t *testing.T) { return peers } // test NewRegionInfo - for _, test := range testcases { + for _, test := range testCases { regionA := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(test.idsA)}, nil) regionB := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(test.idsB)}, nil) re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) @@ -154,7 +154,7 @@ func TestSortedEqual(t *testing.T) { } // test RegionFromHeartbeat - for _, test := range testcases { + for _, test := range testCases { regionA := RegionFromHeartbeat(&pdpb.RegionHeartbeatRequest{ Region: &metapb.Region{Id: 100, Peers: pickPeers(test.idsA)}, DownPeers: pickPeerStats(test.idsA), @@ -173,7 +173,7 @@ func TestSortedEqual(t *testing.T) { // test Clone region := NewRegionInfo(meta, meta.Peers[0]) - for _, test := range testcases { + for _, test := range testCases { downPeersA := pickPeerStats(test.idsA) downPeersB := pickPeerStats(test.idsB) pendingPeersA := pickPeers(test.idsA) @@ -190,7 +190,7 @@ func TestInherit(t *testing.T) { re := require.New(t) // size in MB // case for approximateSize - testcases := []struct { + testCases := []struct { originExists bool originSize uint64 size uint64 @@ -202,7 +202,7 @@ func TestInherit(t *testing.T) { {true, 1, 2, 
2}, {true, 2, 0, 2}, } - for _, test := range testcases { + for _, test := range testCases { var origin *RegionInfo if test.originExists { origin = NewRegionInfo(&metapb.Region{Id: 100}, nil) @@ -240,7 +240,7 @@ func TestInherit(t *testing.T) { func TestRegionRoundingFlow(t *testing.T) { re := require.New(t) - testcases := []struct { + testCases := []struct { flow uint64 digit int expect uint64 @@ -254,7 +254,7 @@ func TestRegionRoundingFlow(t *testing.T) { {252623, math.MaxInt64, 0}, {252623, math.MinInt64, 252623}, } - for _, test := range testcases { + for _, test := range testCases { r := NewRegionInfo(&metapb.Region{Id: 100}, nil, WithFlowRoundByDigit(test.digit)) r.readBytes = test.flow r.writtenBytes = test.flow @@ -264,7 +264,7 @@ func TestRegionRoundingFlow(t *testing.T) { func TestRegionWriteRate(t *testing.T) { re := require.New(t) - testcases := []struct { + testCases := []struct { bytes uint64 keys uint64 interval uint64 @@ -280,7 +280,7 @@ func TestRegionWriteRate(t *testing.T) { {0, 0, 500, 0, 0}, {10, 3, 500, 0, 0}, } - for _, test := range testcases { + for _, test := range testCases { r := NewRegionInfo(&metapb.Region{Id: 100}, nil, SetWrittenBytes(test.bytes), SetWrittenKeys(test.keys), SetReportInterval(test.interval)) bytesRate, keysRate := r.GetWriteRate() re.Equal(test.expectBytesRate, bytesRate) @@ -304,7 +304,7 @@ func TestNeedSync(t *testing.T) { } region := NewRegionInfo(meta, meta.Peers[0]) - testcases := []struct { + testCases := []struct { optionsA []RegionCreateOption optionsB []RegionCreateOption needSync bool @@ -357,7 +357,7 @@ func TestNeedSync(t *testing.T) { }, } - for _, test := range testcases { + for _, test := range testCases { regionA := region.Clone(test.optionsA...) regionB := region.Clone(test.optionsB...) 
_, _, _, needSync := RegionGuide(regionA, regionB) diff --git a/server/schedule/checker/rule_checker_test.go b/server/schedule/checker/rule_checker_test.go index ea9a369348e..3eb01d3655a 100644 --- a/server/schedule/checker/rule_checker_test.go +++ b/server/schedule/checker/rule_checker_test.go @@ -605,7 +605,7 @@ func (suite *ruleCheckerTestSuite) TestRuleCache() { region = region.Clone(core.WithIncConfVer(), core.WithIncVersion()) suite.Nil(suite.rc.Check(region)) - testcases := []struct { + testCases := []struct { name string region *core.RegionInfo stillCached bool @@ -643,15 +643,15 @@ func (suite *ruleCheckerTestSuite) TestRuleCache() { stillCached: false, }, } - for _, testcase := range testcases { - suite.T().Log(testcase.name) - if testcase.stillCached { + for _, testCase := range testCases { + suite.T().Log(testCase.name) + if testCase.stillCached { suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldCache", "return(true)")) - suite.rc.Check(testcase.region) + suite.rc.Check(testCase.region) suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldCache")) } else { suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache", "return(true)")) - suite.rc.Check(testcase.region) + suite.rc.Check(testCase.region) suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache")) } } diff --git a/server/schedule/placement/region_rule_cache_test.go b/server/schedule/placement/region_rule_cache_test.go index f38d13eba87..7c578a11e34 100644 --- a/server/schedule/placement/region_rule_cache_test.go +++ b/server/schedule/placement/region_rule_cache_test.go @@ -30,7 +30,7 @@ func TestRegionRuleFitCache(t *testing.T) { originRules := addExtraRules(0) originStores := mockStores(3) cache := mockRegionRuleFitCache(originRegion, originRules, originStores) - testcases := []struct { + testCases := []struct { name string region *core.RegionInfo rules []*Rule @@ -175,13 +175,13 @@ func TestRegionRuleFitCache(t *testing.T) { unchanged: false, }, } - for _, testcase := range testcases { - t.Log(testcase.name) - re.Equal(testcase.unchanged, cache.IsUnchanged(testcase.region, testcase.rules, mockStores(3))) + for _, testCase := range testCases { + t.Log(testCase.name) + re.Equal(testCase.unchanged, cache.IsUnchanged(testCase.region, testCase.rules, mockStores(3))) } - for _, testcase := range testcases { - t.Log(testcase.name) - re.Equal(false, cache.IsUnchanged(testcase.region, testcase.rules, mockStoresNoHeartbeat(3))) + for _, testCase := range testCases { + t.Log(testCase.name) + re.Equal(false, cache.IsUnchanged(testCase.region, testCase.rules, mockStoresNoHeartbeat(3))) } // Invalid Input4 re.False(cache.IsUnchanged(mockRegion(3, 0), addExtraRules(0), nil)) diff --git a/server/schedule/placement/rule_test.go b/server/schedule/placement/rule_test.go index 94f623ef93d..ba2d1bf50e4 100644 --- a/server/schedule/placement/rule_test.go +++ b/server/schedule/placement/rule_test.go @@ -132,7 +132,7 @@ func TestBuildRuleList(t *testing.T) { Count: 5, } - testcases := []struct { + testCases := []struct { name string rules map[[2]string]*Rule expect ruleList @@ -178,11 +178,11 @@ func TestBuildRuleList(t *testing.T) { }, } - for _, testcase := range testcases { - t.Log(testcase.name) - config := &ruleConfig{rules: testcase.rules} + for _, testCase := range testCases { + t.Log(testCase.name) + config := &ruleConfig{rules: testCase.rules} result, err := buildRuleList(config) 
re.NoError(err) - re.Equal(testcase.expect.ranges, result.ranges) + re.Equal(testCase.expect.ranges, result.ranges) } } diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 373681f9f11..bfbd99f1e4e 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -255,7 +255,7 @@ func (s *testScatterRegionSuite) TestScatterCheck(c *C) { for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) } - testcases := []struct { + testCases := []struct { name string checkRegion *core.RegionInfo needFix bool @@ -276,11 +276,11 @@ func (s *testScatterRegionSuite) TestScatterCheck(c *C) { needFix: true, }, } - for _, testcase := range testcases { - c.Logf(testcase.name) + for _, testCase := range testCases { + c.Logf(testCase.name) scatterer := NewRegionScatterer(ctx, tc) - _, err := scatterer.Scatter(testcase.checkRegion, "") - if testcase.needFix { + _, err := scatterer.Scatter(testCase.checkRegion, "") + if testCase.needFix { c.Assert(err, NotNil) c.Assert(tc.CheckRegionUnderSuspect(1), IsTrue) } else { @@ -303,7 +303,7 @@ func (s *testScatterRegionSuite) TestScatterGroupInConcurrency(c *C) { tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute) } - testcases := []struct { + testCases := []struct { name string groupCount int }{ @@ -322,12 +322,12 @@ func (s *testScatterRegionSuite) TestScatterGroupInConcurrency(c *C) { } // We send scatter interweave request for each group to simulate scattering multiple region groups in concurrency. - for _, testcase := range testcases { - c.Logf(testcase.name) + for _, testCase := range testCases { + c.Logf(testCase.name) scatterer := NewRegionScatterer(ctx, tc) regionID := 1 for i := 0; i < 100; i++ { - for j := 0; j < testcase.groupCount; j++ { + for j := 0; j < testCase.groupCount; j++ { scatterer.scatterRegion(tc.AddLeaderRegion(uint64(regionID), 1, 2, 3), fmt.Sprintf("group-%v", j)) regionID++ @@ -335,7 +335,7 @@ func (s *testScatterRegionSuite) TestScatterGroupInConcurrency(c *C) { } checker := func(ss *selectedStores, expected uint64, delta float64) { - for i := 0; i < testcase.groupCount; i++ { + for i := 0; i < testCase.groupCount; i++ { // comparing the leader distribution group := fmt.Sprintf("group-%v", i) max := uint64(0) @@ -369,7 +369,7 @@ func (s *testScatterRegionSuite) TestScattersGroup(c *C) { for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) } - testcases := []struct { + testCases := []struct { name string failure bool }{ @@ -383,15 +383,15 @@ func (s *testScatterRegionSuite) TestScattersGroup(c *C) { }, } group := "group" - for _, testcase := range testcases { + for _, testCase := range testCases { scatterer := NewRegionScatterer(ctx, tc) regions := map[uint64]*core.RegionInfo{} for i := 1; i <= 100; i++ { regions[uint64(i)] = tc.AddLeaderRegion(uint64(i), 1, 2, 3) } - c.Log(testcase.name) + c.Log(testCase.name) failures := map[uint64]error{} - if testcase.failure { + if testCase.failure { c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/scatterFail", `return(true)`), IsNil) } @@ -412,7 +412,7 @@ func (s *testScatterRegionSuite) TestScattersGroup(c *C) { c.Assert(min, LessEqual, uint64(20)) c.Assert(max, GreaterEqual, uint64(20)) c.Assert(max-min, LessEqual, uint64(3)) - if testcase.failure { + if testCase.failure { c.Assert(failures, HasLen, 1) _, ok := failures[1] c.Assert(ok, IsTrue) diff --git a/server/schedulers/hot_region_test.go b/server/schedulers/hot_region_test.go index 66ed9ec3c9c..a0abd95e2ba 100644 --- 
a/server/schedulers/hot_region_test.go +++ b/server/schedulers/hot_region_test.go @@ -1479,7 +1479,7 @@ func addRegionLeaderReadInfo(tc *mockcluster.Cluster, regions []testRegionInfo) } func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { - testcases := []struct { + testCases := []struct { kind statistics.RWType onlyLeader bool DegreeAfterTransferLeader int @@ -1501,7 +1501,7 @@ func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { }, } - for _, testcase := range testcases { + for _, testCase := range testCases { ctx, cancel := context.WithCancel(context.Background()) opt := config.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) @@ -1512,8 +1512,8 @@ func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { c.Assert(err, IsNil) hb := sche.(*hotScheduler) heartbeat := tc.AddLeaderRegionWithWriteInfo - if testcase.kind == statistics.Read { - if testcase.onlyLeader { + if testCase.kind == statistics.Read { + if testCase.onlyLeader { heartbeat = tc.AddRegionLeaderWithReadInfo } else { heartbeat = tc.AddRegionWithReadInfo @@ -1522,7 +1522,7 @@ func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { tc.AddRegionStore(2, 20) tc.UpdateStorageReadStats(2, 9.5*MB*statistics.StoreHeartBeatReportInterval, 9.5*MB*statistics.StoreHeartBeatReportInterval) reportInterval := uint64(statistics.WriteReportInterval) - if testcase.kind == statistics.Read { + if testCase.kind == statistics.Read { reportInterval = uint64(statistics.ReadReportInterval) } // hot degree increase @@ -1537,15 +1537,15 @@ func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { items = heartbeat(1, 2, 512*KB*reportInterval, 0, 0, reportInterval, []uint64{1, 3}, 1) for _, item := range items { if item.StoreID == 2 { - c.Check(item.HotDegree, Equals, testcase.DegreeAfterTransferLeader) + c.Check(item.HotDegree, Equals, testCase.DegreeAfterTransferLeader) } } - if testcase.DegreeAfterTransferLeader >= 3 { + if testCase.DegreeAfterTransferLeader >= 3 { // try schedule - hb.prepareForBalance(testcase.kind, tc) - leaderSolver := newBalanceSolver(hb, tc, testcase.kind, transferLeader) - leaderSolver.cur = &solution{srcStore: hb.stLoadInfos[toResourceType(testcase.kind, transferLeader)][2]} + hb.prepareForBalance(testCase.kind, tc) + leaderSolver := newBalanceSolver(hb, tc, testCase.kind, transferLeader) + leaderSolver.cur = &solution{srcStore: hb.stLoadInfos[toResourceType(testCase.kind, transferLeader)][2]} c.Check(leaderSolver.filterHotPeers(leaderSolver.cur.srcStore), HasLen, 0) // skip schedule threshold := tc.GetHotRegionCacheHitsThreshold() leaderSolver.minHotDegree = 0 @@ -1557,7 +1557,7 @@ func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { items = heartbeat(1, 2, 512*KB*reportInterval, 0, 0, reportInterval, []uint64{1, 3, 4}, 1) c.Check(len(items), Greater, 0) for _, item := range items { - c.Check(item.HotDegree, Equals, testcase.DegreeAfterTransferLeader+1) + c.Check(item.HotDegree, Equals, testCase.DegreeAfterTransferLeader+1) } items = heartbeat(1, 2, 512*KB*reportInterval, 0, 0, reportInterval, []uint64{1, 4}, 1) c.Check(len(items), Greater, 0) @@ -1566,7 +1566,7 @@ func (s *testHotCacheSuite) TestCheckRegionFlow(c *C) { c.Check(item.GetActionType(), Equals, statistics.Remove) continue } - c.Check(item.HotDegree, Equals, testcase.DegreeAfterTransferLeader+2) + c.Check(item.HotDegree, Equals, testCase.DegreeAfterTransferLeader+2) } cancel() } diff --git a/server/schedulers/scheduler_test.go b/server/schedulers/scheduler_test.go index b51fbbfd73f..64da676d0a8 100644 --- a/server/schedulers/scheduler_test.go +++ 
b/server/schedulers/scheduler_test.go @@ -471,7 +471,7 @@ func (s *testBalanceLeaderSchedulerWithRuleEnabledSuite) TestBalanceLeaderWithCo // Leaders: 16 0 0 // Region1: L F F s.tc.UpdateLeaderCount(1, 16) - testcases := []struct { + testCases := []struct { name string rule *placement.Rule schedule bool @@ -534,10 +534,10 @@ func (s *testBalanceLeaderSchedulerWithRuleEnabledSuite) TestBalanceLeaderWithCo }, } - for _, testcase := range testcases { - c.Logf(testcase.name) - c.Check(s.tc.SetRule(testcase.rule), IsNil) - if testcase.schedule { + for _, testCase := range testCases { + c.Logf(testCase.name) + c.Check(s.tc.SetRule(testCase.rule), IsNil) + if testCase.schedule { c.Check(len(s.schedule()), Equals, 1) } else { c.Assert(s.schedule(), HasLen, 0)
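
The testify migration patches above all apply the same mechanical assertion mapping, with the expected value placed first in re.Equal. As a reference only, here is a minimal, self-contained Go sketch of that mapping using testify's require API; the migration package name and the splitKeys helper are invented for illustration and are not part of the PD codebase or of any patch in this series. require (rather than assert) is used so that a failed assertion aborts the test immediately, matching gocheck's c.Assert semantics.

package migration

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// splitKeys is a hypothetical helper that only exists to give the assertions
// below something to exercise; it is not part of the PD codebase.
func splitKeys(start, end string) ([]string, error) {
	if start >= end {
		return nil, errors.New("invalid key range")
	}
	return []string{start, end}, nil
}

// TestAssertionMapping illustrates the check -> testify translations applied
// throughout the patches above:
//
//	c.Assert(err, IsNil)        -> re.NoError(err)
//	c.Assert(err, NotNil)       -> re.Error(err)
//	c.Assert(v, Equals, x)      -> re.Equal(x, v)   // expected value first
//	c.Assert(v, HasLen, n)      -> re.Len(v, n)
//	c.Assert(b, IsTrue)         -> re.True(b)
//	c.Assert(b, IsFalse)        -> re.False(b)
//	c.Assert(k, BytesEquals, x) -> re.Equal(x, k)
func TestAssertionMapping(t *testing.T) {
	re := require.New(t)

	keys, err := splitKeys("010", "020")
	re.NoError(err)
	re.Len(keys, 2)
	re.Equal("010", keys[0])
	re.Equal([]byte("020"), []byte(keys[1]))
	re.True(len(keys) > 0)
	re.False(keys[0] == keys[1])

	_, err = splitKeys("020", "010")
	re.Error(err)
}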