make test stable
Signed-off-by: lhy1024 <admin@liudos.us>
lhy1024 committed Nov 22, 2023
1 parent e5c4518 commit 8a6f56d
Showing 2 changed files with 88 additions and 58 deletions.
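At a high level, the diff stabilizes these tests in three ways: configuration reads are wrapped in testutil.Eventually/tu.Eventually so a momentarily stale value no longer fails an assertion, stores are registered before the regions that reference them, and a shared checkRegionCount helper waits for the region count to converge before the test issues further API calls. A minimal sketch of the polling pattern outside the PD test harness follows; the helper name and the timeout/tick values are illustrative, not taken from the commit.

package example

import (
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// expectConfigEventually re-fetches the scheduler config until it deeply equals
// the expected map or the deadline passes, instead of asserting on a single read.
func expectConfigEventually(t *testing.T, fetchConfig func() map[string]interface{}, expect map[string]interface{}) {
	re := require.New(t)
	re.Eventually(func() bool {
		return reflect.DeepEqual(expect, fetchConfig())
	}, 10*time.Second, 100*time.Millisecond)
}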
58 changes: 25 additions & 33 deletions tests/pdctl/scheduler/scheduler_test.go
@@ -362,6 +362,14 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) {
"rank-formula-version": "v2",
"split-thresholds": 0.2,
}
checkHotSchedulerConfig := func(expect map[string]interface{}) {
testutil.Eventually(re, func() bool {
var conf1 map[string]interface{}
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
return reflect.DeepEqual(expect, conf1)
})
}

var conf map[string]interface{}
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "list"}, &conf)
re.Equal(expected1, conf)
@@ -370,72 +378,58 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) {
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "src-tolerance-ratio", "1.02"}, nil)
re.Contains(echo, "Success!")
expected1["src-tolerance-ratio"] = 1.02
var conf1 map[string]interface{}
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)

echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "byte,key"}, nil)
re.Contains(echo, "Success!")
expected1["read-priorities"] = []interface{}{"byte", "key"}
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)

echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key"}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key,byte"}, nil)
re.Contains(echo, "Success!")
expected1["read-priorities"] = []interface{}{"key", "byte"}
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "foo,bar"}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", ""}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key,key"}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "byte,byte"}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "key,key,byte"}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)

// write-priorities is divided into write-leader-priorities and write-peer-priorities
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "write-priorities", "key,byte"}, nil)
re.Contains(echo, "Failed!")
re.Contains(echo, "Config item is not found.")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)

echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "rank-formula-version", "v0"}, nil)
re.Contains(echo, "Failed!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
checkHotSchedulerConfig(expected1)
expected1["rank-formula-version"] = "v2"
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "rank-formula-version", "v2"}, nil)
re.Contains(echo, "Success!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)
expected1["rank-formula-version"] = "v1"
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "rank-formula-version", "v1"}, nil)
re.Contains(echo, "Success!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)

expected1["forbid-rw-type"] = "read"
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "forbid-rw-type", "read"}, nil)
re.Contains(echo, "Success!")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(expected1, conf1)
checkHotSchedulerConfig(expected1)

// test compatibility
re.Equal("2.0.0", leaderServer.GetClusterVersion().String())
@@ -446,13 +440,11 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) {
}
re.Equal("5.2.0", leaderServer.GetClusterVersion().String())
// After upgrading, we should not use query.
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(conf1["read-priorities"], []interface{}{"key", "byte"})
checkHotSchedulerConfig(expected1)
// cannot set qps as write-peer-priorities
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "write-peer-priorities", "query,byte"}, nil)
re.Contains(echo, "query is not allowed to be set in priorities for write-peer-priorities")
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler"}, &conf1)
re.Equal(conf1["write-peer-priorities"], []interface{}{"byte", "key"})
checkHotSchedulerConfig(expected1)

// test remove and add
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", "balance-hot-region-scheduler"}, nil)
@@ -462,7 +454,7 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) {

// test balance leader config
conf = make(map[string]interface{})
conf1 = make(map[string]interface{})
conf1 := make(map[string]interface{})
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-leader-scheduler", "show"}, &conf)
re.Equal(4., conf["batch"])
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-leader-scheduler", "set", "batch", "3"}, nil)
88 changes: 63 additions & 25 deletions tests/server/api/region_test.go
@@ -26,7 +26,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/tikv/pd/pkg/core"
"github.com/tikv/pd/pkg/schedule/placement"
"github.com/tikv/pd/pkg/utils/testutil"
tu "github.com/tikv/pd/pkg/utils/testutil"
"github.com/tikv/pd/server/config"
"github.com/tikv/pd/tests"
@@ -39,6 +38,7 @@ type regionTestSuite struct {
func TestRegionTestSuite(t *testing.T) {
suite.Run(t, new(regionTestSuite))
}

func (suite *regionTestSuite) TestSplitRegions() {
env := tests.NewSchedulingTestEnvironment(suite.T())
env.RunTestInTwoModes(suite.checkSplitRegions)
@@ -48,15 +48,17 @@ func (suite *regionTestSuite) checkSplitRegions(cluster *tests.TestCluster) {
leader := cluster.GetLeaderServer()
urlPrefix := leader.GetAddr() + "/pd/api/v1"
re := suite.Require()
r1 := core.NewTestRegionInfo(601, 13, []byte("aaa"), []byte("ggg"))
r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 5, StoreId: 14}, &metapb.Peer{Id: 6, StoreId: 15})
tests.MustPutRegionInfo(re, cluster, r1)
s1 := &metapb.Store{
Id: 13,
State: metapb.StoreState_Up,
NodeState: metapb.NodeState_Serving,
}
tests.MustPutStore(re, cluster, s1)
r1 := core.NewTestRegionInfo(601, 13, []byte("aaa"), []byte("ggg"))
r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 5, StoreId: 14}, &metapb.Peer{Id: 6, StoreId: 15})
tests.MustPutRegionInfo(re, cluster, r1)
suite.checkRegionCount(cluster, 1)

newRegionID := uint64(11)
body := fmt.Sprintf(`{"retry_limit":%v, "split_keys": ["%s","%s","%s"]}`, 3,
hex.EncodeToString([]byte("bbb")),
@@ -87,23 +89,30 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tests.TestCluster) {
leader := cluster.GetLeaderServer()
urlPrefix := leader.GetAddr() + "/pd/api/v1"
re := suite.Require()
for i := 13; i <= 15; i++ {
s1 := &metapb.Store{
Id: uint64(i),
State: metapb.StoreState_Up,
NodeState: metapb.NodeState_Serving,
}
tests.MustPutStore(re, cluster, s1)
}
r1 := core.NewTestRegionInfo(557, 13, []byte("a1"), []byte("a2"))
r2 := core.NewTestRegionInfo(558, 14, []byte("a2"), []byte("a3"))
r3 := core.NewTestRegionInfo(559, 15, []byte("a3"), []byte("a4"))
tests.MustPutRegionInfo(re, cluster, r1)
tests.MustPutRegionInfo(re, cluster, r2)
tests.MustPutRegionInfo(re, cluster, r3)
body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")))
suite.checkRegionCount(cluster, 3)

body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")))
err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/accelerate-schedule", urlPrefix), []byte(body), tu.StatusOK(re))
suite.NoError(err)
idList := leader.GetRaftCluster().GetSuspectRegions()
if sche := cluster.GetSchedulingPrimaryServer(); sche != nil {
idList = sche.GetCluster().GetCoordinator().GetCheckerController().GetSuspectRegions()
}
testutil.Eventually(re, func() bool {
return len(idList) == 2
})
re.Len(idList, 2, len(idList))
}

func (suite *regionTestSuite) TestAccelerateRegionsScheduleInRanges() {
@@ -115,6 +124,14 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *tests.TestCluster) {
leader := cluster.GetLeaderServer()
urlPrefix := leader.GetAddr() + "/pd/api/v1"
re := suite.Require()
for i := 13; i <= 17; i++ {
s1 := &metapb.Store{
Id: uint64(i),
State: metapb.StoreState_Up,
NodeState: metapb.NodeState_Serving,
}
tests.MustPutStore(re, cluster, s1)
}
r1 := core.NewTestRegionInfo(557, 13, []byte("a1"), []byte("a2"))
r2 := core.NewTestRegionInfo(558, 14, []byte("a2"), []byte("a3"))
r3 := core.NewTestRegionInfo(559, 15, []byte("a3"), []byte("a4"))
@@ -125,17 +142,17 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *tests.TestCluster) {
tests.MustPutRegionInfo(re, cluster, r3)
tests.MustPutRegionInfo(re, cluster, r4)
tests.MustPutRegionInfo(re, cluster, r5)
body := fmt.Sprintf(`[{"start_key":"%s", "end_key": "%s"}, {"start_key":"%s", "end_key": "%s"}]`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")), hex.EncodeToString([]byte("a4")), hex.EncodeToString([]byte("a6")))
suite.checkRegionCount(cluster, 5)

body := fmt.Sprintf(`[{"start_key":"%s", "end_key": "%s"}, {"start_key":"%s", "end_key": "%s"}]`,
hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")), hex.EncodeToString([]byte("a4")), hex.EncodeToString([]byte("a6")))
err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/accelerate-schedule/batch", urlPrefix), []byte(body), tu.StatusOK(re))
suite.NoError(err)
idList := leader.GetRaftCluster().GetSuspectRegions()
if sche := cluster.GetSchedulingPrimaryServer(); sche != nil {
idList = sche.GetCluster().GetCoordinator().GetCheckerController().GetSuspectRegions()
}
testutil.Eventually(re, func() bool {
return len(idList) == 4
})
re.Len(idList, 4)
}

func (suite *regionTestSuite) TestScatterRegions() {
@@ -147,6 +164,14 @@ func (suite *regionTestSuite) checkScatterRegions(cluster *tests.TestCluster) {
leader := cluster.GetLeaderServer()
urlPrefix := leader.GetAddr() + "/pd/api/v1"
re := suite.Require()
for i := 13; i <= 16; i++ {
s1 := &metapb.Store{
Id: uint64(i),
State: metapb.StoreState_Up,
NodeState: metapb.NodeState_Serving,
}
tests.MustPutStore(re, cluster, s1)
}
r1 := core.NewTestRegionInfo(601, 13, []byte("b1"), []byte("b2"))
r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 5, StoreId: 14}, &metapb.Peer{Id: 6, StoreId: 15})
r2 := core.NewTestRegionInfo(602, 13, []byte("b2"), []byte("b3"))
@@ -156,16 +181,9 @@ func (suite *regionTestSuite) checkScatterRegions(cluster *tests.TestCluster) {
tests.MustPutRegionInfo(re, cluster, r1)
tests.MustPutRegionInfo(re, cluster, r2)
tests.MustPutRegionInfo(re, cluster, r3)
for i := 13; i <= 16; i++ {
s1 := &metapb.Store{
Id: uint64(i),
State: metapb.StoreState_Up,
NodeState: metapb.NodeState_Serving,
}
tests.MustPutStore(re, cluster, s1)
}
body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("b1")), hex.EncodeToString([]byte("b3")))
suite.checkRegionCount(cluster, 3)

body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("b1")), hex.EncodeToString([]byte("b3")))
err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/scatter", urlPrefix), []byte(body), tu.StatusOK(re))
suite.NoError(err)
oc := leader.GetRaftCluster().GetOperatorController()
@@ -189,7 +207,6 @@ func (suite *regionTestSuite) TestCheckRegionsReplicated() {
func(conf *config.Config, serverName string) {
conf.Replication.EnablePlacementRules = true
})
// FIXME: enable this test in two modes.
env.RunTestInPDMode(suite.checkRegionsReplicated)
}

@@ -199,8 +216,15 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) {
re := suite.Require()

// add test region
s1 := &metapb.Store{
Id: 1,
State: metapb.StoreState_Up,
NodeState: metapb.NodeState_Serving,
}
tests.MustPutStore(re, cluster, s1)
r1 := core.NewTestRegionInfo(2, 1, []byte("a"), []byte("b"))
tests.MustPutRegionInfo(re, cluster, r1)
suite.checkRegionCount(cluster, 1)

// set the bundle
bundle := []placement.GroupBundle{
Expand Down Expand Up @@ -237,9 +261,11 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster)
err = tu.CheckPostJSON(testDialClient, urlPrefix+"/config/placement-rule", data, tu.StatusOK(re))
suite.NoError(err)

err = tu.ReadGetJSON(re, testDialClient, url, &status)
suite.NoError(err)
suite.Equal("REPLICATED", status)
tu.Eventually(re, func() bool {
err = tu.ReadGetJSON(re, testDialClient, url, &status)
suite.NoError(err)
return status == "REPLICATED"
})

suite.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/handler/mockPending", "return(true)"))
err = tu.ReadGetJSON(re, testDialClient, url, &status)
Expand Down Expand Up @@ -290,3 +316,15 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster)
suite.NoError(err)
suite.Equal("REPLICATED", status)
}

func (suite *regionTestSuite) checkRegionCount(cluster *tests.TestCluster, count int) {
leader := cluster.GetLeaderServer()
tu.Eventually(suite.Require(), func() bool {
return leader.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count == count
})
if sche := cluster.GetSchedulingPrimaryServer(); sche != nil {
tu.Eventually(suite.Require(), func() bool {
return sche.GetCluster().GetRegionCount([]byte{}, []byte{}) == count
})
}
}
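
checkRegionCount polls two views of the same counter because, when the scheduling microservice is enabled, region heartbeats reach the API server and the scheduling server asynchronously. A rough sketch of that idea in isolation follows; tu.Eventually is assumed to wrap testify's require.Eventually with project-default durations, and the helper name and timings below are illustrative.

package example

import (
	"time"

	"github.com/stretchr/testify/require"
)

// waitForCount polls one or two independent views of a counter until each
// reports the expected value, mirroring the structure of checkRegionCount.
func waitForCount(re *require.Assertions, primary func() int, secondary func() int, want int) {
	re.Eventually(func() bool { return primary() == want }, 10*time.Second, 100*time.Millisecond)
	if secondary != nil {
		re.Eventually(func() bool { return secondary() == want }, 10*time.Second, 100*time.Millisecond)
	}
}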
