Commit 80bc806

update test

Connor1996 committed Apr 18, 2018
1 parent e60230b commit 80bc806
Showing 7 changed files with 104 additions and 73 deletions.
2 changes: 1 addition & 1 deletion server/config.go
@@ -353,7 +353,7 @@ type ScheduleConfig struct {
 	// TolerantSizeRatio is the ratio of buffer size for balance scheduler.
 	TolerantSizeRatio float64 `toml:"tolerant-size-ratio,omitempty" json:"tolerant-size-ratio"`
 	//
-	//      high space period        transition period          low space period
+	//      high space stage         transition stage           low space stage
 	//   |--------------------|-----------------------------|-------------------------|
 	//   ^                    ^                              ^                         ^
 	//   0  (1 - LowSpaceRatio) * capacity  (1 - HighSpaceRatio) * capacity      capacity
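
The stage boundaries in this diagram can be read as a simple classifier over a store's used size. The helper below is purely illustrative and not part of PD; it takes the two interior tick marks as precomputed thresholds, since the exact boundary expressions depend on the configured ratios:

func spaceStage(used, lowerBound, upperBound uint64) string {
	// Hypothetical helper, not PD code: lowerBound and upperBound are the
	// two interior tick marks on the diagram above, e.g. derived from
	// LowSpaceRatio/HighSpaceRatio and the store's capacity.
	switch {
	case used < lowerBound:
		return "high space stage"
	case used < upperBound:
		return "transition stage"
	default:
		return "low space stage"
	}
}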
4 changes: 2 additions & 2 deletions server/coordinator_test.go
@@ -65,8 +65,8 @@ func (c *testClusterInfo) addRegionStore(storeID uint64, regionCount int) {
 	store.LastHeartbeatTS = time.Now()
 	store.RegionCount = regionCount
 	store.RegionSize = int64(regionCount) * 10
-	store.Stats.Capacity = uint64(1024)
-	store.Stats.Available = store.Stats.Capacity
+	store.Stats.Capacity = 1000 * (1 << 20)
+	store.Stats.Available = store.Stats.Capacity - uint64(store.LeaderSize)
 	c.putStore(store)
 }
89 changes: 46 additions & 43 deletions server/schedule/mockcluster.go
@@ -49,6 +49,13 @@ func (mc *MockCluster) ScanRegions(startKey []byte, limit int) []*core.RegionInfo
 	return mc.Regions.ScanRange(startKey, limit)
 }
 
+func (mc *MockCluster) LoadRegion(regionID uint64, followerIds ...uint64) {
+	// regions loaded from etcd have no leader
+	r := mc.newMockRegionInfo(regionID, 0, followerIds...)
+	r.Leader = nil
+	mc.PutRegion(r)
+}
+
 // IsRegionHot checks if the region is hot
 func (mc *MockCluster) IsRegionHot(id uint64) bool {
 	return mc.BasicCluster.IsRegionHot(id, mc.GetHotRegionLowThreshold())
@@ -109,9 +116,9 @@ func (mc *MockCluster) AddLeaderStore(storeID uint64, leaderCount int) {
 	store.Stats = &pdpb.StoreStats{}
 	store.LastHeartbeatTS = time.Now()
 	store.LeaderCount = leaderCount
-	store.Stats.Capacity = uint64(1024)
-	store.Stats.Available = store.Stats.Capacity
 	store.LeaderSize = int64(leaderCount) * 10
+	store.Stats.Capacity = 1000 * (1 << 20)
+	store.Stats.Available = store.Stats.Capacity - uint64(store.LeaderSize)
 	mc.PutStore(store)
 }

@@ -121,32 +128,8 @@ func (mc *MockCluster) AddRegionStore(storeID uint64, regionCount int) {
 	store.LastHeartbeatTS = time.Now()
 	store.RegionCount = regionCount
 	store.RegionSize = int64(regionCount) * 10
-	store.Stats.Capacity = uint64(1024)
-	store.Stats.Available = store.Stats.Capacity
-	mc.PutStore(store)
-}
-
-func (mc *MockCluster) UpdateStoreLeaderWeight(storeID uint64, weight float64) {
-	store := mc.GetStore(storeID)
-	store.LeaderWeight = weight
-	mc.PutStore(store)
-}
-
-func (mc *MockCluster) UpdateStoreRegionWeight(storeID uint64, weight float64) {
-	store := mc.GetStore(storeID)
-	store.RegionWeight = weight
-	mc.PutStore(store)
-}
-
-func (mc *MockCluster) UpdateStoreLeaderSize(storeID uint64, size int64) {
-	store := mc.GetStore(storeID)
-	store.LeaderSize = size
-	mc.PutStore(store)
-}
-
-func (mc *MockCluster) UpdateStoreRegionSize(storeID uint64, size int64) {
-	store := mc.GetStore(storeID)
-	store.RegionSize = size
+	store.Stats.Capacity = 1000 * (1 << 20)
+	store.Stats.Available = store.Stats.Capacity - uint64(store.RegionSize)
 	mc.PutStore(store)
 }

@@ -172,10 +155,13 @@ func (mc *MockCluster) AddLeaderRegionWithRange(regionID uint64, startKey string
 	mc.PutRegion(r)
 }
 
-func (mc *MockCluster) LoadRegion(regionID uint64, followerIds ...uint64) {
-	// regions loaded from etcd have no leader
-	r := mc.newMockRegionInfo(regionID, 0, followerIds...)
-	r.Leader = nil
+func (mc *MockCluster) AddLeaderRegionWithReadInfo(regionID uint64, leaderID uint64, readBytes uint64, followerIds ...uint64) {
+	r := mc.newMockRegionInfo(regionID, leaderID, followerIds...)
+	r.ReadBytes = readBytes
+	isUpdate, item := mc.BasicCluster.CheckReadStatus(r)
+	if isUpdate {
+		mc.HotCache.Update(regionID, item, ReadFlow)
+	}
 	mc.PutRegion(r)
 }

@@ -189,6 +175,32 @@ func (mc *MockCluster) AddLeaderRegionWithWriteInfo(regionID uint64, leaderID ui
 	mc.PutRegion(r)
 }
 
+func (mc *MockCluster) UpdateStoreLeaderWeight(storeID uint64, weight float64) {
+	store := mc.GetStore(storeID)
+	store.LeaderWeight = weight
+	mc.PutStore(store)
+}
+
+func (mc *MockCluster) UpdateStoreRegionWeight(storeID uint64, weight float64) {
+	store := mc.GetStore(storeID)
+	store.RegionWeight = weight
+	mc.PutStore(store)
+}
+
+func (mc *MockCluster) UpdateStoreLeaderSize(storeID uint64, size int64) {
+	store := mc.GetStore(storeID)
+	store.LeaderSize = size
+	store.Stats.Available = store.Stats.Capacity - uint64(store.LeaderSize)
+	mc.PutStore(store)
+}
+
+func (mc *MockCluster) UpdateStoreRegionSize(storeID uint64, size int64) {
+	store := mc.GetStore(storeID)
+	store.RegionSize = size
+	store.Stats.Available = store.Stats.Capacity - uint64(store.RegionSize)
+	mc.PutStore(store)
+}
+
 func (mc *MockCluster) UpdateLeaderCount(storeID uint64, leaderCount int) {
 	store := mc.GetStore(storeID)
 	store.LeaderCount = leaderCount
@@ -217,7 +229,7 @@ func (mc *MockCluster) UpdatePendingPeerCount(storeID uint64, pendingPeerCount i
 
 func (mc *MockCluster) UpdateStorageRatio(storeID uint64, usedRatio, availableRatio float64) {
 	store := mc.GetStore(storeID)
-	store.Stats.Capacity = uint64(1024)
+	store.Stats.Capacity = 1000 * (1 << 20)
 	store.Stats.UsedSize = uint64(float64(store.Stats.Capacity) * usedRatio)
 	store.Stats.Available = uint64(float64(store.Stats.Capacity) * availableRatio)
 	mc.PutStore(store)
@@ -228,22 +240,13 @@ func (mc *MockCluster) UpdateStorageWrittenBytes(storeID uint64, BytesWritten ui
 	store.Stats.BytesWritten = BytesWritten
 	mc.PutStore(store)
 }
 
 func (mc *MockCluster) UpdateStorageReadBytes(storeID uint64, BytesRead uint64) {
 	store := mc.GetStore(storeID)
 	store.Stats.BytesRead = BytesRead
 	mc.PutStore(store)
 }
 
-func (mc *MockCluster) AddLeaderRegionWithReadInfo(regionID uint64, leaderID uint64, readBytes uint64, followerIds ...uint64) {
-	r := mc.newMockRegionInfo(regionID, leaderID, followerIds...)
-	r.ReadBytes = readBytes
-	isUpdate, item := mc.BasicCluster.CheckReadStatus(r)
-	if isUpdate {
-		mc.HotCache.Update(regionID, item, ReadFlow)
-	}
-	mc.PutRegion(r)
-}
-
 func (mc *MockCluster) newMockRegionInfo(regionID uint64, leaderID uint64, followerIds ...uint64) *core.RegionInfo {
 	region := &metapb.Region{
 		Id: regionID,
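
Taken together, these helpers let a test assemble cluster state in a few lines. A minimal usage sketch, assuming the same package imports as the tests in this commit (the IDs, counts, and byte values below are arbitrary):

func exampleMockClusterSetup() {
	opt := schedule.NewMockSchedulerOptions()
	mc := schedule.NewMockCluster(opt)

	// Two leader stores; capacity is 1000MB and leader size is count * 10.
	mc.AddLeaderStore(1, 60)
	mc.AddLeaderStore(2, 50)

	// A region led by store 1 with a follower on store 2, reporting 64MB of read flow.
	mc.AddLeaderRegionWithReadInfo(1, 1, 64*(1<<20), 2)

	// A region as it appears when loaded from etcd: no leader yet.
	mc.LoadRegion(2, 1, 2)

	// Push store 1 to 90% used / 10% available.
	mc.UpdateStorageRatio(1, 0.9, 0.1)
}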
2 changes: 1 addition & 1 deletion server/schedule/operator_test.go
@@ -160,7 +160,7 @@ func (s *testOperatorSuite) TestInfluence(c *C) {
 	MergeRegion{IsPassive: true}.Influence(opInfluence, region)
 	c.Assert(*opInfluence[1], DeepEquals, StoreInfluence{
 		LeaderSize:  -10,
-		LeaderCount: -1,
+		LeaderCount: -2,
 		RegionSize:  -10,
 		RegionCount: -2,
 	})
7 changes: 5 additions & 2 deletions server/schedulers/balance_leader.go
@@ -156,8 +156,11 @@ func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source,
 	}
 
 	if !shouldBalance(cluster, source, target, core.LeaderKind, region, opInfluence) {
-		log.Debugf("[%s] skip balance region%d, source size: %v, source score: %v, target size: %v, target score: %v, region size: %v", l.GetName(), region.GetId(),
-			source.LeaderSize, source.LeaderScore(0), target.LeaderSize, target.LeaderScore(0), region.ApproximateSize)
+		log.Debugf(`[%s] skip balance region%d, source size: %v, source score: %v, source influence: %v,
+			target size: %v, target score: %v, target influence: %v, region size: %v`, l.GetName(), region.GetId(),
+			source.LeaderSize, source.LeaderScore(0), opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.LeaderKind),
+			target.LeaderSize, target.LeaderScore(0), opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.LeaderKind),
+			region.ApproximateSize)
 		schedulerCounter.WithLabelValues(l.GetName(), "skip").Inc()
 		return nil
 	}
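
The added log fields mirror what shouldBalance now takes into account: the pending influence of in-flight operators on each store. As a rough sketch of the idea only (the real shouldBalance compares weighted scores with a tolerant ratio, not raw sizes; this simplified form is an assumption for illustration):

func wouldBalance(sourceSize, targetSize, regionSize, sourceInfluence, targetInfluence int64) bool {
	// Each store's size is first adjusted by the resource size of its
	// pending operators; the move only makes sense if the source still
	// outweighs the target after shifting the region.
	adjustedSource := sourceSize + sourceInfluence - regionSize
	adjustedTarget := targetSize + targetInfluence + regionSize
	return adjustedSource > adjustedTarget
}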
8 changes: 6 additions & 2 deletions server/schedulers/balance_region.go
@@ -145,9 +145,13 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
 	log.Debugf("[region %d] source store id is %v, target store id is %v", region.GetId(), source.GetId(), target.GetId())
 
 	if !shouldBalance(cluster, source, target, core.RegionKind, region, opInfluence) {
-		log.Debugf("[%s] skip balance region%d, source size: %v, source score: %v, target size: %v, target score: %v, region size: %v", s.GetName(), region.GetId(),
+		log.Debugf(`[%s] skip balance region%d, source size: %v, source score: %v, source influence: %v,
+			target size: %v, target score: %v, target influence: %v, region size: %v`, s.GetName(), region.GetId(),
 			source.RegionSize, source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0),
-			target.RegionSize, target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0), region.ApproximateSize)
+			opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.RegionKind),
+			target.RegionSize, target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0),
+			opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.RegionKind),
+			region.ApproximateSize)
 		schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc()
 		return nil
 	}
65 changes: 43 additions & 22 deletions server/schedulers/balance_test.go
@@ -36,37 +36,58 @@ type testBalanceSpeedSuite struct{}
 
 type testBalanceSpeedCase struct {
 	sourceCount    uint64
 	targetCount    uint64
-	avgScore       float64
-	diff           int
+	regionSize     int64
+	expectedResult bool
 }
 
-func (s *testBalanceSpeedSuite) TestShouldBalance(c *C) {
-	testCases := []struct {
-		sourceSize   int64
-		sourceWeight float64
-		targetSize   int64
-		targetWeight float64
-		moveSize     float64
-		result       bool
-	}{
-		{100, 1, 80, 1, 5, true},
-		{100, 1, 80, 1, 15, false},
-		{100, 1, 120, 2, 10, true},
-		{100, 1, 180, 2, 10, false},
-		{100, 0.5, 180, 1, 10, false},
-		{100, 0.5, 180, 1, 5, true},
-		{100, 1, 10, 0, 10, false}, // targetWeight=0
-		{100, 0, 10, 0, 10, false},
-		{100, 0, 500, 1, 50, true}, // sourceWeight=0
+func (s *testBalanceSpeedSuite) TestBalanceSpeed(c *C) {
+	tests := []testBalanceSpeedCase{
+		// all store capacity is 1000MB
+		// size = count * 10
+
+		// target size is zero
+		{2, 0, 1, true},
+		{2, 0, 10, false},
+		// all in high space stage
+		{10, 5, 1, true},
+		{10, 5, 20, false},
+		{10, 10, 1, false},
+		{10, 10, 20, false},
+		// all in transition stage
+		{60, 50, 1, true},
+		{60, 50, 50, false},
+		{60, 60, 1, false},
+		// all in low space stage
+		{90, 80, 1, true},
+		{90, 80, 50, false},
+		{90, 90, 1, false},
+		// one in high space stage, the other in transition stage
+		{55, 45, 5, true},
+		{55, 40, 50, false},
+		// one in transition stage, the other in low space stage
+		{80, 75, 5, true},
+		{80, 75, 50, false},
 	}
 
-	for _, t := range testCases {
-		c.Assert(shouldBalance(t.sourceSize, t.sourceWeight, t.targetSize, t.targetWeight, t.moveSize), Equals, t.result)
+	opt := schedule.NewMockSchedulerOptions()
+	tc := schedule.NewMockCluster(opt)
+
+	for _, t := range tests {
+		tc.AddLeaderStore(1, int(t.sourceCount))
+		tc.AddLeaderStore(2, int(t.targetCount))
+		source := tc.GetStore(1)
+		target := tc.GetStore(2)
+		region := &core.RegionInfo{ApproximateSize: t.regionSize}
+		c.Assert(shouldBalance(tc, source, target, core.LeaderKind, region, schedule.NewOpInfluence(nil, tc)), Equals, t.expectedResult)
 	}
+
+	for _, t := range tests {
+		tc.AddRegionStore(1, int(t.sourceCount))
+		tc.AddRegionStore(2, int(t.targetCount))
+		source := tc.GetStore(1)
+		target := tc.GetStore(2)
+		region := &core.RegionInfo{ApproximateSize: t.regionSize}
+		c.Assert(shouldBalance(tc, source, target, core.RegionKind, region, schedule.NewOpInfluence(nil, tc)), Equals, t.expectedResult)
+	}
 }
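
The stage comments in the table follow directly from the mock cluster's constants: capacity is 1000MB and a store's size is count * 10 MB. A hypothetical helper (not part of the test) makes the arithmetic explicit:

func usedPercent(count uint64) uint64 {
	const capacityMB = 1000
	return count * 10 * 100 / capacityMB // e.g. 10 -> 10%, 60 -> 60%, 90 -> 90%
}

So {10, 5, ...} puts the stores around 10% and 5% used, {60, 50, ...} around 60% and 50%, and {90, 80, ...} around 90% and 80%, matching the table's high space, transition, and low space groupings.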
