*: remove full-width symbols (#6753)
ref #4399

Signed-off-by: lhy1024 <admin@liudos.us>

Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
lhy1024 and ti-chi-bot[bot] committed Jul 5, 2023
1 parent 693e172 commit 88aec38
Showing 14 changed files with 21 additions and 21 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -242,7 +242,7 @@ test-tso-consistency: install-tools
TASK_COUNT=1
TASK_ID=1

# The command should be used in daily CI，it will split some tasks to run parallel.
# The command should be used in daily CI, it will split some tasks to run parallel.
# It should retain report.xml,coverage,coverage.xml and package.list to analyze.
test-with-cover-parallel: install-tools dashboard-ui split
@$(FAILPOINT_ENABLE)
2 changes: 1 addition & 1 deletion client/resource_group/controller/controller.go
@@ -60,7 +60,7 @@ type ResourceGroupKVInterceptor interface {
OnResponse(resourceGroupName string, req RequestInfo, resp ResponseInfo) (*rmpb.Consumption, error)
}

// ResourceGroupProvider provides some api to interact with resource manager server
// ResourceGroupProvider provides some api to interact with resource manager server.
type ResourceGroupProvider interface {
GetResourceGroup(ctx context.Context, resourceGroupName string) (*rmpb.ResourceGroup, error)
AddResourceGroup(ctx context.Context, metaGroup *rmpb.ResourceGroup) (string, error)
2 changes: 1 addition & 1 deletion client/resource_group/controller/limiter.go
@@ -185,7 +185,7 @@ func (r *Reservation) CancelAt(now time.Time) {

// Reserve returns a Reservation that indicates how long the caller must wait before n events happen.
// The Limiter takes this Reservation into account when allowing future events.
// The returned Reservations OK() method returns false if wait duration exceeds deadline.
// The returned Reservation's OK() method returns false if wait duration exceeds deadline.
// Usage example:
//
// r := lim.Reserve(time.Now(), 1)
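The Reserve comment above follows the reservation pattern of golang.org/x/time/rate, on which the controller's Limiter is modeled. A minimal sketch of that pattern, using the standard library package rather than PD's internal limiter (whose exact signatures may differ):

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(10, 1) // 10 events per second, burst of 1

	// Reserve one event; the Reservation reports how long the caller must wait.
	r := lim.ReserveN(time.Now(), 1)
	if !r.OK() {
		// n exceeds the limiter's burst, so the request can never be served.
		fmt.Println("request exceeds burst; cannot reserve")
		return
	}
	time.Sleep(r.Delay()) // wait out the reservation, then proceed
	fmt.Println("event permitted")
}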
4 changes: 2 additions & 2 deletions pkg/cgroup/cgroup.go
@@ -234,10 +234,10 @@ func getCgroupDetails(mountInfoPath string, cRoot string, controller string) (mo
// It is possible that the controller mount and the cgroup path are not the same (both are relative to the NS root).
// So start with the mount and construct the relative path of the cgroup.
// To test:
// 1、start a docker to run unit test or tidb-server
// 1. start a docker to run unit test or tidb-server
// > docker run -it --cpus=8 --memory=8g --name test --rm ubuntu:18.04 bash
//
// 2、change the limit when the container is running
// 2. change the limit when the container is running
// docker update --cpus=8 <containers>
nsRelativePath := string(fields[3])
if !strings.Contains(nsRelativePath, "..") {
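For context, fields[3] in the hunk above is the mount root column of /proc/self/mountinfo (see proc(5)). A self-contained sketch of extracting that field from one mountinfo line; the sample line and variable names are illustrative, not taken from PD:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Per proc(5): ID parentID major:minor root mount-point options ...
	line := "36 25 0:30 /docker/abc123 /sys/fs/cgroup/memory rw,nosuid,nodev - cgroup cgroup rw,memory"
	fields := strings.Fields(line)
	// fields[3] is the mount's root inside the namespace; getCgroupDetails combines
	// it with the mount point to locate the container's own cgroup directory.
	nsRelativePath := fields[3]
	if !strings.Contains(nsRelativePath, "..") {
		fmt.Println("cgroup root relative to namespace:", nsRelativePath)
	}
}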
2 changes: 1 addition & 1 deletion pkg/core/rangetree/range_tree_test.go
@@ -100,7 +100,7 @@ func TestRingPutItem(t *testing.T) {
re.Len(bucketTree.GetOverlaps(newSimpleBucketItem([]byte("010"), []byte("110"))), 2)
re.Empty(bucketTree.GetOverlaps(newSimpleBucketItem([]byte("200"), []byte("300"))))

// test1 insert one key range, the old overlaps will retain like split buckets.
// test1: insert one key range, the old overlaps will retain like split buckets.
// key range: [002,010],[010,090],[090,100],[100,200]
bucketTree.Update(newSimpleBucketItem([]byte("010"), []byte("090")))
re.Equal(4, bucketTree.Len())
2 changes: 1 addition & 1 deletion pkg/core/store_test.go
@@ -174,7 +174,7 @@ func TestLowSpaceScoreV2(t *testing.T) {
bigger: newStoreInfoWithAvailable(1, 10*units.GiB, 100*units.GiB, 1.5),
small: newStoreInfoWithAvailable(2, 10*units.GiB, 100*units.GiB, 1.4),
}, {
// store1 and store2 has same capacity and regionSize（40g)
// store1 and store2 has same capacity and regionSize (40g)
// but store1 has less available space size
bigger: newStoreInfoWithAvailable(1, 60*units.GiB, 100*units.GiB, 1),
small: newStoreInfoWithAvailable(2, 80*units.GiB, 100*units.GiB, 2),
2 changes: 1 addition & 1 deletion pkg/schedule/checker/replica_checker_test.go
@@ -162,7 +162,7 @@ func (suite *replicaCheckerTestSuite) TestDownPeer() {
suite.NotNil(op)
suite.Equal("remove-extra-down-replica", op.Desc())

// down a peer,the number of peers(except learner) is not enough.
// down a peer, the number of peers(except learner) is not enough.
op = suite.downPeerAndCheck(metapb.PeerRole_Learner)
suite.NotNil(op)
suite.Equal("replace-down-replica", op.Desc())
2 changes: 1 addition & 1 deletion pkg/schedule/labeler/rules.go
@@ -171,7 +171,7 @@ func (rule *LabelRule) expireBefore(t time.Time) bool {
return rule.minExpire.Before(t)
}

// initKeyRangeRulesFromLabelRuleData init and adjust []KeyRangeRule from `LabelRule.Data
// initKeyRangeRulesFromLabelRuleData init and adjust []KeyRangeRule from `LabelRule.Data`
func initKeyRangeRulesFromLabelRuleData(data interface{}) ([]*KeyRangeRule, error) {
rules, ok := data.([]interface{})
if !ok {
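initKeyRangeRulesFromLabelRuleData adjusts loosely typed rule data (typically decoded from JSON), as the data.([]interface{}) assertion above suggests. A hedged sketch of that pattern — the start_key/end_key field names and the return shape are assumptions for illustration, not PD's exact types:

package main

import (
	"encoding/json"
	"fmt"
)

// keyRangesFromRuleData converts untyped rule data into (start, end) pairs.
func keyRangesFromRuleData(data interface{}) ([][2]string, error) {
	items, ok := data.([]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid rule data type %T", data)
	}
	ranges := make([][2]string, 0, len(items))
	for _, item := range items {
		m, ok := item.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("invalid rule item type %T", item)
		}
		start, _ := m["start_key"].(string)
		end, _ := m["end_key"].(string)
		ranges = append(ranges, [2]string{start, end})
	}
	return ranges, nil
}

func main() {
	var data interface{}
	_ = json.Unmarshal([]byte(`[{"start_key":"7480","end_key":"7481"}]`), &data)
	fmt.Println(keyRangesFromRuleData(data))
}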
2 changes: 1 addition & 1 deletion pkg/schedule/operator/operator.go
@@ -129,7 +129,7 @@ func (o *Operator) String() string {
for i := range o.steps {
stepStrs[i] = o.steps[i].String()
}
s := fmt.Sprintf("%s {%s} (kind:%s, region:%v(%v, %v), createAt:%s, startAt:%s, currentStep:%v, size:%d, steps:[%s]，timeout:[%s])",
s := fmt.Sprintf("%s {%s} (kind:%s, region:%v(%v, %v), createAt:%s, startAt:%s, currentStep:%v, size:%d, steps:[%s], timeout:[%s])",
o.desc, o.brief, o.kind, o.regionID, o.regionEpoch.GetVersion(), o.regionEpoch.GetConfVer(), o.GetCreateTime(),
o.GetStartTime(), atomic.LoadInt32(&o.currentStep), o.ApproximateSize, strings.Join(stepStrs, ", "), o.timeout.String())
if o.CheckSuccess() {
2 changes: 1 addition & 1 deletion pkg/schedule/operator/operator_controller.go
@@ -48,7 +48,7 @@ var (
fastNotifyInterval = 2 * time.Second
// StoreBalanceBaseTime represents the base time of balance rate.
StoreBalanceBaseTime float64 = 60
// FastOperatorFinishTime min finish time, if finish duration less than it,op will be pushed to fast operator queue
// FastOperatorFinishTime min finish time, if finish duration less than it, op will be pushed to fast operator queue
FastOperatorFinishTime = 10 * time.Second
)

4 changes: 2 additions & 2 deletions pkg/schedule/placement/fit_region_test.go
@@ -326,7 +326,7 @@ func BenchmarkFitRegionWithMoreRulesAndStoreLabels(b *testing.B) {
label := &metapb.StoreLabel{Key: "exclusive", Value: "exclusive"}
labels = append(labels, label)
// 5 peers in 5 different stores,
// split the stores(peers) to three zones,make the number of peers in each zone: 2:2:1
// split the stores(peers) to three zones, make the number of peers in each zone: 2:2:1
for _, peer := range region.GetPeers() {
storeID := peer.StoreId
store := core.NewStoreInfo(&metapb.Store{Id: storeID}, core.SetLastHeartbeatTS(time.Now()), core.SetStoreLabels(labels))
@@ -380,7 +380,7 @@ func BenchmarkFitRegionWithLocationLabels(b *testing.B) {
// create stores
lists := make([]*core.StoreInfo, 0)
// 10 peers in 10 different stores,
// split the stores(peers) to three zones,make the number of peers in each zone: 4:3:3
// split the stores(peers) to three zones, make the number of peers in each zone: 4:3:3
for idx, peer := range region.GetPeers() {
storeID := peer.StoreId
zoneInfo := &metapb.StoreLabel{Key: "zone", Value: fmt.Sprintf("z_%02d", idx%3)}
6 changes: 3 additions & 3 deletions pkg/statistics/buckets/hot_bucket_task_test.go
@@ -68,7 +68,7 @@ func TestCheckBucketsTask(t *testing.T) {
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
hotCache := NewBucketsCache(ctx)
// case1 add bucket successfully
// case1: add bucket successfully
buckets := newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20"), []byte("30")}, 0)
task := NewCheckPeerTask(buckets)
re.True(hotCache.CheckAsync(task))
@@ -93,7 +93,7 @@ func TestCheckBucketsTask(t *testing.T) {
re.Len(item, 1)
re.Equal(-2, item[0].HotDegree)

// case3：add bucket successful and the hot degree should inherit from the old one.
// case3: add bucket successful and the hot degree should inherit from the old one.
buckets = newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20")}, 0)
task = NewCheckPeerTask(buckets)
re.True(hotCache.CheckAsync(task))
@@ -109,7 +109,7 @@ func TestCollectBucketStatsTask(t *testing.T) {
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
hotCache := NewBucketsCache(ctx)
// case1 add bucket successfully
// case1: add bucket successfully
for i := uint64(0); i < 10; i++ {
buckets := convertToBucketTreeItem(newTestBuckets(i, 1, [][]byte{[]byte(strconv.FormatUint(i*10, 10)),
[]byte(strconv.FormatUint((i+1)*10, 10))}, 0))
6 changes: 3 additions & 3 deletions server/config/store_config_test.go
@@ -133,21 +133,21 @@ func TestMergeCheck(t *testing.T) {
mergeKeys: 200000,
pass: true,
}, {
// case 2: the smallest region is 68MiB，it can't be merged again.
// case 2: the smallest region is 68MiB, it can't be merged again.
size: 144 + 20,
mergeSize: 20,
keys: 1440000 + 200000,
mergeKeys: 200000,
pass: true,
}, {
// case 3: the smallest region is 50MiB，it can be merged again.
// case 3: the smallest region is 50MiB, it can be merged again.
size: 144 + 2,
mergeSize: 50,
keys: 1440000 + 20000,
mergeKeys: 500000,
pass: false,
}, {
// case4: the smallest region is 51MiB，it can't be merged again.
// case4: the smallest region is 51MiB, it can't be merged again.
size: 144 + 3,
mergeSize: 50,
keys: 1440000 + 30000,
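The merge-check cases above hinge on whether the smaller region left after a split would immediately qualify for merging again. A hedged sketch of that rule — threshold values and field names are illustrative, not PD's actual configuration defaults:

package main

import "fmt"

// A region is a merge candidate only when both its size and key count fall
// below the configured merge thresholds (illustrative names and values).
type mergeThresholds struct {
	MaxMergeRegionSizeMiB uint64
	MaxMergeRegionKeys    uint64
}

func canMergeAgain(t mergeThresholds, sizeMiB, keys uint64) bool {
	return sizeMiB < t.MaxMergeRegionSizeMiB && keys < t.MaxMergeRegionKeys
}

func main() {
	t := mergeThresholds{MaxMergeRegionSizeMiB: 20, MaxMergeRegionKeys: 200000}
	fmt.Println(canMergeAgain(t, 68, 680000)) // false: 68 MiB exceeds the size threshold
	fmt.Println(canMergeAgain(t, 10, 100000)) // true: small enough on both dimensions
}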
4 changes: 2 additions & 2 deletions tests/server/storage/hot_region_storage_test.go
@@ -192,7 +192,7 @@ func TestHotRegionStorageReservedDayConfigChange(t *testing.T) {
re.NoError(err)
re.Nil(next)
schedule := leaderServer.GetConfig().Schedule
// set reserved day to zero,close hot region storage
// set reserved day to zero, close hot region storage
schedule.HotRegionsReservedDays = 0
leaderServer.GetServer().SetScheduleConfig(schedule)
time.Sleep(3 * interval)
@@ -210,7 +210,7 @@ func TestHotRegionStorageReservedDayConfigChange(t *testing.T) {
next, err = iter.Next()
re.NoError(err)
re.Nil(next)
// set reserved day to one,open hot region storage
// set reserved day to one, open hot region storage
schedule.HotRegionsReservedDays = 1
leaderServer.GetServer().SetScheduleConfig(schedule)
time.Sleep(3 * interval)
