Skip to content

Commit 2555083

Browse files
committed
*: Set collation to uint16
1 parent c60f97d commit 2555083

File tree

237 files changed

+1577
-1579
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

237 files changed

+1577
-1579
lines changed

br/pkg/backup/client.go

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -966,13 +966,13 @@ func (bc *Client) FindTargetPeer(ctx context.Context, key []byte, isRawKv bool,
966966
var leader *metapb.Peer
967967
key = codec.EncodeBytesExt([]byte{}, key, isRawKv)
968968
state := utils.InitialRetryState(60, 100*time.Millisecond, 2*time.Second)
969-
failpoint.Inject("retry-state-on-find-target-peer", func(v failpoint.Value) {
969+
if v, _err_ := failpoint.Eval(_curpkg_("retry-state-on-find-target-peer")); _err_ == nil {
970970
logutil.CL(ctx).Info("reset state for FindTargetPeer")
971971
state = utils.InitialRetryState(v.(int), 100*time.Millisecond, 100*time.Millisecond)
972-
})
972+
}
973973
err := utils.WithRetry(ctx, func() error {
974974
region, err := bc.mgr.GetPDClient().GetRegion(ctx, key)
975-
failpoint.Inject("return-region-on-find-target-peer", func(v failpoint.Value) {
975+
if v, _err_ := failpoint.Eval(_curpkg_("return-region-on-find-target-peer")); _err_ == nil {
976976
switch v.(string) {
977977
case "nil":
978978
{
@@ -1017,7 +1017,7 @@ func (bc *Client) FindTargetPeer(ctx context.Context, key []byte, isRawKv bool,
10171017
}
10181018
}
10191019
}
1020-
})
1020+
}
10211021
if err != nil || region == nil {
10221022
logutil.CL(ctx).Error("find region failed", zap.Error(err), zap.Reflect("region", region))
10231023
return errors.Annotate(berrors.ErrPDLeaderNotFound, "cannot find region from pd client")
@@ -1067,7 +1067,7 @@ func (bc *Client) fineGrainedBackup(
10671067
ctx = opentracing.ContextWithSpan(ctx, span1)
10681068
}
10691069

1070-
failpoint.Inject("hint-fine-grained-backup", func(v failpoint.Value) {
1070+
if v, _err_ := failpoint.Eval(_curpkg_("hint-fine-grained-backup")); _err_ == nil {
10711071
log.Info("failpoint hint-fine-grained-backup injected, "+
10721072
"process will sleep for 3s and notify the shell.", zap.String("file", v.(string)))
10731073
if sigFile, ok := v.(string); ok {
@@ -1080,7 +1080,7 @@ func (bc *Client) fineGrainedBackup(
10801080
}
10811081
time.Sleep(3 * time.Second)
10821082
}
1083-
})
1083+
}
10841084

10851085
bo := utils.AdaptTiKVBackoffer(ctx, backupFineGrainedMaxBackoff, berrors.ErrUnknown)
10861086
for {
@@ -1298,7 +1298,7 @@ func doSendBackup(
12981298
req backuppb.BackupRequest,
12991299
respFn func(*backuppb.BackupResponse) error,
13001300
) error {
1301-
failpoint.Inject("hint-backup-start", func(v failpoint.Value) {
1301+
if v, _err_ := failpoint.Eval(_curpkg_("hint-backup-start")); _err_ == nil {
13021302
logutil.CL(ctx).Info("failpoint hint-backup-start injected, " +
13031303
"process will notify the shell.")
13041304
if sigFile, ok := v.(string); ok {
@@ -1311,9 +1311,9 @@ func doSendBackup(
13111311
}
13121312
}
13131313
time.Sleep(3 * time.Second)
1314-
})
1314+
}
13151315
bCli, err := client.Backup(ctx, &req)
1316-
failpoint.Inject("reset-retryable-error", func(val failpoint.Value) {
1316+
if val, _err_ := failpoint.Eval(_curpkg_("reset-retryable-error")); _err_ == nil {
13171317
switch val.(string) {
13181318
case "Unavaiable":
13191319
{
@@ -1326,13 +1326,13 @@ func doSendBackup(
13261326
err = status.Error(codes.Internal, "Internal error")
13271327
}
13281328
}
1329-
})
1330-
failpoint.Inject("reset-not-retryable-error", func(val failpoint.Value) {
1329+
}
1330+
if val, _err_ := failpoint.Eval(_curpkg_("reset-not-retryable-error")); _err_ == nil {
13311331
if val.(bool) {
13321332
logutil.CL(ctx).Debug("failpoint reset-not-retryable-error injected.")
13331333
err = status.Error(codes.Unknown, "Your server was haunted hence doesn't work, meow :3")
13341334
}
1335-
})
1335+
}
13361336
if err != nil {
13371337
return err
13381338
}

br/pkg/backup/prepare_snap/prepare.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -454,9 +454,9 @@ func (p *Preparer) pushWaitApply(reqs pendingRequests, region Region) {
454454
// PrepareConnections prepares the connections for each store.
455455
// This will pause the admin commands for each store.
456456
func (p *Preparer) PrepareConnections(ctx context.Context) error {
457-
failpoint.Inject("PrepareConnectionsErr", func() {
458-
failpoint.Return(errors.New("mock PrepareConnectionsErr"))
459-
})
457+
if _, _err_ := failpoint.Eval(_curpkg_("PrepareConnectionsErr")); _err_ == nil {
458+
return errors.New("mock PrepareConnectionsErr")
459+
}
460460
log.Info("Preparing connections to stores.")
461461
stores, err := p.env.GetAllLiveStores(ctx)
462462
if err != nil {

br/pkg/backup/push.go

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -66,10 +66,10 @@ func (push *pushDown) pushBackup(
6666
}
6767

6868
// Push down backup tasks to all tikv instances.
69-
failpoint.Inject("noop-backup", func(_ failpoint.Value) {
69+
if _, _err_ := failpoint.Eval(_curpkg_("noop-backup")); _err_ == nil {
7070
logutil.CL(ctx).Warn("skipping normal backup, jump to fine-grained backup, meow :3", logutil.Key("start-key", req.StartKey), logutil.Key("end-key", req.EndKey))
71-
failpoint.Return(nil)
72-
})
71+
return nil
72+
}
7373

7474
wg := new(sync.WaitGroup)
7575
errContext := utils.NewErrorContext("pushBackup", 10)
@@ -128,28 +128,28 @@ func (push *pushDown) pushBackup(
128128
// Finished.
129129
return nil
130130
}
131-
failpoint.Inject("backup-timeout-error", func(val failpoint.Value) {
131+
if val, _err_ := failpoint.Eval(_curpkg_("backup-timeout-error")); _err_ == nil {
132132
msg := val.(string)
133133
logutil.CL(ctx).Info("failpoint backup-timeout-error injected.", zap.String("msg", msg))
134134
resp.Error = &backuppb.Error{
135135
Msg: msg,
136136
}
137-
})
138-
failpoint.Inject("backup-storage-error", func(val failpoint.Value) {
137+
}
138+
if val, _err_ := failpoint.Eval(_curpkg_("backup-storage-error")); _err_ == nil {
139139
msg := val.(string)
140140
logutil.CL(ctx).Debug("failpoint backup-storage-error injected.", zap.String("msg", msg))
141141
resp.Error = &backuppb.Error{
142142
Msg: msg,
143143
}
144-
})
145-
failpoint.Inject("tikv-rw-error", func(val failpoint.Value) {
144+
}
145+
if val, _err_ := failpoint.Eval(_curpkg_("tikv-rw-error")); _err_ == nil {
146146
msg := val.(string)
147147
logutil.CL(ctx).Debug("failpoint tikv-rw-error injected.", zap.String("msg", msg))
148148
resp.Error = &backuppb.Error{
149149
Msg: msg,
150150
}
151-
})
152-
failpoint.Inject("tikv-region-error", func(val failpoint.Value) {
151+
}
152+
if val, _err_ := failpoint.Eval(_curpkg_("tikv-region-error")); _err_ == nil {
153153
msg := val.(string)
154154
logutil.CL(ctx).Debug("failpoint tikv-region-error injected.", zap.String("msg", msg))
155155
resp.Error = &backuppb.Error{
@@ -160,7 +160,7 @@ func (push *pushDown) pushBackup(
160160
},
161161
},
162162
}
163-
})
163+
}
164164
if resp.GetError() == nil {
165165
// None error means range has been backuped successfully.
166166
if checkpointRunner != nil {

br/pkg/checkpoint/checkpoint.go

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -391,7 +391,7 @@ func (r *CheckpointRunner[K, V]) startCheckpointMainLoop(
391391
tickDurationForChecksum,
392392
tickDurationForLock time.Duration,
393393
) {
394-
failpoint.Inject("checkpoint-more-quickly-flush", func(_ failpoint.Value) {
394+
if _, _err_ := failpoint.Eval(_curpkg_("checkpoint-more-quickly-flush")); _err_ == nil {
395395
tickDurationForChecksum = 1 * time.Second
396396
tickDurationForFlush = 3 * time.Second
397397
if tickDurationForLock > 0 {
@@ -402,7 +402,7 @@ func (r *CheckpointRunner[K, V]) startCheckpointMainLoop(
402402
zap.Duration("checksum", tickDurationForChecksum),
403403
zap.Duration("lock", tickDurationForLock),
404404
)
405-
})
405+
}
406406
r.wg.Add(1)
407407
checkpointLoop := func(ctx context.Context) {
408408
defer r.wg.Done()
@@ -506,9 +506,9 @@ func (r *CheckpointRunner[K, V]) doChecksumFlush(ctx context.Context, checksumIt
506506
return errors.Annotatef(err, "failed to write file %s for checkpoint checksum", fname)
507507
}
508508

509-
failpoint.Inject("failed-after-checkpoint-flushes-checksum", func(_ failpoint.Value) {
510-
failpoint.Return(errors.Errorf("failpoint: failed after checkpoint flushes checksum"))
511-
})
509+
if _, _err_ := failpoint.Eval(_curpkg_("failed-after-checkpoint-flushes-checksum")); _err_ == nil {
510+
return errors.Errorf("failpoint: failed after checkpoint flushes checksum")
511+
}
512512
return nil
513513
}
514514

@@ -570,9 +570,9 @@ func (r *CheckpointRunner[K, V]) doFlush(ctx context.Context, meta map[K]*RangeG
570570
}
571571
}
572572

573-
failpoint.Inject("failed-after-checkpoint-flushes", func(_ failpoint.Value) {
574-
failpoint.Return(errors.Errorf("failpoint: failed after checkpoint flushes"))
575-
})
573+
if _, _err_ := failpoint.Eval(_curpkg_("failed-after-checkpoint-flushes")); _err_ == nil {
574+
return errors.Errorf("failpoint: failed after checkpoint flushes")
575+
}
576576
return nil
577577
}
578578

@@ -663,9 +663,9 @@ func (r *CheckpointRunner[K, V]) updateLock(ctx context.Context) error {
663663
return errors.Trace(err)
664664
}
665665

666-
failpoint.Inject("failed-after-checkpoint-updates-lock", func(_ failpoint.Value) {
667-
failpoint.Return(errors.Errorf("failpoint: failed after checkpoint updates lock"))
668-
})
666+
if _, _err_ := failpoint.Eval(_curpkg_("failed-after-checkpoint-updates-lock")); _err_ == nil {
667+
return errors.Errorf("failpoint: failed after checkpoint updates lock")
668+
}
669669

670670
return nil
671671
}

br/pkg/checksum/executor.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -387,12 +387,12 @@ func (exec *Executor) Execute(
387387
vars.BackOffWeight = exec.backoffWeight
388388
}
389389
resp, err = sendChecksumRequest(ctx, client, req, vars)
390-
failpoint.Inject("checksumRetryErr", func(val failpoint.Value) {
390+
if val, _err_ := failpoint.Eval(_curpkg_("checksumRetryErr")); _err_ == nil {
391391
// first time reach here. return error
392392
if val.(bool) {
393393
err = errors.New("inject checksum error")
394394
}
395-
})
395+
}
396396
if err != nil {
397397
return errors.Trace(err)
398398
}

br/pkg/conn/conn.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -87,23 +87,23 @@ func GetAllTiKVStoresWithRetry(ctx context.Context,
8787
ctx,
8888
func() error {
8989
stores, err = util.GetAllTiKVStores(ctx, pdClient, storeBehavior)
90-
failpoint.Inject("hint-GetAllTiKVStores-error", func(val failpoint.Value) {
90+
if val, _err_ := failpoint.Eval(_curpkg_("hint-GetAllTiKVStores-error")); _err_ == nil {
9191
logutil.CL(ctx).Debug("failpoint hint-GetAllTiKVStores-error injected.")
9292
if val.(bool) {
9393
err = status.Error(codes.Unknown, "Retryable error")
9494
} else {
9595
err = context.Canceled
9696
}
97-
})
97+
}
9898

99-
failpoint.Inject("hint-GetAllTiKVStores-cancel", func(val failpoint.Value) {
99+
if val, _err_ := failpoint.Eval(_curpkg_("hint-GetAllTiKVStores-cancel")); _err_ == nil {
100100
logutil.CL(ctx).Debug("failpoint hint-GetAllTiKVStores-cancel injected.")
101101
if val.(bool) {
102102
err = status.Error(codes.Canceled, "Cancel Retry")
103103
} else {
104104
err = context.Canceled
105105
}
106-
})
106+
}
107107

108108
return errors.Trace(err)
109109
},

br/pkg/pdutil/pd.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,12 +203,12 @@ func parseVersion(versionStr string) *semver.Version {
203203
zap.String("version", versionStr), zap.Error(err))
204204
version = &semver.Version{Major: 0, Minor: 0, Patch: 0}
205205
}
206-
failpoint.Inject("PDEnabledPauseConfig", func(val failpoint.Value) {
206+
if val, _err_ := failpoint.Eval(_curpkg_("PDEnabledPauseConfig")); _err_ == nil {
207207
if val.(bool) {
208208
// test pause config is enable
209209
version = &semver.Version{Major: 5, Minor: 0, Patch: 0}
210210
}
211-
})
211+
}
212212
return version
213213
}
214214

br/pkg/restore/client.go

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -735,11 +735,11 @@ func (rc *Client) GetTSWithRetry(ctx context.Context) (uint64, error) {
735735

736736
err := utils.WithRetry(ctx, func() error {
737737
startTS, getTSErr = rc.GetTS(ctx)
738-
failpoint.Inject("get-ts-error", func(val failpoint.Value) {
738+
if val, _err_ := failpoint.Eval(_curpkg_("get-ts-error")); _err_ == nil {
739739
if val.(bool) && retry < 3 {
740740
getTSErr = errors.Errorf("rpc error: code = Unknown desc = [PD:tso:ErrGenerateTimestamp]generate timestamp failed, requested pd is not leader of cluster")
741741
}
742-
})
742+
}
743743

744744
retry++
745745
if getTSErr != nil {
@@ -1139,11 +1139,11 @@ func (rc *Client) createTablesInWorkerPool(ctx context.Context, dom *domain.Doma
11391139
workers.ApplyWithIDInErrorGroup(eg, func(id uint64) error {
11401140
db := rc.dbPool[id%uint64(len(rc.dbPool))]
11411141
cts, err := rc.createTables(ectx, db, dom, tableSlice, newTS) // ddl job for [lastSent:i)
1142-
failpoint.Inject("restore-createtables-error", func(val failpoint.Value) {
1142+
if val, _err_ := failpoint.Eval(_curpkg_("restore-createtables-error")); _err_ == nil {
11431143
if val.(bool) {
11441144
err = errors.New("sample error without extra message")
11451145
}
1146-
})
1146+
}
11471147
if err != nil {
11481148
log.Error("create tables fail", zap.Error(err))
11491149
return err
@@ -2287,9 +2287,9 @@ func (rc *Client) getRuleID(tableID int64) string {
22872287

22882288
// IsFull returns whether this backup is full.
22892289
func (rc *Client) IsFull() bool {
2290-
failpoint.Inject("mock-incr-backup-data", func() {
2291-
failpoint.Return(false)
2292-
})
2290+
if _, _err_ := failpoint.Eval(_curpkg_("mock-incr-backup-data")); _err_ == nil {
2291+
return false
2292+
}
22932293
return !rc.IsIncremental()
22942294
}
22952295

@@ -3011,9 +3011,9 @@ func (rc *Client) RestoreMetaKVFiles(
30113011
filesInDefaultCF = SortMetaKVFiles(filesInDefaultCF)
30123012
filesInWriteCF = SortMetaKVFiles(filesInWriteCF)
30133013

3014-
failpoint.Inject("failed-before-id-maps-saved", func(_ failpoint.Value) {
3015-
failpoint.Return(errors.New("failpoint: failed before id maps saved"))
3016-
})
3014+
if _, _err_ := failpoint.Eval(_curpkg_("failed-before-id-maps-saved")); _err_ == nil {
3015+
return errors.New("failpoint: failed before id maps saved")
3016+
}
30173017

30183018
log.Info("start to restore meta files",
30193019
zap.Int("total files", len(files)),
@@ -3031,9 +3031,9 @@ func (rc *Client) RestoreMetaKVFiles(
30313031
return errors.Trace(err)
30323032
}
30333033
}
3034-
failpoint.Inject("failed-after-id-maps-saved", func(_ failpoint.Value) {
3035-
failpoint.Return(errors.New("failpoint: failed after id maps saved"))
3036-
})
3034+
if _, _err_ := failpoint.Eval(_curpkg_("failed-after-id-maps-saved")); _err_ == nil {
3035+
return errors.New("failpoint: failed after id maps saved")
3036+
}
30373037

30383038
// run the rewrite and restore meta-kv into TiKV cluster.
30393039
if err := rc.RestoreMetaKVFilesWithBatchMethod(
@@ -3276,18 +3276,18 @@ func (rc *Client) restoreMetaKvEntries(
32763276
log.Debug("after rewrite entry", zap.Int("new-key-len", len(newEntry.Key)),
32773277
zap.Int("new-value-len", len(entry.e.Value)), zap.ByteString("new-key", newEntry.Key))
32783278

3279-
failpoint.Inject("failed-to-restore-metakv", func(_ failpoint.Value) {
3280-
failpoint.Return(0, 0, errors.Errorf("failpoint: failed to restore metakv"))
3281-
})
3279+
if _, _err_ := failpoint.Eval(_curpkg_("failed-to-restore-metakv")); _err_ == nil {
3280+
return 0, 0, errors.Errorf("failpoint: failed to restore metakv")
3281+
}
32823282
if err := rc.rawKVClient.Put(ctx, newEntry.Key, newEntry.Value, entry.ts); err != nil {
32833283
return 0, 0, errors.Trace(err)
32843284
}
32853285
// for failpoint, we need to flush the cache in rawKVClient every time
3286-
failpoint.Inject("do-not-put-metakv-in-batch", func(_ failpoint.Value) {
3286+
if _, _err_ := failpoint.Eval(_curpkg_("do-not-put-metakv-in-batch")); _err_ == nil {
32873287
if err := rc.rawKVClient.PutRest(ctx); err != nil {
3288-
failpoint.Return(0, 0, errors.Trace(err))
3288+
return 0, 0, errors.Trace(err)
32893289
}
3290-
})
3290+
}
32913291
kvCount++
32923292
size += uint64(len(newEntry.Key) + len(newEntry.Value))
32933293
}
@@ -3544,11 +3544,11 @@ NEXTSQL:
35443544
return errors.Trace(err)
35453545
}
35463546
}
3547-
failpoint.Inject("failed-before-create-ingest-index", func(v failpoint.Value) {
3547+
if v, _err_ := failpoint.Eval(_curpkg_("failed-before-create-ingest-index")); _err_ == nil {
35483548
if v != nil && v.(bool) {
3549-
failpoint.Return(errors.New("failed before create ingest index"))
3549+
return errors.New("failed before create ingest index")
35503550
}
3551-
})
3551+
}
35523552
// create the repaired index when first execution or not found it
35533553
if err := rc.db.se.ExecuteInternal(ctx, sql.AddSQL, sql.AddArgs...); err != nil {
35543554
return errors.Trace(err)

0 commit comments

Comments
 (0)