Merge remote-tracking branch 'upstream/master' into unity
qw4990 committed Apr 26, 2024
2 parents 073c664 + 10971ea commit 8d38b87
Showing 107 changed files with 1,045 additions and 761 deletions.
2 changes: 1 addition & 1 deletion br/pkg/restore/db_test.go
@@ -425,5 +425,5 @@ func TestGetExistedUserDBs(t *testing.T) {
 //
 // The above variables are in the file br/pkg/restore/systable_restore.go
 func TestMonitorTheSystemTableIncremental(t *testing.T) {
-	require.Equal(t, int64(195), session.CurrentBootstrapVersion)
+	require.Equal(t, int64(196), session.CurrentBootstrapVersion)
 }

2 changes: 1 addition & 1 deletion cmd/tidb-server/BUILD.bazel
@@ -41,7 +41,6 @@ go_library(
         "//pkg/util/cpuprofile",
         "//pkg/util/deadlockhistory",
         "//pkg/util/disk",
-        "//pkg/util/distrole",
         "//pkg/util/domainutil",
         "//pkg/util/kvcache",
         "//pkg/util/logutil",
@@ -50,6 +49,7 @@ go_library(
         "//pkg/util/printer",
         "//pkg/util/redact",
         "//pkg/util/sem",
+        "//pkg/util/servicescope",
         "//pkg/util/signal",
         "//pkg/util/stmtsummary/v2:stmtsummary",
         "//pkg/util/sys/linux",

13 changes: 4 additions & 9 deletions cmd/tidb-server/main.go
@@ -65,7 +65,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/cpuprofile"
 	"github.com/pingcap/tidb/pkg/util/deadlockhistory"
 	"github.com/pingcap/tidb/pkg/util/disk"
-	distroleutil "github.com/pingcap/tidb/pkg/util/distrole"
 	"github.com/pingcap/tidb/pkg/util/domainutil"
 	"github.com/pingcap/tidb/pkg/util/kvcache"
 	"github.com/pingcap/tidb/pkg/util/logutil"
@@ -74,6 +73,7 @@ import (
 	"github.com/pingcap/tidb/pkg/util/printer"
 	"github.com/pingcap/tidb/pkg/util/redact"
 	"github.com/pingcap/tidb/pkg/util/sem"
+	"github.com/pingcap/tidb/pkg/util/servicescope"
 	"github.com/pingcap/tidb/pkg/util/signal"
 	stmtsummaryv2 "github.com/pingcap/tidb/pkg/util/stmtsummary/v2"
 	"github.com/pingcap/tidb/pkg/util/sys/linux"
@@ -677,14 +677,9 @@ func overrideConfig(cfg *config.Config, fset *flag.FlagSet) {
 	}
 
 	if actualFlags[nmTiDBServiceScope] {
-		scope, ok := distroleutil.ToTiDBServiceScope(*serviceScope)
-		if !ok {
-			err := fmt.Errorf("incorrect value: `%s`. %s options: %s",
-				*serviceScope,
-				nmTiDBServiceScope, `"", background`)
-			terror.MustNil(err)
-		}
-		cfg.Instance.TiDBServiceScope = scope
+		err = servicescope.CheckServiceScope(*serviceScope)
+		terror.MustNil(err)
+		cfg.Instance.TiDBServiceScope = *serviceScope
 	}
 }
 
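The hunk above swaps the open-coded distrole check, which only accepted "" or background, for a validator in the new pkg/util/servicescope package. That package's source is not part of this page, so the following Go sketch only illustrates the shape such a check could take: the function name and signature come from the call site, while the exact validation rule (a bounded identifier pattern that still admits the legacy values) is an assumption.

// Package servicescope validates the tidb_service_scope instance option.
// Sketch only: inferred from the call site in overrideConfig above,
// not the actual pkg/util/servicescope source.
package servicescope

import (
	"fmt"
	"regexp"
)

// CheckServiceScope returns an error if scope is not acceptable as a
// tidb_service_scope value. The rule below is an assumption: a 0-64
// character identifier of letters, digits, '-' and '_', which would
// still admit the legacy values "" and "background".
func CheckServiceScope(scope string) error {
	re := regexp.MustCompile(`^[a-zA-Z0-9_-]{0,64}$`)
	if !re.MatchString(scope) {
		return fmt.Errorf("the tidb_service_scope value %q is invalid, it must match %s", scope, re)
	}
	return nil
}

Note that the call site keeps terror.MustNil(err), so an invalid scope still aborts startup exactly as the old enum check did.
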
3 changes: 2 additions & 1 deletion pkg/ddl/BUILD.bazel
@@ -72,6 +72,7 @@ go_library(
         "//pkg/ddl/ingest",
         "//pkg/ddl/internal/session",
         "//pkg/ddl/label",
+        "//pkg/ddl/logutil",
         "//pkg/ddl/placement",
         "//pkg/ddl/resourcegroup",
         "//pkg/ddl/syncer",
@@ -259,6 +260,7 @@ go_test(
         "//pkg/ddl/copr",
         "//pkg/ddl/ingest",
         "//pkg/ddl/internal/session",
+        "//pkg/ddl/logutil",
         "//pkg/ddl/placement",
         "//pkg/ddl/schematracker",
         "//pkg/ddl/syncer",
@@ -313,7 +315,6 @@ go_test(
         "//pkg/util/dbterror/plannererrors",
         "//pkg/util/domainutil",
         "//pkg/util/gcutil",
-        "//pkg/util/logutil",
         "//pkg/util/mathutil",
         "//pkg/util/mock",
         "//pkg/util/sem",

31 changes: 17 additions & 14 deletions pkg/ddl/backfilling.go
@@ -26,6 +26,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
 	sess "github.com/pingcap/tidb/pkg/ddl/internal/session"
+	"github.com/pingcap/tidb/pkg/ddl/logutil"
 	ddlutil "github.com/pingcap/tidb/pkg/ddl/util"
 	"github.com/pingcap/tidb/pkg/expression"
 	"github.com/pingcap/tidb/pkg/kv"
@@ -40,7 +41,6 @@ import (
 	"github.com/pingcap/tidb/pkg/tablecodec"
 	"github.com/pingcap/tidb/pkg/util"
 	"github.com/pingcap/tidb/pkg/util/dbterror"
-	"github.com/pingcap/tidb/pkg/util/logutil"
 	decoder "github.com/pingcap/tidb/pkg/util/rowDecoder"
 	"github.com/pingcap/tidb/pkg/util/timeutil"
 	"github.com/pingcap/tidb/pkg/util/topsql"
@@ -315,7 +315,7 @@ func (w *backfillWorker) handleBackfillTask(d *ddlCtx, task *reorgBackfillTask,
 
 		if num := result.scanCount - lastLogCount; num >= 90000 {
 			lastLogCount = result.scanCount
-			logutil.BgLogger().Info("backfill worker back fill index", zap.String("category", "ddl"), zap.Stringer("worker", w),
+			logutil.DDLLogger().Info("backfill worker back fill index", zap.Stringer("worker", w),
 				zap.Int("addedCount", result.addedCount), zap.Int("scanCount", result.scanCount),
 				zap.String("next key", hex.EncodeToString(taskCtx.nextKey)),
 				zap.Float64("speed(rows/s)", float64(num)/time.Since(lastLogTime).Seconds()))
@@ -327,7 +327,7 @@ func (w *backfillWorker) handleBackfillTask(d *ddlCtx, task *reorgBackfillTask,
 			break
 		}
 	}
-	logutil.BgLogger().Info("backfill worker finish task", zap.String("category", "ddl"),
+	logutil.DDLLogger().Info("backfill worker finish task",
 		zap.Stringer("worker", w), zap.Stringer("task", task),
 		zap.Int("added count", result.addedCount),
 		zap.Int("scan count", result.scanCount),
@@ -347,7 +347,7 @@ func (w *backfillWorker) sendResult(result *backfillResult) {
 }
 
 func (w *backfillWorker) run(d *ddlCtx, bf backfiller, job *model.Job) {
-	logger := ddlLogger.With(zap.Stringer("worker", w), zap.Int64("jobID", job.ID))
+	logger := logutil.DDLLogger().With(zap.Stringer("worker", w), zap.Int64("jobID", job.ID))
 	var (
 		curTaskID int
 		task      *reorgBackfillTask
@@ -413,12 +413,13 @@ func splitTableRanges(
 	startKey, endKey kv.Key,
 	limit int,
 ) ([]kv.KeyRange, error) {
-	logutil.BgLogger().Info("split table range from PD", zap.String("category", "ddl"),
+	logutil.DDLLogger().Info("split table range from PD",
 		zap.Int64("physicalTableID", t.GetPhysicalID()),
 		zap.String("start key", hex.EncodeToString(startKey)),
 		zap.String("end key", hex.EncodeToString(endKey)))
 	if len(startKey) == 0 && len(endKey) == 0 {
-		logutil.BgLogger().Info("split table range from PD, get noop table range", zap.String("category", "ddl"), zap.Int64("physicalTableID", t.GetPhysicalID()))
+		logutil.DDLLogger().Info("split table range from PD, get noop table range",
+			zap.Int64("physicalTableID", t.GetPhysicalID()))
 		return []kv.KeyRange{}, nil
 	}
 
@@ -469,9 +470,9 @@ func getBatchTasks(t table.Table, reorgInfo *reorgInfo, kvRanges []kv.KeyRange,
 		}
 		endK, err := GetRangeEndKey(jobCtx, reorgInfo.d.store, job.Priority, prefix, startKey, endKey)
 		if err != nil {
-			logutil.BgLogger().Info("get backfill range task, get reverse key failed", zap.String("category", "ddl"), zap.Error(err))
+			logutil.DDLLogger().Info("get backfill range task, get reverse key failed", zap.Error(err))
 		} else {
-			logutil.BgLogger().Info("get backfill range task, change end key", zap.String("category", "ddl"),
+			logutil.DDLLogger().Info("get backfill range task, change end key",
 				zap.Int("id", taskID), zap.Int64("pTbl", phyTbl.GetPhysicalID()),
 				zap.String("end key", hex.EncodeToString(endKey)), zap.String("current end key", hex.EncodeToString(endK)))
 			endKey = endK
@@ -628,7 +629,7 @@ func (dc *ddlCtx) writePhysicalTableRecord(
 			return egCtx.Err()
 		case result, ok := <-scheduler.resultChan():
 			if !ok {
-				ddlLogger.Info("backfill workers successfully processed",
+				logutil.DDLLogger().Info("backfill workers successfully processed",
 					zap.Stringer("element", reorgInfo.currElement),
 					zap.Int64("total added count", totalAddedCount),
 					zap.String("start key", hex.EncodeToString(startKey)))
@@ -637,7 +638,7 @@ func (dc *ddlCtx) writePhysicalTableRecord(
 			cnt++
 
 			if result.err != nil {
-				ddlLogger.Warn("backfill worker failed",
+				logutil.DDLLogger().Warn("backfill worker failed",
 					zap.Int64("job ID", reorgInfo.ID),
 					zap.Int64("total added count", totalAddedCount),
 					zap.String("start key", hex.EncodeToString(startKey)),
@@ -658,15 +659,15 @@ func (dc *ddlCtx) writePhysicalTableRecord(
 			if cnt%(scheduler.currentWorkerSize()*4) == 0 {
 				err2 := reorgInfo.UpdateReorgMeta(keeper.nextKey, sessPool)
 				if err2 != nil {
-					ddlLogger.Warn("update reorg meta failed",
+					logutil.DDLLogger().Warn("update reorg meta failed",
 						zap.Int64("job ID", reorgInfo.ID),
 						zap.Error(err2))
 				}
 				// We try to adjust the worker size regularly to reduce
 				// the overhead of loading the DDL related global variables.
 				err2 = scheduler.adjustWorkerSize()
 				if err2 != nil {
-					ddlLogger.Warn("cannot adjust backfill worker size",
+					logutil.DDLLogger().Warn("cannot adjust backfill worker size",
 						zap.Int64("job ID", reorgInfo.ID),
 						zap.Error(err2))
 				}
@@ -688,7 +689,7 @@ func (dc *ddlCtx) writePhysicalTableRecord(
 		if len(kvRanges) == 0 {
 			break
 		}
-		ddlLogger.Info("start backfill workers to reorg record",
+		logutil.DDLLogger().Info("start backfill workers to reorg record",
 			zap.Stringer("type", bfWorkerType),
 			zap.Int("workerCnt", scheduler.currentWorkerSize()),
 			zap.Int("regionCnt", len(kvRanges)),
@@ -847,7 +848,9 @@ func logSlowOperations(elapsed time.Duration, slowMsg string, threshold uint32)
 	}
 
 	if elapsed >= time.Duration(threshold)*time.Millisecond {
-		logutil.BgLogger().Info("slow operations", zap.String("category", "ddl"), zap.Duration("takeTimes", elapsed), zap.String("msg", slowMsg))
+		logutil.DDLLogger().Info("slow operations",
+			zap.Duration("takeTimes", elapsed),
+			zap.String("msg", slowMsg))
 	}
 }
 
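The dominant change in this file, and in the ones below, is mechanical: every logutil.BgLogger().X(msg, zap.String("category", "ddl"), ...) call becomes logutil.DDLLogger().X(msg, ...), with the category field attached once in the new pkg/ddl/logutil package. That package's body is not shown in this diff; inferred from its call sites (DDLLogger here, DDLIngestLogger in backfilling_dist_executor.go below), it is presumably a thin wrapper along these lines:

// Package logutil (pkg/ddl/logutil) pre-tags the shared background
// logger with a DDL category so call sites stay short. Sketch inferred
// from the call sites in this commit, not the actual file.
package logutil

import (
	tidblogutil "github.com/pingcap/tidb/pkg/util/logutil"
	"go.uber.org/zap"
)

// DDLLogger returns the background logger with the "ddl" category preset.
func DDLLogger() *zap.Logger {
	return tidblogutil.BgLogger().With(zap.String("category", "ddl"))
}

// DDLIngestLogger returns the background logger with the "ddl-ingest"
// category preset.
func DDLIngestLogger() *zap.Logger {
	return tidblogutil.BgLogger().With(zap.String("category", "ddl-ingest"))
}

Context-scoped logging still goes through pkg/util/logutil's Logger(ctx), which is why files below that need both packages import the util one under the tidblogutil alias.
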
7 changes: 4 additions & 3 deletions pkg/ddl/backfilling_dist_executor.go
@@ -20,6 +20,7 @@ import (
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/pkg/ddl/ingest"
+	"github.com/pingcap/tidb/pkg/ddl/logutil"
 	"github.com/pingcap/tidb/pkg/disttask/framework/proto"
 	"github.com/pingcap/tidb/pkg/disttask/framework/taskexecutor"
 	"github.com/pingcap/tidb/pkg/disttask/framework/taskexecutor/execute"
@@ -29,7 +30,6 @@ import (
 	"github.com/pingcap/tidb/pkg/parser/terror"
 	"github.com/pingcap/tidb/pkg/table"
 	"github.com/pingcap/tidb/pkg/util/dbterror"
-	"github.com/pingcap/tidb/pkg/util/logutil"
 	"github.com/tikv/client-go/v2/tikv"
 	"go.uber.org/zap"
 )
@@ -101,8 +101,9 @@ func (s *backfillDistExecutor) newBackfillSubtaskExecutor(
 	for _, eid := range eleIDs {
 		indexInfo := model.FindIndexInfoByID(tbl.Meta().Indices, eid)
 		if indexInfo == nil {
-			logutil.BgLogger().Warn("index info not found", zap.String("category", "ddl-ingest"),
-				zap.Int64("table ID", tbl.Meta().ID), zap.Int64("index ID", eid))
+			logutil.DDLIngestLogger().Warn("index info not found",
+				zap.Int64("table ID", tbl.Meta().ID),
+				zap.Int64("index ID", eid))
 			return nil, errors.Errorf("index info not found: %d", eid)
 		}
 		indexInfos = append(indexInfos, indexInfo)

11 changes: 5 additions & 6 deletions pkg/ddl/backfilling_dist_scheduler.go
@@ -27,6 +27,7 @@ import (
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/br/pkg/storage"
 	"github.com/pingcap/tidb/pkg/ddl/ingest"
+	"github.com/pingcap/tidb/pkg/ddl/logutil"
 	"github.com/pingcap/tidb/pkg/disttask/framework/handle"
 	"github.com/pingcap/tidb/pkg/disttask/framework/proto"
 	"github.com/pingcap/tidb/pkg/disttask/framework/scheduler"
@@ -40,7 +41,7 @@ import (
 	"github.com/pingcap/tidb/pkg/store/helper"
 	"github.com/pingcap/tidb/pkg/table"
 	"github.com/pingcap/tidb/pkg/util/backoff"
-	"github.com/pingcap/tidb/pkg/util/logutil"
+	tidblogutil "github.com/pingcap/tidb/pkg/util/logutil"
 	"github.com/tikv/client-go/v2/tikv"
 	"go.uber.org/zap"
 )
@@ -76,7 +77,7 @@ func (sch *BackfillingSchedulerExt) OnNextSubtasksBatch(
 	execIDs []string,
 	nextStep proto.Step,
 ) (taskMeta [][]byte, err error) {
-	logger := logutil.BgLogger().With(
+	logger := logutil.DDLLogger().With(
 		zap.Stringer("type", task.Type),
 		zap.Int64("task-id", task.ID),
 		zap.String("curr-step", proto.Step2Str(task.Type, task.Step)),
@@ -265,7 +266,7 @@ func generateNonPartitionPlan(
 
 	subTaskMetas := make([][]byte, 0, 4)
 	backoffer := backoff.NewExponential(scanRegionBackoffBase, 2, scanRegionBackoffMax)
-	err = handle.RunWithRetry(d.ctx, 8, backoffer, logutil.Logger(d.ctx), func(_ context.Context) (bool, error) {
+	err = handle.RunWithRetry(d.ctx, 8, backoffer, tidblogutil.Logger(d.ctx), func(_ context.Context) (bool, error) {
 		regionCache := d.store.(helper.Storage).GetRegionCache()
 		recordRegionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey)
 		if err != nil {
@@ -590,9 +591,7 @@ func forEachBackfillSubtaskMeta(
 	for _, subTaskMeta := range subTaskMetas {
 		subtask, err := decodeBackfillSubTaskMeta(subTaskMeta)
 		if err != nil {
-			logutil.BgLogger().Error("unmarshal error",
-				zap.String("category", "ddl"),
-				zap.Error(err))
+			logutil.DDLLogger().Error("unmarshal error", zap.Error(err))
 			return errors.Trace(err)
 		}
 		fn(subtask)

2 changes: 1 addition & 1 deletion pkg/ddl/backfilling_dist_scheduler_test.go
@@ -156,7 +156,7 @@ func TestBackfillingSchedulerGlobalSortMode(t *testing.T) {
 	ext.(*ddl.BackfillingSchedulerExt).GlobalSort = true
 	sch.Extension = ext
 
-	taskID, err := mgr.CreateTask(ctx, task.Key, proto.Backfill, 1, task.Meta)
+	taskID, err := mgr.CreateTask(ctx, task.Key, proto.Backfill, 1, "", task.Meta)
 	require.NoError(t, err)
 	task.ID = taskID
 	execIDs := []string{":4000"}

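The only change here is a new "" argument threaded into mgr.CreateTask, tracking an upstream signature change whose definition is not visible on this page. Given the service-scope work elsewhere in this merge, it plausibly names the scope of TiDB instances eligible to run the task. A hypothetical reading of the new signature; the targetScope name and its semantics are assumptions, not confirmed by this diff:

// Hypothetical signature inferred from the call site above; parameter
// name and comment are assumptions.
func (mgr *TaskManager) CreateTask(
	ctx context.Context,
	key string,
	tp proto.TaskType,
	concurrency int,
	targetScope string, // "" would mean no scope restriction
	meta []byte,
) (taskID int64, err error)

Passing the empty string keeps the test's previous behavior.
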
19 changes: 8 additions & 11 deletions pkg/ddl/backfilling_read_index.go
@@ -24,6 +24,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/pkg/ddl/ingest"
+	"github.com/pingcap/tidb/pkg/ddl/logutil"
 	"github.com/pingcap/tidb/pkg/disttask/framework/proto"
 	"github.com/pingcap/tidb/pkg/disttask/framework/taskexecutor/execute"
 	"github.com/pingcap/tidb/pkg/disttask/operator"
@@ -33,7 +34,7 @@ import (
 	"github.com/pingcap/tidb/pkg/parser/model"
 	"github.com/pingcap/tidb/pkg/sessionctx"
 	"github.com/pingcap/tidb/pkg/table"
-	"github.com/pingcap/tidb/pkg/util/logutil"
+	tidblogutil "github.com/pingcap/tidb/pkg/util/logutil"
 	"go.uber.org/zap"
 )
 
@@ -87,14 +88,12 @@ func newReadIndexExecutor(
 }
 
 func (*readIndexExecutor) Init(_ context.Context) error {
-	logutil.BgLogger().Info("read index executor init subtask exec env",
-		zap.String("category", "ddl"))
+	logutil.DDLLogger().Info("read index executor init subtask exec env")
 	return nil
 }
 
 func (r *readIndexExecutor) RunSubtask(ctx context.Context, subtask *proto.Subtask) error {
-	logutil.BgLogger().Info("read index executor run subtask",
-		zap.String("category", "ddl"),
+	logutil.DDLLogger().Info("read index executor run subtask",
 		zap.Bool("use cloud", len(r.cloudStorageURI) > 0))
 
 	r.subtaskSummary.Store(subtask.ID, &readIndexSummary{
@@ -149,8 +148,7 @@ func (r *readIndexExecutor) RealtimeSummary() *execute.SubtaskSummary {
 }
 
 func (r *readIndexExecutor) Cleanup(ctx context.Context) error {
-	logutil.Logger(ctx).Info("read index executor cleanup subtask exec env",
-		zap.String("category", "ddl"))
+	tidblogutil.Logger(ctx).Info("read index executor cleanup subtask exec env")
 	// cleanup backend context
 	ingest.LitBackCtxMgr.Unregister(r.job.ID)
 	return nil
@@ -182,7 +180,7 @@ func (r *readIndexExecutor) OnFinished(ctx context.Context, subtask *proto.Subta
 	}
 	sm.MetaGroups = s.metaGroups
 
-	logutil.Logger(ctx).Info("get key boundary on subtask finished",
+	tidblogutil.Logger(ctx).Info("get key boundary on subtask finished",
 		zap.String("start", hex.EncodeToString(all.StartKey)),
 		zap.String("end", hex.EncodeToString(all.EndKey)),
 		zap.Int("fileCount", len(all.MultipleFilesStats)),
@@ -205,8 +203,7 @@ func (r *readIndexExecutor) getTableStartEndKey(sm *BackfillSubTaskMeta) (
 	pid := sm.PhysicalTableID
 	start, end, err = getTableRange(r.jc, r.d.ddlCtx, parTbl.GetPartition(pid), currentVer.Ver, r.job.Priority)
 	if err != nil {
-		logutil.BgLogger().Error("get table range error",
-			zap.String("category", "ddl"),
+		logutil.DDLLogger().Error("get table range error",
 			zap.Error(err))
 		return nil, nil, nil, err
 	}
@@ -233,7 +230,7 @@ func (r *readIndexExecutor) buildLocalStorePipeline(
 	for _, index := range r.indexes {
 		ei, err := r.bc.Register(r.job.ID, index.ID, r.job.SchemaName, r.job.TableName)
 		if err != nil {
-			logutil.Logger(opCtx).Warn("cannot register new engine", zap.Error(err),
+			tidblogutil.Logger(opCtx).Warn("cannot register new engine", zap.Error(err),
 				zap.Int64("job ID", r.job.ID), zap.Int64("index ID", index.ID))
 			return nil, err
 		}

3 changes: 2 additions & 1 deletion pkg/ddl/backfilling_scheduler.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/tidb/pkg/ddl/copr"
 	"github.com/pingcap/tidb/pkg/ddl/ingest"
 	sess "github.com/pingcap/tidb/pkg/ddl/internal/session"
+	ddllogutil "github.com/pingcap/tidb/pkg/ddl/logutil"
 	"github.com/pingcap/tidb/pkg/errctx"
 	"github.com/pingcap/tidb/pkg/kv"
 	"github.com/pingcap/tidb/pkg/metrics"
@@ -230,7 +231,7 @@ func (b *txnBackfillScheduler) adjustWorkerSize() error {
 	job := reorgInfo.Job
 	jc := b.jobCtx
 	if err := loadDDLReorgVars(b.ctx, b.sessPool); err != nil {
-		logutil.BgLogger().Error("load DDL reorganization variable failed", zap.String("category", "ddl"), zap.Error(err))
+		ddllogutil.DDLLogger().Error("load DDL reorganization variable failed", zap.Error(err))
 	}
 	workerCnt := b.expectedWorkerSize()
 	// Increase the worker.
