Commit

*: record index name in the slow log instead of index id (#11795) (#1…
crazycs520 authored and zimulala committed Sep 9, 2019
1 parent ff58c3c commit 8938907
Showing 10 changed files with 52 additions and 50 deletions.
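In effect, the slow-log header switches from numeric index IDs to table-qualified index names. A before/after excerpt, adapted from the removed format comment and the expected output in sessionctx/variable/session_test.go below:

Before: # Index_ids: [1,2]
After:  # Index_names: [t1:a,t2:b]
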
26 changes: 13 additions & 13 deletions domain/topn_slow_query.go
@@ -213,17 +213,17 @@ func (q *topNSlowQueries) Close() {

// SlowQueryInfo is a struct to record slow query info.
type SlowQueryInfo struct {
- SQL string
- Start time.Time
- Duration time.Duration
- Detail execdetails.ExecDetails
- Succ bool
- ConnID uint64
- TxnTS uint64
- User string
- DB string
- TableIDs string
- IndexIDs string
- Internal bool
- Digest string
+ SQL string
+ Start time.Time
+ Duration time.Duration
+ Detail execdetails.ExecDetails
+ Succ bool
+ ConnID uint64
+ TxnTS uint64
+ User string
+ DB string
+ TableIDs string
+ IndexNames string
+ Internal bool
+ Digest string
}
36 changes: 18 additions & 18 deletions executor/adapter.go
@@ -625,23 +625,23 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
}
sql = QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo()

- var tableIDs, indexIDs string
+ var tableIDs, indexNames string
if len(sessVars.StmtCtx.TableIDs) > 0 {
tableIDs = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.TableIDs), " ", ",", -1)
}
- if len(sessVars.StmtCtx.IndexIDs) > 0 {
- indexIDs = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.IndexIDs), " ", ",", -1)
+ if len(sessVars.StmtCtx.IndexNames) > 0 {
+ indexNames = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.IndexNames), " ", ",", -1)
}
execDetail := sessVars.StmtCtx.GetExecDetails()
copTaskInfo := sessVars.StmtCtx.CopTasksDetails()
statsInfos := plannercore.GetStatsInfo(a.Plan)
memMax := sessVars.StmtCtx.MemTracker.MaxConsumed()
if costTime < threshold {
_, digest := sessVars.StmtCtx.SQLDigest()
- logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, digest, statsInfos, copTaskInfo, memMax, succ, sql))
+ logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexNames, digest, statsInfos, copTaskInfo, memMax, succ, sql))
} else {
_, digest := sessVars.StmtCtx.SQLDigest()
- logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, digest, statsInfos, copTaskInfo, memMax, succ, sql))
+ logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexNames, digest, statsInfos, copTaskInfo, memMax, succ, sql))
metrics.TotalQueryProcHistogram.Observe(costTime.Seconds())
metrics.TotalCopProcHistogram.Observe(execDetail.ProcessTime.Seconds())
metrics.TotalCopWaitHistogram.Observe(execDetail.WaitTime.Seconds())
@@ -650,19 +650,19 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
userString = sessVars.User.String()
}
domain.GetDomain(a.Ctx).LogSlowQuery(&domain.SlowQueryInfo{
- SQL: sql,
- Digest: digest,
- Start: a.StartTime,
- Duration: costTime,
- Detail: sessVars.StmtCtx.GetExecDetails(),
- Succ: succ,
- ConnID: sessVars.ConnectionID,
- TxnTS: txnTS,
- User: userString,
- DB: sessVars.CurrentDB,
- TableIDs: tableIDs,
- IndexIDs: indexIDs,
- Internal: sessVars.InRestrictedSQL,
+ SQL: sql,
+ Digest: digest,
+ Start: a.StartTime,
+ Duration: costTime,
+ Detail: sessVars.StmtCtx.GetExecDetails(),
+ Succ: succ,
+ ConnID: sessVars.ConnectionID,
+ TxnTS: txnTS,
+ User: userString,
+ DB: sessVars.CurrentDB,
+ TableIDs: tableIDs,
+ IndexNames: indexNames,
+ Internal: sessVars.InRestrictedSQL,
})
}
}
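
The comma-joined value written to the log comes from Go's default slice formatting followed by a space-to-comma replacement. A minimal standalone sketch of that formatting step, using hypothetical sample values rather than the real StmtCtx plumbing:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// IndexNames entries take the form "table:index", as appended in executor/builder.go.
	indexNames := []string{"t1:a", "t2:b"}
	// fmt.Sprintf("%v", slice) renders "[t1:a t2:b]"; replacing the spaces with
	// commas produces the "[t1:a,t2:b]" form recorded in the slow log.
	formatted := strings.Replace(fmt.Sprintf("%v", indexNames), " ", ",", -1)
	fmt.Println(formatted) // prints: [t1:a,t2:b]
}
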
4 changes: 2 additions & 2 deletions executor/builder.go
@@ -1953,7 +1953,7 @@ func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) *
is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan)
ret.ranges = is.Ranges
sctx := b.ctx.GetSessionVars().StmtCtx
- sctx.IndexIDs = append(sctx.IndexIDs, is.Index.ID)
+ sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O)
return ret
}

@@ -2032,7 +2032,7 @@ func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLoo
ret.ranges = is.Ranges
executorCounterIndexLookUpExecutor.Inc()
sctx := b.ctx.GetSessionVars().StmtCtx
- sctx.IndexIDs = append(sctx.IndexIDs, is.Index.ID)
+ sctx.IndexNames = append(sctx.IndexNames, ts.Table.Name.O+":"+is.Index.Name.O)
sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID)
return ret
}
2 changes: 1 addition & 1 deletion executor/executor.go
@@ -703,7 +703,7 @@ func (e *ShowSlowExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.AppendString(7, slow.User)
req.AppendString(8, slow.DB)
req.AppendString(9, slow.TableIDs)
- req.AppendString(10, slow.IndexIDs)
+ req.AppendString(10, slow.IndexNames)
if slow.Internal {
req.AppendInt64(11, 1)
} else {
2 changes: 2 additions & 0 deletions go.mod
@@ -77,3 +77,5 @@ require (
sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67
)

+ go 1.13
4 changes: 2 additions & 2 deletions infoschema/slow_log.go
@@ -47,7 +47,7 @@ var slowQueryCols = []columnInfo{
{execdetails.TotalKeysStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
{execdetails.ProcessKeysStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
{variable.SlowLogDBStr, mysql.TypeVarchar, 64, 0, nil, nil},
- {variable.SlowLogIndexIDsStr, mysql.TypeVarchar, 100, 0, nil, nil},
+ {variable.SlowLogIndexNamesStr, mysql.TypeVarchar, 100, 0, nil, nil},
{variable.SlowLogIsInternalStr, mysql.TypeTiny, 1, 0, nil, nil},
{variable.SlowLogDigestStr, mysql.TypeVarchar, 64, 0, nil, nil},
{variable.SlowLogStatsInfoStr, mysql.TypeVarchar, 512, 0, nil, nil},
@@ -240,7 +240,7 @@ func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string)
st.processKeys, err = strconv.ParseUint(value, 10, 64)
case variable.SlowLogDBStr:
st.db = value
- case variable.SlowLogIndexIDsStr:
+ case variable.SlowLogIndexNamesStr:
st.indexIDs = value
case variable.SlowLogIsInternalStr:
st.isInternal = value == "true"
4 changes: 2 additions & 2 deletions sessionctx/stmtctx/stmtctx.go
@@ -123,7 +123,7 @@ type StatementContext struct {
MemTracker *memory.Tracker
RuntimeStatsColl *execdetails.RuntimeStatsColl
TableIDs []int64
- IndexIDs []int64
+ IndexNames []string
nowTs time.Time // use this variable for now/current_timestamp calculation/cache for one stmt
stmtTimeCached bool
StmtType string
@@ -419,7 +419,7 @@ func (sc *StatementContext) ResetForRetry() {
sc.mu.allExecDetails = make([]*execdetails.ExecDetails, 0, 4)
sc.mu.Unlock()
sc.TableIDs = sc.TableIDs[:0]
- sc.IndexIDs = sc.IndexIDs[:0]
+ sc.IndexNames = sc.IndexNames[:0]
}

// MergeExecDetails merges a single region execution details into self, used to print
12 changes: 6 additions & 6 deletions sessionctx/variable/session.go
@@ -985,8 +985,8 @@ const (
SlowLogDBStr = "DB"
// SlowLogIsInternalStr is slow log field name.
SlowLogIsInternalStr = "Is_internal"
- // SlowLogIndexIDsStr is slow log field name.
- SlowLogIndexIDsStr = "Index_ids"
+ // SlowLogIndexNamesStr is slow log field name.
+ SlowLogIndexNamesStr = "Index_names"
// SlowLogDigestStr is slow log field name.
SlowLogDigestStr = "Digest"
// SlowLogQuerySQLStr is slow log field name.
@@ -1026,7 +1026,7 @@ const (
// # Query_time: 4.895492
// # Process_time: 0.161 Request_count: 1 Total_keys: 100001 Processed_keys: 100000
// # DB: test
- // # Index_ids: [1,2]
+ // # Index_names: [t1.idx1,t2.idx2]
// # Is_internal: false
// # Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
// # Stats: t1:1,t2:2
@@ -1036,7 +1036,7 @@
// # Memory_max: 4096
// # Succ: true
// select * from t_slim;
- func (s *SessionVars) SlowLogFormat(txnTS uint64, costTime time.Duration, execDetail execdetails.ExecDetails, indexIDs string, digest string,
+ func (s *SessionVars) SlowLogFormat(txnTS uint64, costTime time.Duration, execDetail execdetails.ExecDetails, indexNames string, digest string,
statsInfos map[string]uint64, copTasks *stmtctx.CopTasksDetails, memMax int64, succ bool, sql string) string {
var buf bytes.Buffer
execDetailStr := execDetail.String()
@@ -1054,8 +1054,8 @@ func (s *SessionVars) SlowLogFormat(txnTS uint64, costTime time.Duration, execDe
if len(s.CurrentDB) > 0 {
buf.WriteString(SlowLogRowPrefixStr + SlowLogDBStr + SlowLogSpaceMarkStr + s.CurrentDB + "\n")
}
- if len(indexIDs) > 0 {
- buf.WriteString(SlowLogRowPrefixStr + SlowLogIndexIDsStr + SlowLogSpaceMarkStr + indexIDs + "\n")
+ if len(indexNames) > 0 {
+ buf.WriteString(SlowLogRowPrefixStr + SlowLogIndexNamesStr + SlowLogSpaceMarkStr + indexNames + "\n")
}
buf.WriteString(SlowLogRowPrefixStr + SlowLogIsInternalStr + SlowLogSpaceMarkStr + strconv.FormatBool(s.InRestrictedSQL) + "\n")
if len(digest) > 0 {
4 changes: 2 additions & 2 deletions sessionctx/variable/session_test.go
@@ -125,7 +125,7 @@ func (*testSessionSuite) TestSlowLogFormat(c *C) {
# Query_time: 1
# Process_time: 2 Wait_time: 60 Backoff_time: 0.001 Request_count: 2 Total_keys: 10000 Process_keys: 20001
# DB: test
- # Index_ids: [1,2]
+ # Index_names: [t1:a,t2:b]
# Is_internal: true
# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
# Stats: t1:pseudo
@@ -137,6 +137,6 @@ func (*testSessionSuite) TestSlowLogFormat(c *C) {
select * from t;`
sql := "select * from t"
digest := parser.DigestHash(sql)
- logString := seVar.SlowLogFormat(txnTS, costTime, execDetail, "[1,2]", digest, statsInfos, copTasks, memMax, true, sql)
+ logString := seVar.SlowLogFormat(txnTS, costTime, execDetail, "[t1:a,t2:b]", digest, statsInfos, copTasks, memMax, true, sql)
c.Assert(logString, Equals, resultString)
}
8 changes: 4 additions & 4 deletions util/expensivequery/expensivequery.go
@@ -133,14 +133,14 @@ func logExpensiveQuery(costTime time.Duration, info *util.ProcessInfo) {
if info.DB != nil && len(info.DB.(string)) > 0 {
logFields = append(logFields, zap.String("database", info.DB.(string)))
}
- var tableIDs, indexIDs string
+ var tableIDs, indexNames string
if len(info.StmtCtx.TableIDs) > 0 {
tableIDs = strings.Replace(fmt.Sprintf("%v", info.StmtCtx.TableIDs), " ", ",", -1)
logFields = append(logFields, zap.String("table_ids", tableIDs))
}
- if len(info.StmtCtx.IndexIDs) > 0 {
- indexIDs = strings.Replace(fmt.Sprintf("%v", info.StmtCtx.IndexIDs), " ", ",", -1)
- logFields = append(logFields, zap.String("index_ids", indexIDs))
+ if len(info.StmtCtx.IndexNames) > 0 {
+ indexNames = strings.Replace(fmt.Sprintf("%v", info.StmtCtx.IndexNames), " ", ",", -1)
+ logFields = append(logFields, zap.String("index_ids", indexNames))
}
logFields = append(logFields, zap.Uint64("txn_start_ts", info.CurTxnStartTS))
if memTracker := info.StmtCtx.MemTracker; memTracker != nil {
