Merge branch 'master' into fix_pause_config
3pointer committed May 10, 2022
2 parents be41a5a + adebe44 commit c7d6295
Showing 58 changed files with 764 additions and 449 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -13,7 +13,7 @@
# limitations under the License.

# Builder image
-FROM golang:1.16-alpine as builder
+FROM golang:1.18.1-alpine as builder

RUN apk add --no-cache \
wget \
10 changes: 5 additions & 5 deletions br/pkg/lightning/restore/table_restore_test.go
@@ -1414,7 +1414,7 @@ func (s *tableRestoreSuite) TestSchemaIsValid() {
{
// colB doesn't have the default value
Name: model.NewCIStr("colB"),
-FieldType: types.NewFieldTypeBuilderP().SetType(0).SetFlag(1).Build(),
+FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(),
},
},
},
@@ -1566,7 +1566,7 @@ func (s *tableRestoreSuite) TestSchemaIsValid() {
{
// colC doesn't have the default value
Name: model.NewCIStr("colC"),
-FieldType: types.NewFieldTypeBuilderP().SetType(0).SetFlag(1).Build(),
+FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(),
},
},
},
@@ -1617,7 +1617,7 @@ func (s *tableRestoreSuite) TestSchemaIsValid() {
{
// colB doesn't have the default value
Name: model.NewCIStr("colB"),
-FieldType: types.NewFieldTypeBuilderP().SetType(0).SetFlag(1).Build(),
+FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(),
},
{
// colC has the default value
@@ -1820,11 +1820,11 @@ func (s *tableRestoreSuite) TestGBKEncodedSchemaIsValid() {
Columns: []*model.ColumnInfo{
{
Name: model.NewCIStr("colA"),
-FieldType: types.NewFieldTypeBuilderP().SetType(0).SetFlag(1).Build(),
+FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(),
},
{
Name: model.NewCIStr("colB"),
-FieldType: types.NewFieldTypeBuilderP().SetType(0).SetFlag(1).Build(),
+FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(),
},
},
},
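The recurring change in this and several later test files is the rename of the field-type builder constructor from types.NewFieldTypeBuilderP() to types.NewFieldTypeBuilder(), keeping the chained Set*() calls and the Build()/BuildP() finishers. Below is a minimal sketch of the two call shapes as the updated tests use them; only the builder calls themselves are taken from the diff, while the wrapper function and package name are illustrative.

```go
package example

import (
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/types"
)

// buildFieldTypes mirrors the two call shapes seen in the updated tests:
// Build() where a value is stored in a struct field, BuildP() where a
// *types.FieldType is appended to a slice.
func buildFieldTypes() (types.FieldType, []*types.FieldType) {
	byValue := types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build()
	pointers := []*types.FieldType{
		types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).BuildP(),
		types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).BuildP(),
	}
	return byValue, pointers
}
```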
5 changes: 2 additions & 3 deletions ddl/util/syncer.go
@@ -27,7 +27,6 @@ import (
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/owner"
"github.com/pingcap/tidb/parser/terror"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
@@ -172,7 +171,7 @@ func (s *schemaVersionSyncer) Init(ctx context.Context) error {
return errors.Trace(err)
}
logPrefix := fmt.Sprintf("[%s] %s", ddlPrompt, s.selfSchemaVerPath)
-session, err := owner.NewSession(ctx, logPrefix, s.etcdCli, owner.NewSessionDefaultRetryCnt, SyncerSessionTTL)
+session, err := tidbutil.NewSession(ctx, logPrefix, s.etcdCli, tidbutil.NewSessionDefaultRetryCnt, SyncerSessionTTL)
if err != nil {
return errors.Trace(err)
}
@@ -217,7 +216,7 @@ func (s *schemaVersionSyncer) Restart(ctx context.Context) error {

logPrefix := fmt.Sprintf("[%s] %s", ddlPrompt, s.selfSchemaVerPath)
// NewSession's context will affect the exit of the session.
-session, err := owner.NewSession(ctx, logPrefix, s.etcdCli, owner.NewSessionRetryUnlimited, SyncerSessionTTL)
+session, err := tidbutil.NewSession(ctx, logPrefix, s.etcdCli, tidbutil.NewSessionRetryUnlimited, SyncerSessionTTL)
if err != nil {
return errors.Trace(err)
}
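This file, together with the domain/infosync changes below, moves etcd session creation from the owner package to the helpers in github.com/pingcap/tidb/util (imported as tidbutil or util2). The sketch below shows a call site under the new import; the helper names and argument order come from the diff, while the etcd client import path, the TTL constant, and the wrapper function are assumptions for illustration.

```go
package example

import (
	"context"
	"fmt"

	tidbutil "github.com/pingcap/tidb/util"
	clientv3 "go.etcd.io/etcd/client/v3" // assumed etcd client import path
)

const exampleSessionTTL = 90 // illustrative TTL, not the real SyncerSessionTTL

// newEtcdSession shows the relocated helper: bounded retries on the Init path,
// while the Restart paths in the diff pass tidbutil.NewSessionRetryUnlimited.
func newEtcdSession(ctx context.Context, cli *clientv3.Client, path string) error {
	logPrefix := fmt.Sprintf("[ddl] %s", path)
	sess, err := tidbutil.NewSession(ctx, logPrefix, cli, tidbutil.NewSessionDefaultRetryCnt, exampleSessionTTL)
	if err != nil {
		return err
	}
	_ = sess.Lease() // callers attach this lease to the etcd keys they keep alive
	return nil
}
```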
3 changes: 1 addition & 2 deletions ddl/util/syncer_test.go
@@ -25,7 +25,6 @@ import (
. "github.com/pingcap/tidb/ddl"
. "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/owner"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util"
@@ -162,7 +161,7 @@ func TestSyncerSimple(t *testing.T) {
NeededCleanTTL = int64(11)
ttlKey := "session_ttl_key"
ttlVal := "session_ttl_val"
session, err := owner.NewSession(ctx, "", cli, owner.NewSessionDefaultRetryCnt, ttl)
session, err := util.NewSession(ctx, "", cli, util.NewSessionDefaultRetryCnt, ttl)
require.NoError(t, err)
require.NoError(t, PutKVToEtcd(context.Background(), cli, 5, ttlKey, ttlVal, clientv3.WithLease(session.Lease())))

2 changes: 1 addition & 1 deletion distsql/distsql_test.go
@@ -249,7 +249,7 @@ func (resp *mockResponse) Next(context.Context) (kv.ResultSubset, error) {

colTypes := make([]*types.FieldType, 4)
for i := 0; i < 4; i++ {
-colTypes[i] = types.NewFieldTypeBuilderP().SetType(mysql.TypeLonglong).BuildP()
+colTypes[i] = types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).BuildP()
}
chk := chunk.New(colTypes, numRows, numRows)

13 changes: 6 additions & 7 deletions domain/infosync/info.go
@@ -37,7 +37,6 @@ import (
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/owner"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
@@ -196,14 +195,14 @@ func GlobalInfoSyncerInit(ctx context.Context, id string, serverIDGetter func()

// Init creates a new etcd session and stores server info to etcd.
func (is *InfoSyncer) init(ctx context.Context, skipRegisterToDashboard bool) error {
-err := is.newSessionAndStoreServerInfo(ctx, owner.NewSessionDefaultRetryCnt)
+err := is.newSessionAndStoreServerInfo(ctx, util2.NewSessionDefaultRetryCnt)
if err != nil {
return err
}
if skipRegisterToDashboard {
return nil
}
-return is.newTopologySessionAndStoreServerInfo(ctx, owner.NewSessionDefaultRetryCnt)
+return is.newTopologySessionAndStoreServerInfo(ctx, util2.NewSessionDefaultRetryCnt)
}

// SetSessionManager set the session manager for InfoSyncer.
@@ -683,12 +682,12 @@ func (is *InfoSyncer) TopologyDone() <-chan struct{} {

// Restart restart the info syncer with new session leaseID and store server info to etcd again.
func (is *InfoSyncer) Restart(ctx context.Context) error {
-return is.newSessionAndStoreServerInfo(ctx, owner.NewSessionDefaultRetryCnt)
+return is.newSessionAndStoreServerInfo(ctx, util2.NewSessionDefaultRetryCnt)
}

// RestartTopology restart the topology syncer with new session leaseID and store server info to etcd again.
func (is *InfoSyncer) RestartTopology(ctx context.Context) error {
-return is.newTopologySessionAndStoreServerInfo(ctx, owner.NewSessionDefaultRetryCnt)
+return is.newTopologySessionAndStoreServerInfo(ctx, util2.NewSessionDefaultRetryCnt)
}

// GetAllTiDBTopology gets all tidb topology
@@ -718,7 +717,7 @@ func (is *InfoSyncer) newSessionAndStoreServerInfo(ctx context.Context, retryCnt
return nil
}
logPrefix := fmt.Sprintf("[Info-syncer] %s", is.serverInfoPath)
-session, err := owner.NewSession(ctx, logPrefix, is.etcdCli, retryCnt, InfoSessionTTL)
+session, err := util2.NewSession(ctx, logPrefix, is.etcdCli, retryCnt, InfoSessionTTL)
if err != nil {
return err
}
@@ -737,7 +736,7 @@ func (is *InfoSyncer) newTopologySessionAndStoreServerInfo(ctx context.Context,
return nil
}
logPrefix := fmt.Sprintf("[topology-syncer] %s/%s:%d", TopologyInformationPath, is.info.IP, is.info.Port)
-session, err := owner.NewSession(ctx, logPrefix, is.etcdCli, retryCnt, TopologySessionTTL)
+session, err := util2.NewSession(ctx, logPrefix, is.etcdCli, retryCnt, TopologySessionTTL)
if err != nil {
return err
}
8 changes: 4 additions & 4 deletions domain/infosync/info_test.go
@@ -28,8 +28,8 @@ import (
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/owner"
"github.com/pingcap/tidb/parser/model"
util2 "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/testbridge"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/tests/v3/integration"
@@ -69,7 +69,7 @@ func TestTopology(t *testing.T) {
info, err := GlobalInfoSyncerInit(ctx, currentID, func() uint64 { return 1 }, client, false)
require.NoError(t, err)

-err = info.newTopologySessionAndStoreServerInfo(ctx, owner.NewSessionDefaultRetryCnt)
+err = info.newTopologySessionAndStoreServerInfo(ctx, util2.NewSessionDefaultRetryCnt)
require.NoError(t, err)

topology, err := info.getTopologyFromEtcd(ctx)
@@ -84,7 +84,7 @@ func TestTopology(t *testing.T) {
nonTTLKey := fmt.Sprintf("%s/%s:%v/info", TopologyInformationPath, info.info.IP, info.info.Port)
ttlKey := fmt.Sprintf("%s/%s:%v/ttl", TopologyInformationPath, info.info.IP, info.info.Port)

-err = util.DeleteKeyFromEtcd(nonTTLKey, client, owner.NewSessionDefaultRetryCnt, time.Second)
+err = util.DeleteKeyFromEtcd(nonTTLKey, client, util2.NewSessionDefaultRetryCnt, time.Second)
require.NoError(t, err)

// Refresh and re-test if the key exists
@@ -107,7 +107,7 @@ func TestTopology(t *testing.T) {
require.NoError(t, err)
require.True(t, ttlExists)

-err = util.DeleteKeyFromEtcd(ttlKey, client, owner.NewSessionDefaultRetryCnt, time.Second)
+err = util.DeleteKeyFromEtcd(ttlKey, client, util2.NewSessionDefaultRetryCnt, time.Second)
require.NoError(t, err)

err = info.updateTopologyAliveness(ctx)
2 changes: 1 addition & 1 deletion executor/collation_test.go
@@ -26,7 +26,7 @@
)

func TestVecGroupChecker(t *testing.T) {
-tp := types.NewFieldTypeBuilderP().SetType(mysql.TypeVarchar).BuildP()
+tp := types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).BuildP()
col0 := &expression.Column{
RetType: tp,
Index: 0,
4 changes: 2 additions & 2 deletions executor/executor_required_rows_test.go
@@ -757,7 +757,7 @@ func genTestChunk4VecGroupChecker(chkRows []int, sameNum int) (expr []expression

expr = make([]expression.Expression, 1)
expr[0] = &expression.Column{
-RetType: types.NewFieldTypeBuilderP().SetType(mysql.TypeLonglong).SetFlen(mysql.MaxIntWidth).BuildP(),
+RetType: types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetFlen(mysql.MaxIntWidth).BuildP(),
Index: 0,
}
return
@@ -870,7 +870,7 @@ func TestVecGroupCheckerDATARACE(t *testing.T) {
for _, mType := range mTypes {
exprs := make([]expression.Expression, 1)
exprs[0] = &expression.Column{
-RetType: types.NewFieldTypeBuilderP().SetType(mType).BuildP(),
+RetType: types.NewFieldTypeBuilder().SetType(mType).BuildP(),
Index: 0,
}
vgc := newVecGroupChecker(ctx, exprs)
4 changes: 4 additions & 0 deletions executor/explainfor_test.go
@@ -139,6 +139,10 @@ func TestExplainForVerbose(t *testing.T) {
require.Equal(t, rs2[i][j], rs[i][j])
}
}
tk.MustQuery("explain format = 'verbose' select * from t1").Rows()
tk.MustQuery("explain format = 'VERBOSE' select * from t1").Rows()
tk.MustQuery("explain analyze format = 'verbose' select * from t1").Rows()
tk.MustQuery("explain analyze format = 'VERBOSE' select * from t1").Rows()
}

func TestIssue11124(t *testing.T) {
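The four added queries run EXPLAIN and EXPLAIN ANALYZE with FORMAT = 'verbose' spelled in both lower and upper case, which suggests the format name is matched case-insensitively. A hedged variant of the same check in the surrounding testkit style; the helper name and the assumption that table t1 already exists are illustrative.

```go
package example

import (
	"fmt"
	"testing"

	"github.com/pingcap/tidb/testkit"
)

// checkVerboseFormatCase runs the same statements the added lines do, looping
// over the two spellings the test covers.
func checkVerboseFormatCase(t *testing.T, tk *testkit.TestKit) {
	for _, format := range []string{"verbose", "VERBOSE"} {
		tk.MustQuery(fmt.Sprintf("explain format = '%s' select * from t1", format)).Rows()
		tk.MustQuery(fmt.Sprintf("explain analyze format = '%s' select * from t1", format)).Rows()
	}
}
```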
18 changes: 9 additions & 9 deletions executor/hash_table_test.go
@@ -34,12 +34,12 @@
func initBuildChunk(numRows int) (*chunk.Chunk, []*types.FieldType) {
numCols := 6
colTypes := make([]*types.FieldType, 0, numCols)
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeLonglong).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeLonglong).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeVarchar).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeVarchar).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeNewDecimal).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeJSON).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeJSON).BuildP())

oldChk := chunk.NewChunkWithCapacity(colTypes, numRows)
for i := 0; i < numRows; i++ {
@@ -57,9 +57,9 @@ func initBuildChunk(numRows int) (*chunk.Chunk, []*types.FieldType) {
func initProbeChunk(numRows int) (*chunk.Chunk, []*types.FieldType) {
numCols := 3
colTypes := make([]*types.FieldType, 0, numCols)
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeLonglong).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeLonglong).BuildP())
-colTypes = append(colTypes, types.NewFieldTypeBuilderP().SetType(mysql.TypeVarchar).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).BuildP())
+colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).BuildP())

oldChk := chunk.NewChunkWithCapacity(colTypes, numRows)
for i := 0; i < numRows; i++ {
4 changes: 4 additions & 0 deletions executor/index_lookup_hash_join.go
@@ -464,6 +464,10 @@ func (iw *indexHashJoinInnerWorker) run(ctx context.Context, cancelFunc context.
}
h, resultCh := fnv.New64(), iw.resultCh
for {
+// The previous task has been processed, so release the occupied memory
+if task != nil {
+task.memTracker.Detach()
+}
select {
case <-ctx.Done():
return
6 changes: 5 additions & 1 deletion executor/index_lookup_join.go
@@ -318,6 +318,10 @@ func (e *IndexLookUpJoin) getFinishedTask(ctx context.Context) (*lookUpJoinTask,
return task, nil
}

+// The previous task has been processed, so release the occupied memory
+if task != nil {
+task.memTracker.Detach()
+}
select {
case task = <-e.resultCh:
case <-ctx.Done():
@@ -559,7 +563,7 @@ func (iw *innerWorker) constructLookupContent(task *lookUpJoinTask) ([]*indexJoi
return nil, err
}
if rowIdx == 0 {
-iw.lookup.memTracker.Consume(types.EstimatedMemUsage(dLookUpKey, numRows))
+iw.memTracker.Consume(types.EstimatedMemUsage(dLookUpKey, numRows))
}
if dHashKey == nil {
// Append null to make lookUpKeys the same length as outer Result.
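The two hunks above apply the same memory-accounting fix to the index hash join and the index lookup join: before fetching the next task, the tracker of the task that has just been processed is detached so its bytes stop counting against the executor, and the estimated lookup-key memory is charged to the inner worker's own tracker (iw.memTracker) instead of the parent executor's. Below is a minimal, self-contained sketch of that detach-before-next-task loop; the tracker type is a toy stand-in, not TiDB's util/memory.Tracker.

```go
package example

import "context"

// tracker is a toy stand-in for a hierarchical memory tracker.
type tracker struct{ bytes int64 }

// Detach releases this tracker from its parent so its bytes stop being counted.
func (t *tracker) Detach() { t.bytes = 0 }

type lookUpTask struct{ memTracker *tracker }

// drainTasks mirrors the loop structure in the diff: detach the previous
// task's tracker, then block on either cancellation or the next task.
func drainTasks(ctx context.Context, resultCh <-chan *lookUpTask) {
	var task *lookUpTask
	for {
		// The previous task has been processed, so release the occupied memory.
		if task != nil {
			task.memTracker.Detach()
		}
		select {
		case <-ctx.Done():
			return
		case task = <-resultCh:
			if task == nil {
				return
			}
			// ... handle the task ...
		}
	}
}
```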
2 changes: 1 addition & 1 deletion executor/shuffle_test.go
@@ -29,7 +29,7 @@ func TestPartitionRangeSplitter(t *testing.T) {
ctx := mock.NewContext()
concurrency := 2

-tp := types.NewFieldTypeBuilderP().SetType(mysql.TypeVarchar).BuildP()
+tp := types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).BuildP()
col0 := &expression.Column{
RetType: tp,
Index: 0,
11 changes: 11 additions & 0 deletions executor/slow_query.go
@@ -663,6 +663,17 @@ func (e *slowQueryRetriever) parseLog(ctx context.Context, sctx sessionctx.Conte
func (e *slowQueryRetriever) setColumnValue(sctx sessionctx.Context, row []types.Datum, tz *time.Location, field, value string, checker *slowLogChecker, lineNum int) bool {
factory := e.columnValueFactoryMap[field]
if factory == nil {
+// Fix issue 34320: when the slow log time is not in the output columns, the time filter condition is mistakenly discarded.
+if field == variable.SlowLogTimeStr && checker != nil {
+t, err := ParseTime(value)
+if err != nil {
+err = fmt.Errorf("Parse slow log at line %v, failed field is %v, failed value is %v, error is %v", lineNum, field, value, err)
+sctx.GetSessionVars().StmtCtx.AppendWarning(err)
+return false
+}
+timeValue := types.NewTime(types.FromGoTime(t), mysql.TypeTimestamp, types.MaxFsp)
+return checker.isTimeValid(timeValue)
+}
return true
}
valid, err := factory(row, value, tz, checker)
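The new branch in setColumnValue handles the case where the Time column has no value factory because it is not among the selected output columns: previously the function returned true early and the WHERE time range was silently ignored (issue 34320); now the # Time: field is still parsed and checked against the retriever's time filter. A simplified sketch of that idea follows; ParseTime, types.NewTime, and slowLogChecker are the real names in the diff, while the types below are stand-ins.

```go
package example

import "time"

// timeChecker is a stand-in for the slowLogChecker's time-range check.
type timeChecker struct{ start, end time.Time }

func (c *timeChecker) isTimeValid(t time.Time) bool {
	return (c.start.IsZero() || !t.Before(c.start)) && (c.end.IsZero() || !t.After(c.end))
}

// keepEntry decides whether a slow-log entry survives the time filter even
// though the time column itself will not be returned to the client.
func keepEntry(checker *timeChecker, rawTime string) (bool, error) {
	if checker == nil {
		// No time filter pushed down; keep the entry.
		return true, nil
	}
	t, err := time.Parse(time.RFC3339Nano, rawTime) // e.g. 2022-04-21T14:44:54.103041447+08:00
	if err != nil {
		return false, err
	}
	return checker.isTimeValid(t), nil
}
```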
37 changes: 37 additions & 0 deletions executor/slow_query_sql_test.go
@@ -150,9 +150,42 @@ func TestSlowQuery(t *testing.T) {
require.NoError(t, err)
_, err = f.WriteString(`
# Time: 2020-10-13T20:08:13.970563+08:00
# Plan_digest: 0368dd12858f813df842c17bcb37ca0e8858b554479bebcd78da1f8c14ad12d0
select * from t;
# Time: 2020-10-16T20:08:13.970563+08:00
# Plan_digest: 0368dd12858f813df842c17bcb37ca0e8858b554479bebcd78da1f8c14ad12d0
select * from t;
# Time: 2022-04-21T14:44:54.103041447+08:00
# Txn_start_ts: 432674816242745346
# Query_time: 59.251052432
# Parse_time: 0
# Compile_time: 21.36997765
# Rewrite_time: 2.107040149
# Optimize_time: 12.866449698
# Wait_TS: 1.485568827
# Cop_time: 8.619838386 Request_count: 1 Total_keys: 1 Rocksdb_block_cache_hit_count: 3
# Index_names: [bind_info:time_index]
# Is_internal: true
# Digest: caf0da652413a857b1ded77811703043e52753ca8a466e20e89c6b74d9662783
# Stats: bind_info:pseudo
# Num_cop_tasks: 1
# Cop_proc_avg: 0 Cop_proc_addr: 172.16.6.173:40161
# Cop_wait_avg: 0 Cop_wait_addr: 172.16.6.173:40161
# Mem_max: 186
# Prepared: false
# Plan_from_cache: false
# Plan_from_binding: false
# Has_more_results: false
# KV_total: 4.032247202
# PD_total: 0.108570401
# Backoff_total: 0
# Write_sql_response_total: 0
# Result_rows: 0
# Succ: true
# IsExplicitTxn: false
# Plan: tidb_decode_plan('8gW4MAkxNF81CTAJMzMzMy4zMwlteXNxbC5iaW5kX2luZm8udXBkYXRlX3RpbWUsIG06HQAMY3JlYQ0ddAkwCXRpbWU6MTkuM3MsIGxvb3BzOjEJMCBCeXRlcxEIIAoxCTMwXzEzCRlxFTkINy40GTkYLCAJMTg2IAk9OE4vQQoyCTQ3XzExCTFfMBWsFHRhYmxlOhWsHCwgaW5kZXg6AYgAXwULCCh1cBW+OCksIHJhbmdlOigwMDAwLQUDDCAwMDoFAwAuARSgMDAsK2luZl0sIGtlZXAgb3JkZXI6ZmFsc2UsIHN0YXRzOnBzZXVkbwkN6wg5LjYysgDAY29wX3Rhc2s6IHtudW06IDEsIG1heDogNS4wNnMsIHByb2Nfa2V5czogMCwgcnBjXxEmAQwBtRw6IDQuMDVzLAFKSHJfY2FjaGVfaGl0X3JhdGlvOiABphh9LCB0aWt2CWgAewU1ADA5Nlh9LCBzY2FuX2RldGFpbDoge3RvdGFsXwF6CGVzcxl9RhcAFF9zaXplOgGZCRwAawWogDEsIHJvY2tzZGI6IHtkZWxldGVfc2tpcHBlZF9jb3VudAUyCGtleUoWAAxibG9jIQsZxw0yFDMsIHJlYS5BAAUPCGJ5dAGBKfMYfX19CU4vQQEEIfoQNV8xMgly+gGCsgEgCU4vQQlOL0EK')
# Plan_digest: c338c3017eb2e4980cb49c8f804fea1fb7c1104aede2385f12909cdd376799b3
SELECT original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source FROM mysql.bind_info WHERE update_time > '0000-00-00 00:00:00' ORDER BY update_time, create_time;
`)
require.NoError(t, err)
require.NoError(t, f.Close())
@@ -171,4 +204,8 @@ select * from t;
tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2020-10-16 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2019-10-13 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("2"))
+// Cover tidb issue 34320
+tk.MustQuery("select count(plan_digest) from `information_schema`.`slow_query` where time > '2019-10-13 20:08:13' and time < now();").Check(testkit.Rows("3"))
+tk.MustQuery("select count(plan_digest) from `information_schema`.`slow_query` where time > '2022-04-29 17:50:00'").Check(testkit.Rows("0"))
+tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time < '2010-01-02 15:04:05'").Check(testkit.Rows("0"))
}
2 changes: 1 addition & 1 deletion expression/aggregation/base_func.go
@@ -293,7 +293,7 @@ func (a *baseFuncDesc) typeInfer4MaxMin(ctx sessionctx.Context) {
// issue #13027, #13961
if (a.RetTp.GetType() == mysql.TypeEnum || a.RetTp.GetType() == mysql.TypeSet) &&
(a.Name != ast.AggFuncFirstRow && a.Name != ast.AggFuncMax && a.Name != ast.AggFuncMin) {
-a.RetTp = types.NewFieldTypeBuilderP().SetType(mysql.TypeString).SetFlen(mysql.MaxFieldCharLength).BuildP()
+a.RetTp = types.NewFieldTypeBuilder().SetType(mysql.TypeString).SetFlen(mysql.MaxFieldCharLength).BuildP()
}
}

