diff --git a/config/config.go b/config/config.go index 7d6560b5d6783..664cb5bceaa7d 100644 --- a/config/config.go +++ b/config/config.go @@ -60,9 +60,9 @@ const ( DefHost = "0.0.0.0" // DefStatusHost is the default status host of TiDB DefStatusHost = "0.0.0.0" - // Def TableColumnCountLimit is limit of the number of columns in a table + // DefTableColumnCountLimit is limit of the number of columns in a table DefTableColumnCountLimit = 1017 - // Def TableColumnCountLimit is maximum limitation of the number of columns in a table + // DefMaxOfTableColumnCountLimit is maximum limitation of the number of columns in a table DefMaxOfTableColumnCountLimit = 4096 ) @@ -73,7 +73,7 @@ var ( "tikv": true, "unistore": true, } - // checkTableBeforeDrop enable to execute `admin check table` before `drop table`. + // CheckTableBeforeDrop enable to execute `admin check table` before `drop table`. CheckTableBeforeDrop = false // checkBeforeDropLDFlag is a go build flag. checkBeforeDropLDFlag = "None" diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index fe6bca7dc4563..405ada57f15ec 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -969,9 +969,9 @@ func (s *testIntegrationSuite5) TestModifyColumnOption(c *C) { func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("create database if not exists test") tk.MustExec("use test") + tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int, b int as (a + 1), c int as (b + 1))") tk.MustExec("insert into t (a) values (1)") @@ -980,42 +980,72 @@ func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn(c *C) { res := tk.MustQuery("select * from t use index(idx) where c > 1") tk.MustQuery("select * from t ignore index(idx) where c > 1").Check(res.Rows()) tk.MustExec("admin check table t") +} + +func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn1(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test") + tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int, b int as (a + 1), c int as (b + 1), d int as (c + 1))") tk.MustExec("insert into t (a) values (1)") tk.MustExec("create index idx on t (d)") tk.MustQuery("select * from t where d > 2").Check(testkit.Rows("1 2 3 4")) - res = tk.MustQuery("select * from t use index(idx) where d > 2") + res := tk.MustQuery("select * from t use index(idx) where d > 2") tk.MustQuery("select * from t ignore index(idx) where d > 2").Check(res.Rows()) tk.MustExec("admin check table t") +} + +func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn2(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test") + tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a bigint, b decimal as (a+1), c varchar(20) as (b*2), d float as (a*23+b-1+length(c)))") tk.MustExec("insert into t (a) values (1)") tk.MustExec("create index idx on t (d)") tk.MustQuery("select * from t where d > 2").Check(testkit.Rows("1 2 4 25")) - res = tk.MustQuery("select * from t use index(idx) where d > 2") + res := tk.MustQuery("select * from t use index(idx) where d > 2") tk.MustQuery("select * from t ignore index(idx) where d > 2").Check(res.Rows()) tk.MustExec("admin check table t") +} + +func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn3(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists 
test") + tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a varchar(10), b float as (length(a)+123), c varchar(20) as (right(a, 2)), d float as (b+b-7+1-3+3*ASCII(c)))") tk.MustExec("insert into t (a) values ('adorable')") tk.MustExec("create index idx on t (d)") tk.MustQuery("select * from t where d > 2").Check(testkit.Rows("adorable 131 le 577")) // 131+131-7+1-3+3*108 - res = tk.MustQuery("select * from t use index(idx) where d > 2") + res := tk.MustQuery("select * from t use index(idx) where d > 2") tk.MustQuery("select * from t ignore index(idx) where d > 2").Check(res.Rows()) tk.MustExec("admin check table t") +} + +func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn4(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test") + tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a bigint, b decimal as (a), c int(10) as (a+b), d float as (a+b+c), e decimal as (a+b+c+d))") tk.MustExec("insert into t (a) values (1)") tk.MustExec("create index idx on t (d)") tk.MustQuery("select * from t where d > 2").Check(testkit.Rows("1 1 2 4 8")) - res = tk.MustQuery("select * from t use index(idx) where d > 2") + res := tk.MustQuery("select * from t use index(idx) where d > 2") tk.MustQuery("select * from t ignore index(idx) where d > 2").Check(res.Rows()) tk.MustExec("admin check table t") +} + +func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn5(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test") + tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a bigint, b bigint as (a+1) virtual, c bigint as (b+1) virtual)") @@ -1025,7 +1055,7 @@ func (s *testIntegrationSuite4) TestIndexOnMultipleGeneratedColumn(c *C) { tk.MustExec("alter table t add column(d bigint as (c+1) virtual)") tk.MustExec("alter table t add index idx_d(d)") tk.MustQuery("select * from t where d > 2").Check(testkit.Rows("1 2 3 4")) - res = tk.MustQuery("select * from t use index(idx_d) where d > 2") + res := tk.MustQuery("select * from t use index(idx_d) where d > 2") tk.MustQuery("select * from t ignore index(idx_d) where d > 2").Check(res.Rows()) tk.MustExec("admin check table t") } diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index b03b4ca66f536..e6d77c9e674e9 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1617,7 +1617,10 @@ func checkPartitionDefinitionConstraints(ctx sessionctx.Context, tbInfo *model.T return errors.Trace(err) } if err = checkAddPartitionTooManyPartitions(uint64(len(tbInfo.Partition.Definitions))); err != nil { - return errors.Trace(err) + return err + } + if err = checkAddPartitionOnTemporaryMode(tbInfo); err != nil { + return err } switch tbInfo.Partition.Type { diff --git a/ddl/partition.go b/ddl/partition.go index 4cc71eb1c8d74..0cafa9d2ff525 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -1476,6 +1476,13 @@ func checkAddPartitionTooManyPartitions(piDefs uint64) error { return nil } +func checkAddPartitionOnTemporaryMode(tbInfo *model.TableInfo) error { + if tbInfo.Partition != nil && tbInfo.TempTableType != model.TempTableNone { + return ErrPartitionNoTemporary + } + return nil +} + func checkNoHashPartitions(ctx sessionctx.Context, partitionNum uint64) error { if partitionNum == 0 { return ast.ErrNoParts.GenWithStackByArgs("partitions") diff --git a/ddl/table.go b/ddl/table.go index acd209a7bb0da..668de3ac41c05 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ 
-56,11 +56,6 @@ func onCreateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) job.State = model.JobStateCancelled return ver, errors.Trace(err) } - if tbInfo.Partition != nil && (tbInfo.TempTableType == model.TempTableGlobal || tbInfo.TempTableType == model.TempTableLocal) { - // unsupported ddl, cancel this job. - job.State = model.JobStateCancelled - return ver, errors.Trace(ErrPartitionNoTemporary) - } tbInfo.State = model.StateNone err := checkTableNotExists(d, t, schemaID, tbInfo.Name.L) diff --git a/errors.toml b/errors.toml index a54913fa1bd2c..926823909f96e 100644 --- a/errors.toml +++ b/errors.toml @@ -501,6 +501,11 @@ error = ''' Deadlock found when trying to get lock; try restarting transaction ''' +["executor:1221"] +error = ''' +Incorrect usage of %s and %s +''' + ["executor:1242"] error = ''' Subquery returns more than 1 row diff --git a/executor/ddl.go b/executor/ddl.go index 2f10555d21e1e..5058704ee2f9e 100644 --- a/executor/ddl.go +++ b/executor/ddl.go @@ -26,12 +26,14 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/admin" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/gcutil" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" @@ -563,6 +565,11 @@ func (e *DDLExec) getRecoverTableByTableName(tableName *ast.TableName) (*model.J if tableInfo == nil || jobInfo == nil { return nil, nil, errors.Errorf("Can't find dropped/truncated table: %v in DDL history jobs", tableName.Name) } + // Dropping local temporary tables won't appear in DDL jobs. 
+ if tableInfo.TempTableType == model.TempTableGlobal { + msg := mysql.Message("Recover/flashback table is not supported on temporary tables", nil) + return nil, nil, dbterror.ClassDDL.NewStdErr(errno.ErrUnsupportedDDLOperation, msg) + } return jobInfo, tableInfo, nil } diff --git a/executor/errors.go b/executor/errors.go index 7f3345659e4f9..ad8104a96e7ee 100644 --- a/executor/errors.go +++ b/executor/errors.go @@ -42,6 +42,7 @@ var ( ErrTableaccessDenied = dbterror.ClassExecutor.NewStd(mysql.ErrTableaccessDenied) ErrBadDB = dbterror.ClassExecutor.NewStd(mysql.ErrBadDB) ErrWrongObject = dbterror.ClassExecutor.NewStd(mysql.ErrWrongObject) + ErrWrongUsage = dbterror.ClassExecutor.NewStd(mysql.ErrWrongUsage) ErrRoleNotGranted = dbterror.ClassPrivilege.NewStd(mysql.ErrRoleNotGranted) ErrDeadlock = dbterror.ClassExecutor.NewStd(mysql.ErrLockDeadlock) ErrQueryInterrupted = dbterror.ClassExecutor.NewStd(mysql.ErrQueryInterrupted) diff --git a/executor/executor_test.go b/executor/executor_test.go index e69b956f8d82b..3b168636606ed 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -5697,24 +5697,15 @@ func (s *testRecoverTable) TearDownSuite(c *C) { s.dom.Close() } -func (s *testRecoverTable) TestRecoverTable(c *C) { - c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) - defer func() { - err := failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange") - c.Assert(err, IsNil) - }() - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("create database if not exists test_recover") - tk.MustExec("use test_recover") - tk.MustExec("drop table if exists t_recover") - tk.MustExec("create table t_recover (a int);") - defer func(originGC bool) { +func (s *testRecoverTable) mockGC(tk *testkit.TestKit) (string, string, string, func()) { + originGC := ddl.IsEmulatorGCEnable() + resetGC := func() { if originGC { ddl.EmulatorGCEnable() } else { ddl.EmulatorGCDisable() } - }(ddl.IsEmulatorGCEnable()) + } // disable emulator GC. // Otherwise emulator GC will delete table record as soon as possible after execute drop table ddl. @@ -5727,6 +5718,23 @@ func (s *testRecoverTable) TestRecoverTable(c *C) { UPDATE variable_value = '%[1]s'` // clear GC variables first. tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )") + return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC +} + +func (s *testRecoverTable) TestRecoverTable(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) + defer func() { + err := failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange") + c.Assert(err, IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test_recover") + tk.MustExec("use test_recover") + tk.MustExec("drop table if exists t_recover") + tk.MustExec("create table t_recover (a int);") + + timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC := s.mockGC(tk) + defer resetGC() tk.MustExec("insert into t_recover values (1),(2),(3)") tk.MustExec("drop table t_recover") @@ -5819,24 +5827,10 @@ func (s *testRecoverTable) TestFlashbackTable(c *C) { tk.MustExec("use test_flashback") tk.MustExec("drop table if exists t_flashback") tk.MustExec("create table t_flashback (a int);") - defer func(originGC bool) { - if originGC { - ddl.EmulatorGCEnable() - } else { - ddl.EmulatorGCDisable() - } - }(ddl.IsEmulatorGCEnable()) - // Disable emulator GC. 
- // Otherwise emulator GC will delete table record as soon as possible after execute drop table ddl. - ddl.EmulatorGCDisable() - gcTimeFormat := "20060102-15:04:05 -0700 MST" - timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat) - safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '') - ON DUPLICATE KEY - UPDATE variable_value = '%[1]s'` - // Clear GC variables first. - tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )") + timeBeforeDrop, _, safePointSQL, resetGC := s.mockGC(tk) + defer resetGC() + // Set GC safe point tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // Set GC enable. @@ -5939,6 +5933,23 @@ func (s *testRecoverTable) TestFlashbackTable(c *C) { tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2", "3")) } +func (s *testRecoverTable) TestRecoverTempTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test_recover") + tk.MustExec("use test_recover") + tk.MustExec("drop table if exists t_recover") + tk.MustExec("create global temporary table t_recover (a int) on commit delete rows;") + + timeBeforeDrop, _, safePointSQL, resetGC := s.mockGC(tk) + defer resetGC() + // Set GC safe point + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + + tk.MustExec("drop table t_recover") + tk.MustGetErrCode("recover table t_recover;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("flashback table t_recover;", errno.ErrUnsupportedDDLOperation) +} + func (s *testSuiteP2) TestPointGetPreparedPlan(c *C) { tk1 := testkit.NewTestKit(c, s.store) tk1.MustExec("drop database if exists ps_text") diff --git a/executor/grant.go b/executor/grant.go index 6c715758b6c7f..86620cc0124b1 100644 --- a/executor/grant.go +++ b/executor/grant.go @@ -473,6 +473,12 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna if priv.Priv == mysql.UsagePriv { return nil } + for _, v := range mysql.StaticGlobalOnlyPrivs { + if v == priv.Priv { + return ErrWrongUsage.GenWithStackByArgs("DB GRANT", "GLOBAL PRIVILEGES") + } + } + dbName := e.Level.DBName if len(dbName) == 0 { dbName = e.ctx.GetSessionVars().CurrentDB diff --git a/executor/grant_test.go b/executor/grant_test.go index bb720b48b730b..13686494feddc 100644 --- a/executor/grant_test.go +++ b/executor/grant_test.go @@ -89,6 +89,10 @@ func (s *testSuite3) TestGrantDBScope(c *C) { sql := fmt.Sprintf("SELECT %s FROM mysql.DB WHERE User=\"testDB1\" and host=\"localhost\" and db=\"test\";", mysql.Priv2UserCol[v]) tk.MustQuery(sql).Check(testkit.Rows("Y")) } + + // Grant in wrong scope. 
+ _, err := tk.Exec(` grant create user on test.* to 'testDB1'@'localhost';`) + c.Assert(terror.ErrorEqual(err, executor.ErrWrongUsage.GenWithStackByArgs("DB GRANT", "GLOBAL PRIVILEGES")), IsTrue) } func (s *testSuite3) TestWithGrantOption(c *C) { diff --git a/executor/insert_test.go b/executor/insert_test.go index bee38e51c0fea..ffcfdc214bdb9 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -1590,3 +1590,120 @@ func (s *testSuite10) TestBinaryLiteralInsertToSet(c *C) { tk.MustExec("insert into bintest(h) values(0x61)") tk.MustQuery("select * from bintest").Check(testkit.Rows("a")) } + +var _ = SerialSuites(&testSuite13{&baseTestSuite{}}) + +type testSuite13 struct { + *baseTestSuite +} + +func (s *testSuite13) TestGlobalTempTableAutoInc(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`use test`) + tk.MustExec("drop table if exists temp_test") + tk.MustExec("create global temporary table temp_test(id int primary key auto_increment) on commit delete rows") + defer tk.MustExec("drop table if exists temp_test") + + // Data is cleared after transaction auto commits. + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select * from temp_test").Check(testkit.Rows()) + + // Data is not cleared inside a transaction. + tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select * from temp_test").Check(testkit.Rows("1")) + tk.MustExec("commit") + + // AutoID allocator is cleared. + tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select * from temp_test").Check(testkit.Rows("1")) + // Test whether auto-inc is incremental + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select id from temp_test order by id").Check(testkit.Rows("1", "2")) + tk.MustExec("commit") + + // multi-value insert + tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(0), (0)") + tk.MustQuery("select id from temp_test order by id").Check(testkit.Rows("1", "2")) + tk.MustExec("insert into temp_test(id) values(0), (0)") + tk.MustQuery("select id from temp_test order by id").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustExec("commit") + + // rebase + tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(10)") + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select id from temp_test order by id").Check(testkit.Rows("10", "11")) + tk.MustExec("insert into temp_test(id) values(20), (30)") + tk.MustExec("insert into temp_test(id) values(0), (0)") + tk.MustQuery("select id from temp_test order by id").Check(testkit.Rows("10", "11", "20", "30", "31", "32")) + tk.MustExec("commit") +} + +func (s *testSuite13) TestGlobalTempTableRowID(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`use test`) + tk.MustExec("drop table if exists temp_test") + tk.MustExec("create global temporary table temp_test(id int) on commit delete rows") + defer tk.MustExec("drop table if exists temp_test") + + // Data is cleared after transaction auto commits. + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select _tidb_rowid from temp_test").Check(testkit.Rows()) + + // Data is not cleared inside a transaction. + tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select _tidb_rowid from temp_test").Check(testkit.Rows("1")) + tk.MustExec("commit") + + // AutoID allocator is cleared. 
+ tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select _tidb_rowid from temp_test").Check(testkit.Rows("1")) + // Test whether row id is incremental + tk.MustExec("insert into temp_test(id) values(0)") + tk.MustQuery("select _tidb_rowid from temp_test order by _tidb_rowid").Check(testkit.Rows("1", "2")) + tk.MustExec("commit") + + // multi-value insert + tk.MustExec("begin") + tk.MustExec("insert into temp_test(id) values(0), (0)") + tk.MustQuery("select _tidb_rowid from temp_test order by _tidb_rowid").Check(testkit.Rows("1", "2")) + tk.MustExec("insert into temp_test(id) values(0), (0)") + tk.MustQuery("select _tidb_rowid from temp_test order by _tidb_rowid").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustExec("commit") +} + +func (s *testSuite13) TestGlobalTempTableParallel(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`use test`) + tk.MustExec("drop table if exists temp_test") + tk.MustExec("create global temporary table temp_test(id int primary key auto_increment) on commit delete rows") + defer tk.MustExec("drop table if exists temp_test") + + threads := 8 + loops := 1 + wg := sync.WaitGroup{} + wg.Add(threads) + + insertFunc := func() { + defer wg.Done() + newTk := testkit.NewTestKitWithInit(c, s.store) + newTk.MustExec("begin") + for i := 0; i < loops; i++ { + newTk.MustExec("insert temp_test value(0)") + newTk.MustExec("insert temp_test value(0), (0)") + } + maxID := strconv.Itoa(loops * 3) + newTk.MustQuery("select max(id) from temp_test").Check(testkit.Rows(maxID)) + newTk.MustExec("commit") + } + + for i := 0; i < threads; i++ { + go insertFunc() + } + wg.Wait() +} diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 0c9e63129ebf1..5be39c3a04d54 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -494,12 +494,45 @@ func (s *partitionTableSuite) TestDynamicPruneModeWithEqualExpression(c *C) { for _, t := range tests { for i := range t.partitions { sql := fmt.Sprintf(t.sql, tables[i]) - c.Assert(tk.MustPartition(sql, t.partitions[i]), IsTrue) - tk.MustQuery(sql).Sort().Check(tk.MustQuery(fmt.Sprintf(t.sql, "t")).Sort().Rows()) + tk.MustPartition(sql, t.partitions[i]).Sort().Check(tk.MustQuery(fmt.Sprintf(t.sql, "t")).Sort().Rows()) } } } +func (s *partitionTableSuite) TestAddDropPartitions(c *C) { + if israce.RaceEnabled { + c.Skip("exhaustive types test, skip race test") + } + + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create database test_add_drop_partition") + tk.MustExec("use test_add_drop_partition") + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") + + tk.MustExec(`create table t(a int) partition by range(a) ( + partition p0 values less than (5), + partition p1 values less than (10), + partition p2 values less than (15))`) + tk.MustExec(`insert into t values (2), (7), (12)`) + tk.MustPartition(`select * from t where a < 3`, "p0").Sort().Check(testkit.Rows("2")) + tk.MustPartition(`select * from t where a < 8`, "p0,p1").Sort().Check(testkit.Rows("2", "7")) + tk.MustPartition(`select * from t where a < 20`, "all").Sort().Check(testkit.Rows("12", "2", "7")) + + // remove p0 + tk.MustExec(`alter table t drop partition p0`) + tk.MustPartition(`select * from t where a < 3`, "p1").Sort().Check(testkit.Rows()) + tk.MustPartition(`select * from t where a < 8`, "p1").Sort().Check(testkit.Rows("7")) + tk.MustPartition(`select * from t where a < 20`, "all").Sort().Check(testkit.Rows("12", "7")) + + // add 2 more partitions + 
tk.MustExec(`alter table t add partition (partition p3 values less than (20))`) + tk.MustExec(`alter table t add partition (partition p4 values less than (40))`) + tk.MustExec(`insert into t values (15), (25)`) + tk.MustPartition(`select * from t where a < 3`, "p1").Sort().Check(testkit.Rows()) + tk.MustPartition(`select * from t where a < 8`, "p1").Sort().Check(testkit.Rows("7")) + tk.MustPartition(`select * from t where a < 20`, "p1,p2,p3").Sort().Check(testkit.Rows("12", "15", "7")) +} + func (s *partitionTableSuite) TestDirectReadingWithAgg(c *C) { if israce.RaceEnabled { c.Skip("exhaustive types test, skip race test") @@ -511,15 +544,15 @@ func (s *partitionTableSuite) TestDirectReadingWithAgg(c *C) { tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") // list partition table - tk.MustExec(`create table tlist(a int, b int, index idx_a(a), index idx_b(b)) partition by list(a)( + tk.MustExec(`create table tlist(a int, b int, index idx_a(a), index idx_b(b)) partition by list(a)( partition p0 values in (1, 2, 3, 4), partition p1 values in (5, 6, 7, 8), partition p2 values in (9, 10, 11, 12));`) // range partition table tk.MustExec(`create table trange(a int, b int, index idx_a(a), index idx_b(b)) partition by range(a) ( - partition p0 values less than(300), - partition p1 values less than (500), + partition p0 values less than(300), + partition p1 values less than (500), partition p2 values less than(1100));`) // hash partition table diff --git a/executor/point_get.go b/executor/point_get.go index b0cd700c5c920..c34987b7f0c1d 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -531,6 +531,9 @@ func tryDecodeFromHandle(tblInfo *model.TableInfo, schemaColIdx int, col *expres chk.AppendInt64(schemaColIdx, handle.IntValue()) return true, nil } + if types.NeedRestoredData(col.RetType) { + return false, nil + } // Try to decode common handle. 
if mysql.HasPriKeyFlag(col.RetType.Flag) { for i, hid := range pkCols { diff --git a/go.mod b/go.mod index cd8becf5a757c..c4b668a81dcf1 100644 --- a/go.mod +++ b/go.mod @@ -45,9 +45,9 @@ require ( github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 - github.com/pingcap/kvproto v0.0.0-20210429093846-65f54a202d7e + github.com/pingcap/kvproto v0.0.0-20210507054410-a8152f8a876c github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 - github.com/pingcap/parser v0.0.0-20210508071014-cd9cd78e230c + github.com/pingcap/parser v0.0.0-20210513020953-ae2c4497c07b github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible github.com/pingcap/tipb v0.0.0-20210422074242-57dd881b81b1 diff --git a/go.sum b/go.sum index 4e1030039a04f..212917e414404 100644 --- a/go.sum +++ b/go.sum @@ -436,15 +436,15 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20210429093846-65f54a202d7e h1:oUMZ6X/Kpaoxfejh9/jQ+4UZ5xk9MRYcouWJ0oXRKNE= -github.com/pingcap/kvproto v0.0.0-20210429093846-65f54a202d7e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20210507054410-a8152f8a876c h1:cy87vgUJT0U4JuxC7R14PuwBrabI9fDawYhyKTbjOBQ= +github.com/pingcap/kvproto v0.0.0-20210507054410-a8152f8a876c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 h1:ERrF0fTuIOnwfGbt71Ji3DKbOEaP189tjym50u8gpC8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20210508071014-cd9cd78e230c h1:GLFd+wBN7EsV6ad/tVGFCD37taOyzIMVs3SdiWZF18I= -github.com/pingcap/parser v0.0.0-20210508071014-cd9cd78e230c/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= +github.com/pingcap/parser v0.0.0-20210513020953-ae2c4497c07b h1:eLuDQ6eJCEKCbGwhGrkjzagwev1GJGU2Y2kFkAsBzV0= +github.com/pingcap/parser v0.0.0-20210513020953-ae2c4497c07b/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 h1:A9KL9R+lWSVPH8IqUuH1QSTRJ5FGoY1bT2IcfPKsWD8= github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041UWP+NqYzrJ3fMgC/Hw9wnmQ/tUkp/JaHly8= diff --git a/session/clustered_index_test.go b/session/clustered_index_test.go index 0f79b1b13fc2e..b7e529f29fe0e 100644 --- a/session/clustered_index_test.go +++ b/session/clustered_index_test.go @@ -665,3 +665,18 @@ func (s *testClusteredSerialSuite) 
TestPrefixedClusteredIndexUniqueKeyWithNewCol
 	tk.MustExec("admin check table t;")
 	tk.MustExec("drop table t;")
 }
+
+func (s *testClusteredSerialSuite) TestClusteredIndexNewCollationWithOldRowFormat(c *C) {
+	// This case may not be very useful, because it is hard to run with new collation enabled on TiKV (a serial suite is required),
+	// but unistore doesn't support the old row format either.
+	defer collate.SetNewCollationEnabledForTest(false)
+	collate.SetNewCollationEnabledForTest(true)
+	tk := testkit.NewTestKitWithInit(c, s.store)
+	tk.MustExec("use test;")
+	tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
+	tk.Se.GetSessionVars().RowEncoder.Enable = false
+	tk.MustExec("drop table if exists t2")
+	tk.MustExec("create table t2(col_1 varchar(132) CHARACTER SET utf8 COLLATE utf8_unicode_ci, primary key(col_1) clustered)")
+	tk.MustExec("insert into t2 select 'aBc'")
+	tk.MustQuery("select col_1 from t2 where col_1 = 'aBc'").Check(testkit.Rows("aBc"))
+}
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index d6bb5763e67d8..815f42cc03ef3 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -32,6 +32,7 @@ import (
 	"github.com/pingcap/parser"
 	"github.com/pingcap/parser/ast"
 	"github.com/pingcap/parser/auth"
+	"github.com/pingcap/parser/model"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/parser/terror"
 	pumpcli "github.com/pingcap/tidb-tools/tidb-binlog/pump_client"
@@ -49,6 +50,7 @@ import (
 	"github.com/pingcap/tidb/util/logutil"
 	"github.com/pingcap/tidb/util/rowcodec"
 	"github.com/pingcap/tidb/util/stringutil"
+	"github.com/pingcap/tidb/util/tableutil"
 	"github.com/pingcap/tidb/util/timeutil"
 	"github.com/twmb/murmur3"
 	atomic2 "go.uber.org/atomic"
@@ -174,7 +176,9 @@ type TransactionContext struct {
 	// TableDeltaMap lock to prevent potential data race
 	tdmLock sync.Mutex
 
-	GlobalTemporaryTables map[int64]struct{}
+	// GlobalTemporaryTables is used to store transaction-specific information for global temporary tables.
+	// It could also live in sessionCtx, like local temporary tables, but keeping it here makes the data easier to clean up when the transaction ends.
+	GlobalTemporaryTables map[int64]tableutil.TempTable
 }
 
 // GetShard returns the shard prefix for the next `count` rowids.
@@ -1456,6 +1460,24 @@ func (s *SessionVars) LazyCheckKeyNotExists() bool {
 	return s.PresumeKeyNotExists || (s.TxnCtx.IsPessimistic && !s.StmtCtx.DupKeyAsWarning)
 }
 
+// GetTemporaryTable returns a TempTable by tableInfo.
+func (s *SessionVars) GetTemporaryTable(tblInfo *model.TableInfo) tableutil.TempTable {
+	if tblInfo.TempTableType == model.TempTableGlobal {
+		if s.TxnCtx.GlobalTemporaryTables == nil {
+			s.TxnCtx.GlobalTemporaryTables = make(map[int64]tableutil.TempTable)
+		}
+		globalTempTables := s.TxnCtx.GlobalTemporaryTables
+		globalTempTable, ok := globalTempTables[tblInfo.ID]
+		if !ok {
+			globalTempTable = tableutil.TempTableFromMeta(tblInfo)
+			globalTempTables[tblInfo.ID] = globalTempTable
+		}
+		return globalTempTable
+	}
+	// TODO: check local temporary tables
+	return nil
+}
+
 // special session variables.
const ( SQLModeVar = "sql_mode" diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index 1e6cdc3cf09bb..bf8d1b7c893e0 100644 --- a/store/copr/batch_coprocessor.go +++ b/store/copr/batch_coprocessor.go @@ -126,8 +126,10 @@ func buildBatchCopTasks(bo *backoffer, cache *tikv.RegionCache, ranges *tikv.Key if err != nil { return nil, errors.Trace(err) } - // If the region is not found in cache, it must be out - // of date and already be cleaned up. We should retry and generate new tasks. + // When rpcCtx is nil, it's not only attributed to the miss region, but also + // some TiFlash stores crash and can't be recovered. + // That is not an error that can be easily recovered, so we regard this error + // same as rpc error. if rpcCtx == nil { needRetry = true logutil.BgLogger().Info("retry for TiFlash peer with region missing", zap.Uint64("region id", task.region.GetID())) @@ -147,8 +149,10 @@ func buildBatchCopTasks(bo *backoffer, cache *tikv.RegionCache, ranges *tikv.Key } } if needRetry { - // Backoff once for each retry. - err = bo.Backoff(tikv.BoRegionMiss, errors.New("Cannot find region with TiFlash peer")) + // As mentioned above, nil rpcCtx is always attributed to failed stores. + // It's equal to long poll the store but get no response. Here we'd better use + // TiFlash error to trigger the TiKV fallback mechanism. + err = bo.Backoff(tikv.BoTiFlashRPC, errors.New("Cannot find region with TiFlash peer")) if err != nil { return nil, errors.Trace(err) } @@ -402,6 +406,20 @@ func (b *batchCopIterator) handleBatchCopResponse(bo *backoffer, response *copro return errors.Trace(err) } + if len(response.RetryRegions) > 0 { + logutil.BgLogger().Info("multiple regions are stale and need to be refreshed", zap.Int("region size", len(response.RetryRegions))) + for idx, retry := range response.RetryRegions { + id := tikv.NewRegionVerID(retry.Id, retry.RegionEpoch.ConfVer, retry.RegionEpoch.Version) + logutil.BgLogger().Info("invalid region because tiflash detected stale region", zap.String("region id", id.String())) + b.store.GetRegionCache().InvalidateCachedRegionWithReason(id, tikv.EpochNotMatch) + if idx >= 10 { + logutil.BgLogger().Info("stale regions are too many, so we omit the rest ones") + break + } + } + return + } + resp := batchCopResponse{ pbResp: response, detail: new(CopRuntimeStats), diff --git a/store/driver/txn/snapshot.go b/store/driver/txn/snapshot.go index 95029978ed11e..ee1d1eeee29d8 100644 --- a/store/driver/txn/snapshot.go +++ b/store/driver/txn/snapshot.go @@ -17,6 +17,7 @@ import ( "context" "unsafe" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/kv" derr "github.com/pingcap/tidb/store/driver/error" "github.com/pingcap/tidb/store/tikv" @@ -76,10 +77,14 @@ func (s *tikvSnapshot) SetOption(opt int, val interface{}) { s.KVSnapshot.SetSnapshotTS(val.(uint64)) case tikvstore.ReplicaRead: s.KVSnapshot.SetReplicaRead(val.(tikvstore.ReplicaReadType)) + case tikvstore.SampleStep: + s.KVSnapshot.SetSampleStep(val.(uint32)) case tikvstore.TaskID: s.KVSnapshot.SetTaskID(val.(uint64)) case tikvstore.IsStalenessReadOnly: s.KVSnapshot.SetIsStatenessReadOnly(val.(bool)) + case tikvstore.MatchStoreLabels: + s.KVSnapshot.SetMatchStoreLabels(val.([]*metapb.StoreLabel)) default: s.KVSnapshot.SetOption(opt, val) } diff --git a/store/driver/txn/txn_driver.go b/store/driver/txn/txn_driver.go index 200f9e4e18a58..12f2c8233ccb1 100644 --- a/store/driver/txn/txn_driver.go +++ b/store/driver/txn/txn_driver.go @@ -19,6 +19,7 @@ import ( 
"github.com/opentracing/opentracing-go" "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/binloginfo" @@ -153,6 +154,8 @@ func (txn *tikvTxn) SetOption(opt int, val interface{}) { txn.SetSchemaVer(val.(tikv.SchemaVer)) case tikvstore.SchemaAmender: txn.SetSchemaAmender(val.(tikv.SchemaAmender)) + case tikvstore.SampleStep: + txn.KVTxn.GetSnapshot().SetSampleStep(val.(uint32)) case tikvstore.CommitHook: txn.SetCommitCallback(val.(func(string, error))) case tikvstore.EnableAsyncCommit: @@ -163,6 +166,8 @@ func (txn *tikvTxn) SetOption(opt int, val interface{}) { txn.SetScope(val.(string)) case tikvstore.IsStalenessReadOnly: txn.KVTxn.GetSnapshot().SetIsStatenessReadOnly(val.(bool)) + case tikvstore.MatchStoreLabels: + txn.KVTxn.GetSnapshot().SetMatchStoreLabels(val.([]*metapb.StoreLabel)) default: txn.KVTxn.SetOption(opt, val) } diff --git a/store/mockstore/unistore/tikv/server.go b/store/mockstore/unistore/tikv/server.go index f571ff4fe963f..adf3049330897 100644 --- a/store/mockstore/unistore/tikv/server.go +++ b/store/mockstore/unistore/tikv/server.go @@ -845,6 +845,11 @@ func (svr *Server) ReadIndex(context.Context, *kvrpcpb.ReadIndexRequest) (*kvrpc return &kvrpcpb.ReadIndexResponse{}, nil } +// GetLockWaitInfo implements implements the tikvpb.TikvServer interface. +func (svr *Server) GetLockWaitInfo(ctx context.Context, _ *kvrpcpb.GetLockWaitInfoRequest) (*kvrpcpb.GetLockWaitInfoResponse, error) { + return &kvrpcpb.GetLockWaitInfoResponse{}, nil +} + // transaction debugger commands. // MvccGetByKey implements implements the tikvpb.TikvServer interface. diff --git a/store/tikv/config/config_test.go b/store/tikv/config/config_test.go index f79f2d09c22c8..a47e1e7e5030b 100644 --- a/store/tikv/config/config_test.go +++ b/store/tikv/config/config_test.go @@ -34,20 +34,19 @@ func (s *testConfigSuite) TestParsePath(c *C) { } func (s *testConfigSuite) TestTxnScopeValue(c *C) { - - failpoint.Enable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope", `return("bj")`) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope", `return("bj")`), IsNil) isGlobal, v := GetTxnScopeFromConfig() c.Assert(isGlobal, IsFalse) c.Assert(v, Equals, "bj") - failpoint.Disable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope") - failpoint.Enable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope", `return("")`) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope"), IsNil) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope", `return("")`), IsNil) isGlobal, v = GetTxnScopeFromConfig() c.Assert(isGlobal, IsTrue) c.Assert(v, Equals, "global") - failpoint.Disable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope") - failpoint.Enable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope", `return("global")`) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope"), IsNil) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope", `return("global")`), IsNil) isGlobal, v = GetTxnScopeFromConfig() c.Assert(isGlobal, IsFalse) c.Assert(v, Equals, "global") - failpoint.Disable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope") + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/config/injectTxnScope"), IsNil) } diff --git a/store/tikv/extract_start_ts_test.go 
b/store/tikv/extract_start_ts_test.go index a108a0f7e41cb..b392ca365cde8 100644 --- a/store/tikv/extract_start_ts_test.go +++ b/store/tikv/extract_start_ts_test.go @@ -14,9 +14,8 @@ package tikv import ( - "context" - . "github.com/pingcap/check" + "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/mockstore/unistore" @@ -28,7 +27,7 @@ type extractStartTsSuite struct { store *KVStore } -var _ = Suite(&extractStartTsSuite{}) +var _ = SerialSuites(&extractStartTsSuite{}) func (s *extractStartTsSuite) SetUpTest(c *C) { client, pdClient, cluster, err := unistore.New("") @@ -63,60 +62,53 @@ func (s *extractStartTsSuite) SetUpTest(c *C) { func (s *extractStartTsSuite) TestExtractStartTs(c *C) { i := uint64(100) - cases := []kv.TransactionOption{ + // to prevent time change during test case execution + // we use failpoint to make it "fixed" + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/MockStalenessTimestamp", "return(200)"), IsNil) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/MockCurrentTimestamp", `return(300)`), IsNil) + + cases := []struct { + expectedTS uint64 + option kv.TransactionOption + }{ // StartTS setted - {TxnScope: oracle.GlobalTxnScope, StartTS: &i, PrevSec: nil, MinStartTS: nil, MaxPrevSec: nil}, + {100, kv.TransactionOption{TxnScope: oracle.GlobalTxnScope, StartTS: &i, PrevSec: nil, MinStartTS: nil, MaxPrevSec: nil}}, // PrevSec setted - {TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: &i, MinStartTS: nil, MaxPrevSec: nil}, + {200, kv.TransactionOption{TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: &i, MinStartTS: nil, MaxPrevSec: nil}}, // MinStartTS setted, global - {TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: &i, MaxPrevSec: nil}, + {101, kv.TransactionOption{TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: &i, MaxPrevSec: nil}}, // MinStartTS setted, local - {TxnScope: oracle.LocalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: &i, MaxPrevSec: nil}, + {102, kv.TransactionOption{TxnScope: oracle.LocalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: &i, MaxPrevSec: nil}}, // MaxPrevSec setted // however we need to add more cases to check the behavior when it fall backs to MinStartTS setted // see `TestMaxPrevSecFallback` - {TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: &i}, + {200, kv.TransactionOption{TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: &i}}, // nothing setted - {TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: nil}, - } - bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil) - stalenessTimestamp, _ := s.store.getStalenessTimestamp(bo, oracle.GlobalTxnScope, 100) - expectedTs := []uint64{ - 100, - stalenessTimestamp, - - 101, - 102, - - stalenessTimestamp, - // it's too hard to figure out the value `getTimestampWithRetry` returns - // so we just check whether it is greater than stalenessTimestamp - 0, + {300, kv.TransactionOption{TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: nil}}, } - for i, cs := range cases { - expected := expectedTs[i] - result, _ := extractStartTs(s.store, cs) - if expected == 0 { - c.Assert(result, Greater, stalenessTimestamp) - } else { - c.Assert(result, Equals, expected) - } + for _, cs := range cases { + expected := cs.expectedTS + result, _ := extractStartTs(s.store, 
cs.option) + c.Assert(result, Equals, expected) } + + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/MockStalenessTimestamp"), IsNil) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/MockCurrentTimestamp"), IsNil) } func (s *extractStartTsSuite) TestMaxPrevSecFallback(c *C) { s.store.setSafeTS(2, 0x8000000000000002) s.store.setSafeTS(3, 0x8000000000000001) - i := uint64(100) - cases := []kv.TransactionOption{ - {TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: &i}, - {TxnScope: oracle.LocalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: &i}, + cases := []struct { + expectedTS uint64 + option kv.TransactionOption + }{ + {0x8000000000000001, kv.TransactionOption{TxnScope: oracle.GlobalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: &i}}, + {0x8000000000000002, kv.TransactionOption{TxnScope: oracle.LocalTxnScope, StartTS: nil, PrevSec: nil, MinStartTS: nil, MaxPrevSec: &i}}, } - expectedTs := []uint64{0x8000000000000001, 0x8000000000000002} - for i, cs := range cases { - expected := expectedTs[i] - result, _ := extractStartTs(s.store, cs) - c.Assert(result, Equals, expected) + for _, cs := range cases { + result, _ := extractStartTs(s.store, cs.option) + c.Assert(result, Equals, cs.expectedTS) } } diff --git a/store/tikv/kv.go b/store/tikv/kv.go index a487b0024e3e9..981a1b7bc5cab 100644 --- a/store/tikv/kv.go +++ b/store/tikv/kv.go @@ -235,6 +235,13 @@ func (s *KVStore) CurrentTimestamp(txnScope string) (uint64, error) { } func (s *KVStore) getTimestampWithRetry(bo *Backoffer, txnScope string) (uint64, error) { + failpoint.Inject("MockCurrentTimestamp", func(val failpoint.Value) { + if v, ok := val.(int); ok { + failpoint.Return(uint64(v), nil) + } else { + panic("MockCurrentTimestamp should be a number, try use this failpoint with \"return(ts)\"") + } + }) if span := opentracing.SpanFromContext(bo.ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("TiKVStore.getTimestampWithRetry", opentracing.ChildOf(span.Context())) defer span1.Finish() @@ -264,6 +271,13 @@ func (s *KVStore) getTimestampWithRetry(bo *Backoffer, txnScope string) (uint64, } func (s *KVStore) getStalenessTimestamp(bo *Backoffer, txnScope string, prevSec uint64) (uint64, error) { + failpoint.Inject("MockStalenessTimestamp", func(val failpoint.Value) { + if v, ok := val.(int); ok { + failpoint.Return(uint64(v), nil) + } else { + panic("MockStalenessTimestamp should be a number, try use this failpoint with \"return(ts)\"") + } + }) for { startTS, err := s.oracle.GetStaleTimestamp(bo.ctx, txnScope, prevSec) if err == nil { diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index 9c5172e52f372..8d531ee209a78 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -462,6 +462,10 @@ func (s *mockTikvGrpcServer) SplitRegion(context.Context, *kvrpcpb.SplitRegionRe return nil, errors.New("unreachable") } +func (s *mockTikvGrpcServer) GetLockWaitInfo(context.Context, *kvrpcpb.GetLockWaitInfoRequest) (*kvrpcpb.GetLockWaitInfoResponse, error) { + return nil, errors.New("unreachable") +} + func (s *mockTikvGrpcServer) CoprocessorStream(*coprocessor.Request, tikvpb.Tikv_CoprocessorStreamServer) error { return errors.New("unreachable") } diff --git a/store/tikv/snapshot.go b/store/tikv/snapshot.go index 4e41aa609aad8..ae65f15dc18e6 100644 --- a/store/tikv/snapshot.go +++ b/store/tikv/snapshot.go @@ -569,12 +569,6 @@ func (s *KVSnapshot) 
SetOption(opt int, val interface{}) { s.mu.Lock() s.mu.stats = val.(*SnapshotRuntimeStats) s.mu.Unlock() - case kv.SampleStep: - s.sampleStep = val.(uint32) - case kv.MatchStoreLabels: - s.mu.Lock() - s.mu.matchStoreLabels = val.([]*metapb.StoreLabel) - s.mu.Unlock() } } @@ -611,6 +605,11 @@ func (s *KVSnapshot) SetIsolationLevel(level IsoLevel) { s.isolationLevel = level } +// SetSampleStep skips 'step - 1' number of keys after each returned key. +func (s *KVSnapshot) SetSampleStep(step uint32) { + s.sampleStep = step +} + // SetPriority sets the priority for tikv to execute commands. func (s *KVSnapshot) SetPriority(pri Priority) { s.priority = pri @@ -631,6 +630,13 @@ func (s *KVSnapshot) SetIsStatenessReadOnly(b bool) { s.mu.isStaleness = b } +// SetMatchStoreLabels sets up labels to filter target stores. +func (s *KVSnapshot) SetMatchStoreLabels(labels []*metapb.StoreLabel) { + s.mu.Lock() + defer s.mu.Unlock() + s.mu.matchStoreLabels = labels +} + // SnapCacheHitCount gets the snapshot cache hit count. Only for test. func (s *KVSnapshot) SnapCacheHitCount() int { return int(atomic.LoadInt64(&s.mu.hitCnt)) diff --git a/table/tables/tables.go b/table/tables/tables.go index 8fd3cca9e2657..74fd2d82f3ef9 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/sessionctx/binloginfo" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/statistics" tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" @@ -46,6 +47,7 @@ import ( "github.com/pingcap/tidb/util/generatedexpr" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/stringutil" + "github.com/pingcap/tidb/util/tableutil" "github.com/pingcap/tipb/go-binlog" "github.com/pingcap/tipb/go-tipb" "go.uber.org/zap" @@ -322,8 +324,8 @@ func (t *TableCommon) UpdateRecord(ctx context.Context, sctx sessionctx.Context, sh := memBuffer.Staging() defer memBuffer.Cleanup(sh) - if meta := t.Meta(); meta.TempTableType == model.TempTableGlobal { - addTemporaryTableID(sctx, meta.ID) + if m := t.Meta(); m.TempTableType == model.TempTableGlobal { + addTemporaryTable(sctx, m) } var colIDs, binlogColIDs []int64 @@ -588,12 +590,9 @@ func TryGetCommonPkColumns(tbl table.Table) []*table.Column { return pkCols } -func addTemporaryTableID(sctx sessionctx.Context, id int64) { - txnCtx := sctx.GetSessionVars().TxnCtx - if txnCtx.GlobalTemporaryTables == nil { - txnCtx.GlobalTemporaryTables = make(map[int64]struct{}) - } - txnCtx.GlobalTemporaryTables[id] = struct{}{} +func addTemporaryTable(sctx sessionctx.Context, tblInfo *model.TableInfo) { + tempTable := sctx.GetSessionVars().GetTemporaryTable(tblInfo) + tempTable.SetModified(true) } // AddRecord implements table.Table AddRecord interface. @@ -608,8 +607,8 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . 
fn.ApplyOn(&opt) } - if meta := t.Meta(); meta.TempTableType == model.TempTableGlobal { - addTemporaryTableID(sctx, meta.ID) + if m := t.Meta(); m.TempTableType == model.TempTableGlobal { + addTemporaryTable(sctx, m) } var ctx context.Context @@ -1010,8 +1009,8 @@ func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []type return err } - if meta := t.Meta(); meta.TempTableType == model.TempTableGlobal { - addTemporaryTableID(ctx, meta.ID) + if m := t.Meta(); m.TempTableType == model.TempTableGlobal { + addTemporaryTable(ctx, m) } // The table has non-public column and this column is doing the operation of "modify/change column". @@ -1370,7 +1369,14 @@ func OverflowShardBits(recordID int64, shardRowIDBits uint64, typeBitsLength uin // Allocators implements table.Table Allocators interface. func (t *TableCommon) Allocators(ctx sessionctx.Context) autoid.Allocators { - if ctx == nil || ctx.GetSessionVars().IDAllocator == nil { + if ctx == nil { + return t.allocs + } else if ctx.GetSessionVars().IDAllocator == nil { + // Use an independent allocator for global temporary tables. + if t.meta.TempTableType == model.TempTableGlobal { + alloc := ctx.GetSessionVars().GetTemporaryTable(t.meta).GetAutoIDAllocator() + return autoid.Allocators{alloc} + } return t.allocs } @@ -1498,6 +1504,7 @@ func getDuplicateErrorHandleString(t table.Table, handle kv.Handle, row []types. func init() { table.TableFromMeta = TableFromMeta table.MockTableFromMeta = MockTableFromMeta + tableutil.TempTableFromMeta = TempTableFromMeta } // sequenceCommon cache the sequence value. @@ -1763,3 +1770,43 @@ func BuildTableScanFromInfos(tableInfo *model.TableInfo, columnInfos []*model.Co } return tsExec } + +// TemporaryTable is used to store transaction-specific or session-specific information for global / local temporary tables. +// For example, stats and autoID should have their own copies of data, instead of being shared by all sessions. +type TemporaryTable struct { + // Whether it's modified in this transaction. + modified bool + // The stats of this table. So far it's always pseudo stats. + stats *statistics.Table + // The autoID allocator of this table. + autoIDAllocator autoid.Allocator +} + +// TempTableFromMeta builds a TempTable from model.TableInfo. +func TempTableFromMeta(tblInfo *model.TableInfo) tableutil.TempTable { + return &TemporaryTable{ + modified: false, + stats: statistics.PseudoTable(tblInfo), + autoIDAllocator: autoid.NewAllocatorFromTempTblInfo(tblInfo), + } +} + +// GetAutoIDAllocator is implemented from TempTable.GetAutoIDAllocator. +func (t *TemporaryTable) GetAutoIDAllocator() autoid.Allocator { + return t.autoIDAllocator +} + +// SetModified is implemented from TempTable.SetModified. +func (t *TemporaryTable) SetModified(modified bool) { + t.modified = modified +} + +// GetModified is implemented from TempTable.GetModified. +func (t *TemporaryTable) GetModified() bool { + return t.modified +} + +// GetStats is implemented from TempTable.GetStats. +func (t *TemporaryTable) GetStats() interface{} { + return t.stats +} diff --git a/tidb-server/main.go b/tidb-server/main.go index 3e2351bf7c352..6429ba960a0cb 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -629,7 +629,7 @@ func setupMetrics() { metrics.TimeJumpBackCounter.Inc() } callBackCount := 0 - sucessCallBack := func() { + successCallBack := func() { callBackCount++ // It is callback by monitor per second, we increase metrics.KeepAliveCounter per 5s. 
if callBackCount >= 5 { @@ -637,7 +637,7 @@ func setupMetrics() { metrics.KeepAliveCounter.Inc() } } - go systimemon.StartMonitor(time.Now, systimeErrHandler, sucessCallBack) + go systimemon.StartMonitor(time.Now, systimeErrHandler, successCallBack) pushMetric(cfg.Status.MetricsAddr, time.Duration(cfg.Status.MetricsInterval)*time.Second) } diff --git a/types/json/binary_functions_test.go b/types/json/binary_functions_test.go index 8191638f7b2f6..12a9c8ece8ae2 100644 --- a/types/json/binary_functions_test.go +++ b/types/json/binary_functions_test.go @@ -35,6 +35,6 @@ func (s *testJSONFuncSuite) TestdecodeEscapedUnicode(c *C) { func BenchmarkDecodeEscapedUnicode(b *testing.B) { for i := 0; i < b.N; i++ { in := "597d" - decodeEscapedUnicode([]byte(in)) + _, _, _ = decodeEscapedUnicode([]byte(in)) } } diff --git a/util/chunk/chunk.go b/util/chunk/chunk.go index a4350bd9628e3..e91cff2559d79 100644 --- a/util/chunk/chunk.go +++ b/util/chunk/chunk.go @@ -505,8 +505,10 @@ func (c *Chunk) Append(other *Chunk, begin, end int) { } else { beginOffset, endOffset := src.offsets[begin], src.offsets[end] dst.data = append(dst.data, src.data[beginOffset:endOffset]...) + lastOffset := dst.offsets[len(dst.offsets)-1] for i := begin; i < end; i++ { - dst.offsets = append(dst.offsets, dst.offsets[len(dst.offsets)-1]+src.offsets[i+1]-src.offsets[i]) + lastOffset += src.offsets[i+1] - src.offsets[i] + dst.offsets = append(dst.offsets, lastOffset) } } for i := begin; i < end; i++ { diff --git a/util/chunk/chunk_test.go b/util/chunk/chunk_test.go index 67222328794db..22a6ac5b473dc 100644 --- a/util/chunk/chunk_test.go +++ b/util/chunk/chunk_test.go @@ -1179,3 +1179,80 @@ func BenchmarkBatchAppendRows(b *testing.B) { }) } } + +func BenchmarkAppendRows(b *testing.B) { + b.ReportAllocs() + rowChk := newChunk(8, 8, 0, 0) + + for i := 0; i < 4096; i++ { + rowChk.AppendNull(0) + rowChk.AppendInt64(1, 1) + rowChk.AppendString(2, "abcd") + rowChk.AppendBytes(3, []byte("abcd")) + } + + type testCaseConf struct { + batchSize int + } + testCaseConfs := []testCaseConf{ + {batchSize: 2}, + {batchSize: 8}, + {batchSize: 16}, + {batchSize: 100}, + {batchSize: 1000}, + {batchSize: 4000}, + } + + chk := newChunk(8, 8, 0, 0) + for _, conf := range testCaseConfs { + b.ResetTimer() + b.Run(fmt.Sprintf("row-%d", conf.batchSize), func(b *testing.B) { + for i := 0; i < b.N; i++ { + chk.Reset() + for j := 0; j < conf.batchSize; j++ { + chk.AppendRow(rowChk.GetRow(j)) + } + } + }) + b.ResetTimer() + b.Run(fmt.Sprintf("column-%d", conf.batchSize), func(b *testing.B) { + for i := 0; i < b.N; i++ { + chk.Reset() + chk.Append(rowChk, 0, conf.batchSize) + } + }) + } +} + +func BenchmarkAppend(b *testing.B) { + b.ReportAllocs() + rowChk := newChunk(0, 0) + + for i := 0; i < 4096; i++ { + rowChk.AppendString(0, "abcd") + rowChk.AppendBytes(1, []byte("abcd")) + } + + type testCaseConf struct { + batchSize int + } + testCaseConfs := []testCaseConf{ + {batchSize: 2}, + {batchSize: 8}, + {batchSize: 16}, + {batchSize: 100}, + {batchSize: 1000}, + {batchSize: 4000}, + } + + chk := newChunk(0, 0) + for _, conf := range testCaseConfs { + b.ResetTimer() + b.Run(fmt.Sprintf("column-%d", conf.batchSize), func(b *testing.B) { + for i := 0; i < b.N; i++ { + chk.Reset() + chk.Append(rowChk, 0, conf.batchSize) + } + }) + } +} diff --git a/util/tableutil/tableutil.go b/util/tableutil/tableutil.go new file mode 100644 index 0000000000000..11cbe626dcc56 --- /dev/null +++ b/util/tableutil/tableutil.go @@ -0,0 +1,40 @@ +// Copyright 2021 PingCAP, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tableutil
+
+import (
+	"github.com/pingcap/parser/model"
+	"github.com/pingcap/tidb/meta/autoid"
+)
+
+// TempTable is used to store transaction-specific or session-specific information for global / local temporary tables.
+// For example, stats and autoID should have their own copies of data, instead of being shared by all sessions.
+type TempTable interface {
+	// GetAutoIDAllocator gets the autoID allocator of this table.
+	GetAutoIDAllocator() autoid.Allocator
+
+	// SetModified sets that the table is modified.
+	SetModified(bool)
+
+	// GetModified queries whether the table is modified.
+	GetModified() bool
+
+	// GetStats returns the stats of this table (*statistics.Table).
+	// The return type is interface{} to avoid import cycles.
+	GetStats() interface{}
+}
+
+// TempTableFromMeta builds a TempTable from *model.TableInfo.
+// Currently it is set to tables.TempTableFromMeta in the init function of the table/tables package.
+var TempTableFromMeta func(tblInfo *model.TableInfo) TempTable
diff --git a/util/testkit/testkit.go b/util/testkit/testkit.go
index 06eb826c56b78..d6d6e41bb6e9f 100644
--- a/util/testkit/testkit.go
+++ b/util/testkit/testkit.go
@@ -256,14 +256,16 @@ func (tk *TestKit) MustNoGlobalStats(table string) bool {
 }
 
 // MustPartition checks if the result execution plan must read specific partitions.
-func (tk *TestKit) MustPartition(sql string, partitions string, args ...interface{}) bool {
+func (tk *TestKit) MustPartition(sql string, partitions string, args ...interface{}) *Result {
 	rs := tk.MustQuery("explain "+sql, args...)
+	ok := false
 	for i := range rs.rows {
 		if strings.Compare(rs.rows[i][3], "partition:"+partitions) == 0 {
-			return true
+			ok = true
 		}
 	}
-	return false
+	tk.c.Assert(ok, check.IsTrue)
+	return tk.MustQuery(sql, args...)
 }
 
 // MustUseIndex checks if the result execution plan contains specific index(es).
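
Usage sketch: a hypothetical caller of the new tableutil.TempTable interface, written only against APIs introduced in this patch. Here `sctx` is a sessionctx.Context and `tblInfo` a *model.TableInfo from the surrounding session; GetStats returns interface{} to avoid an import cycle, so the caller asserts it back to *statistics.Table.

	// tempTableStats returns the per-transaction pseudo stats of a global
	// temporary table, or nil when tblInfo is not a global temporary table.
	func tempTableStats(sctx sessionctx.Context, tblInfo *model.TableInfo) *statistics.Table {
		tt := sctx.GetSessionVars().GetTemporaryTable(tblInfo)
		if tt == nil {
			// Local temporary tables are not handled yet, see the TODO in
			// SessionVars.GetTemporaryTable.
			return nil
		}
		// So far this is always the pseudo table built by TempTableFromMeta.
		return tt.GetStats().(*statistics.Table)
	}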