*: add show table regions syntax (#8719) (#10612) #11238

Merged 3 commits on Jul 15, 2019.
2 changes: 1 addition & 1 deletion ddl/ddl.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ var (
// EnableSplitTableRegion is a flag to decide whether to split a new region for
// a newly created table. It takes effect only if the Storage supports split
// region.
- EnableSplitTableRegion = false
Member: It's better to cherry-pick #8719 first, rather than cherry-picking the 2 commits together.

Contributor (author): Maybe I can simply change the PR title.

+ EnableSplitTableRegion = uint32(0)

// PartitionCountLimit is the limit of the number of partitions in a table.
// MySQL's maximum number of partitions is 8192; our maximum number of partitions is 1024.
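The reason for the type change from bool to uint32 is visible in the next hunk: the DDL path now reads the flag through atomic.LoadUint32, while tests (and other callers) flip it from different goroutines, which a plain bool cannot do race-free. A minimal, self-contained sketch of the pattern, assuming nothing beyond the standard library; the variable simply mirrors the one in this diff:

package main

import (
	"fmt"
	"sync/atomic"
)

// EnableSplitTableRegion mirrors the flag in ddl/ddl.go: stored as a uint32
// so concurrent readers and writers can go through sync/atomic instead of
// racing on a plain bool.
var EnableSplitTableRegion = uint32(0)

func main() {
	atomic.StoreUint32(&EnableSplitTableRegion, 1) // enable, as the test below does
	if atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
		fmt.Println("would pre-split regions for newly created tables")
	}
	atomic.StoreUint32(&EnableSplitTableRegion, 0) // restore the default
}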
3 changes: 2 additions & 1 deletion ddl/ddl_api.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"context"
"fmt"
"strings"
"sync/atomic"
"time"

"github.com/cznic/mathutil"
@@ -1173,7 +1174,7 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err error) {
if err == nil {
// do pre-split and scatter.
sp, ok := d.store.(kv.SplitableStore)
if ok && EnableSplitTableRegion {
if ok && atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
var (
preSplit func()
scatterRegion bool
5 changes: 3 additions & 2 deletions ddl/table_split_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ package ddl_test

import (
"bytes"
"sync/atomic"
"time"

. "github.com/pingcap/check"
@@ -37,11 +38,11 @@ func (s *testDDLTableSplitSuite) TestTableSplit(c *C) {
defer store.Close()
session.SetSchemaLease(0)
session.SetStatsLease(0)
- ddl.EnableSplitTableRegion = true
+ atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer dom.Close()
- ddl.EnableSplitTableRegion = false
+ atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
infoSchema := dom.InfoSchema()
c.Assert(infoSchema, NotNil)
t, err := infoSchema.TableByName(model.NewCIStr("mysql"), model.NewCIStr("tidb"))
1 change: 1 addition & 0 deletions executor/builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -516,6 +516,7 @@ func (b *executorBuilder) buildShow(v *plannercore.Show) Executor {
DBName: model.NewCIStr(v.DBName),
Table: v.Table,
Column: v.Column,
+ IndexName: v.IndexName,
User: v.User,
Flag: v.Flag,
Full: v.Full,
121 changes: 121 additions & 0 deletions executor/executor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ import (
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
@@ -3721,3 +3722,123 @@ func (h *oomCapturer) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
}
return ce
}

func (s *testSuite) TestShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_regions1, t_regions")
tk.MustExec("create table t_regions1 (a int key, b int, index idx(b))")
tk.MustExec("create table t_regions (a int key, b int, index idx(b))")

// Test show table regions.
tk.MustExec(`split table t_regions1 by (0)`)
tk.MustExec(`split table t_regions between (-10000) and (10000) regions 4;`)
re := tk.MustQuery("show table t_regions regions")
rows := re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
c.Assert(len(rows[0]), Equals, 7)
tbl1 := testGetTableByName(c, tk.Se, "test", "t_regions1")
tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start keys: splitting (-10000, 10000) into 4 regions yields split points -5000, 0 and 5000.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", tbl1.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))

// Test show table index regions.
tk.MustExec(`split table t_regions index idx between (-1000) and (1000) regions 4;`)
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))

re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// After splitting index `idx`, table t_regions should have 7 regions in total.
c.Assert(len(rows), Equals, 7)
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))

// Test an unsigned primary key, and wait for region scattering to finish.
tk.MustExec("drop table if exists t_regions")
tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")

// Test show table regions.
tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
tk.MustExec(`split table t_regions between (0) and (10000) regions 4;`)
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Matches, "t_.*")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))

// Test show table index regions.
tk.MustExec(`split table t_regions index idx between (0) and (1000) regions 4;`)
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))

// Test show table regions for a partition table with pre-split on table creation disabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
// Table partition_t should have only 1 region now, since pre-split is disabled.
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Matches, "t_.*")

// Test show table regions for a partition table with pre-split on table creation enabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set @@tidb_enable_table_partition = 1")
tk.MustExec("set @@global.tidb_scatter_region=1;")
tk.MustExec("drop table if exists partition_t;")
tk.MustExec(`create table partition_t (id int) partition by range (id) (
partition p0 values less than (1),
partition p1 values less than (4),
partition p2 values less than (7))`)
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
// Table partition_t should have 3 regions now, one per partition.
c.Assert(len(rows), Equals, 3)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
}

func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
dom := domain.GetDomain(ctx)
// Make sure the table schema is the new schema.
err := dom.Reload()
c.Assert(err, IsNil)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
c.Assert(err, IsNil)
return tbl
}
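The start-key assertions above combine two facts: splitting (-10000, 10000) into 4 regions produces the split points -5000, 0 and 5000, and TiDB prints keys in a human-readable form, t_{tableID}_r_{handle} for record keys and t_{tableID}_i_{indexID}_ as the prefix of index keys. A small illustrative sketch of that rendering, with hypothetical helpers rather than TiDB's actual codec:

package main

import "fmt"

// readableRecordKey renders the record-key form the test matches,
// e.g. t_45_r_-5000 for table ID 45 and row handle -5000.
func readableRecordKey(tableID, handle int64) string {
	return fmt.Sprintf("t_%d_r_%d", tableID, handle)
}

// readableIndexPrefix renders the index-key prefix, e.g. t_45_i_1_.
// Encoded index values follow the trailing underscore, which is why the
// test matches index regions with patterns like t_%d_i_1_.*.
func readableIndexPrefix(tableID, indexID int64) string {
	return fmt.Sprintf("t_%d_i_%d_", tableID, indexID)
}

func main() {
	fmt.Println(readableRecordKey(45, -5000)) // t_45_r_-5000
	fmt.Println(readableIndexPrefix(45, 1))   // t_45_i_1_
}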
125 changes: 118 additions & 7 deletions executor/show.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,13 @@ import (
"github.com/pingcap/tidb-tools/tidb-binlog/node"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/plugin"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
@@ -52,13 +55,14 @@ var etcdDialTimeout = 5 * time.Second
type ShowExec struct {
baseExecutor

- Tp ast.ShowStmtType // Databases/Tables/Columns/....
- DBName model.CIStr
- Table *ast.TableName // Used for showing columns.
- Column *ast.ColumnName // Used for `desc table column`.
- Flag int // Some flag parsed from sql, such as FULL.
- Full bool
- User *auth.UserIdentity // Used for show grants.
+ Tp ast.ShowStmtType // Databases/Tables/Columns/....
+ DBName model.CIStr
+ Table *ast.TableName // Used for showing columns.
+ Column *ast.ColumnName // Used for `desc table column`.
+ IndexName model.CIStr // Used for show table regions.
+ Flag int // Some flag parsed from sql, such as FULL.
+ Full bool
+ User *auth.UserIdentity // Used for show grants.

// GlobalScope is used by show variables
GlobalScope bool
@@ -163,6 +167,8 @@ func (e *ShowExec) fetchAll() error {
return e.fetchShowMasterStatus()
case ast.ShowPrivileges:
return e.fetchShowPrivileges()
+ case ast.ShowRegions:
+ return e.fetchShowTableRegions()
}
return nil
}
@@ -937,3 +943,108 @@ func (e *ShowExec) appendRow(row []interface{}) {
}
}
}

func (e *ShowExec) fetchShowTableRegions() error {
store := e.ctx.GetStore()
tikvStore, ok := store.(tikv.Storage)
if !ok {
return nil
}
splitStore, ok := store.(kv.SplitableStore)
if !ok {
return nil
}

tb, err := e.getTable()
if err != nil {
return errors.Trace(err)
}

// Get table regions from PD, not from the region cache, because the region cache may be outdated.
var regions []regionMeta
if len(e.IndexName.L) != 0 {
indexInfo := tb.Meta().FindIndexByName(e.IndexName.L)
if indexInfo == nil {
return plannercore.ErrKeyDoesNotExist.GenWithStackByArgs(e.IndexName, tb.Meta().Name)
}
regions, err = getTableIndexRegions(tb, indexInfo, tikvStore, splitStore)
} else {
regions, err = getTableRegions(tb, tikvStore, splitStore)
}

if err != nil {
return err
}
e.fillRegionsToChunk(regions)
return nil
}

func getTableRegions(tb table.Table, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
if info := tb.Meta().GetPartitionInfo(); info != nil {
return getPartitionTableRegions(info, tb.(table.PartitionedTable), tikvStore, splitStore)
}
return getPhysicalTableRegions(tb.Meta().ID, tb.Meta(), tikvStore, splitStore, nil)
}

func getTableIndexRegions(tb table.Table, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
if info := tb.Meta().GetPartitionInfo(); info != nil {
return getPartitionIndexRegions(info, tb.(table.PartitionedTable), indexInfo, tikvStore, splitStore)
}
return getPhysicalIndexRegions(tb.Meta().ID, indexInfo, tikvStore, splitStore, nil)
}

func getPartitionTableRegions(info *model.PartitionInfo, tbl table.PartitionedTable, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
regions := make([]regionMeta, 0, len(info.Definitions))
uniqueRegionMap := make(map[uint64]struct{})
for _, def := range info.Definitions {
pid := def.ID
partition := tbl.GetPartition(pid)
partitionRegions, err := getPhysicalTableRegions(partition.GetPhysicalID(), tbl.Meta(), tikvStore, splitStore, uniqueRegionMap)
if err != nil {
return nil, err
}
regions = append(regions, partitionRegions...)
}
return regions, nil
}

func getPartitionIndexRegions(info *model.PartitionInfo, tbl table.PartitionedTable, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
var regions []regionMeta
uniqueRegionMap := make(map[uint64]struct{})
for _, def := range info.Definitions {
pid := def.ID
partition := tbl.GetPartition(pid)
partitionRegions, err := getPhysicalIndexRegions(partition.GetPhysicalID(), indexInfo, tikvStore, splitStore, uniqueRegionMap)
if err != nil {
return nil, err
}
regions = append(regions, partitionRegions...)
}
return regions, nil
}

func (e *ShowExec) fillRegionsToChunk(regions []regionMeta) {
for i := range regions {
e.result.AppendUint64(0, regions[i].region.Id)
e.result.AppendString(1, regions[i].start)
e.result.AppendString(2, regions[i].end)
e.result.AppendUint64(3, regions[i].leaderID)
e.result.AppendUint64(4, regions[i].storeID)

peers := ""
for j, peer := range regions[i].region.Peers {
if j > 0 {
peers += ", "
}
peers += strconv.FormatUint(peer.Id, 10)
}
e.result.AppendString(5, peers)
if regions[i].scattering {
e.result.AppendInt64(6, 1)
} else {
e.result.AppendInt64(6, 0)
}
}
}
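For reference, fillRegionsToChunk emits seven values per region, which is what the len(rows[0]) == 7 assertion in the test checks. A hedged sketch of what one result row might look like; the column names are assumptions inferred from the values appended above (the output schema is built outside this diff), and the concrete IDs are illustrative only:

SHOW TABLE t_regions REGIONS;

REGION_ID  START_KEY     END_KEY   LEADER_ID  LEADER_STORE_ID  PEERS    SCATTERING
2          t_45_r_-5000  t_45_r_0  3          1                3, 4, 5  0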