*: Always generate global stats for partitioned tables #37830

Draft · wants to merge 18 commits into base: master
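In short (as inferred from the hunks below): ANALYZE previously merged partition-level stats into global, table-level stats only when `tidb_partition_prune_mode` was dynamic; this PR drops that gate, so global stats are always built for partitioned tables. A minimal compilable sketch of the before/after shape, using hypothetical stand-in types — the real signatures are in the executor/analyze_global_stats.go hunk below:

package sketch // hypothetical stand-ins, not TiDB's real types

import "context"

type globalStatsKey struct{ tableID, indexID int64 }
type globalStatsInfo struct{ isIndex int }
type globalStatsMap map[globalStatsKey]globalStatsInfo
type analyzeExec struct{}

// Before: building global stats was gated on dynamic prune mode.
func (e *analyzeExec) handleGlobalStatsOld(ctx context.Context, needGlobalStats bool, m globalStatsMap) error {
	if !needGlobalStats { // static prune mode skipped global stats entirely
		return nil
	}
	return e.buildGlobalStats(ctx, m)
}

// After: the gate and the needGlobalStats parameter are gone; global stats
// are built for every partitioned table that was analyzed.
func (e *analyzeExec) handleGlobalStatsNew(ctx context.Context, m globalStatsMap) error {
	return e.buildGlobalStats(ctx, m)
}

func (e *analyzeExec) buildGlobalStats(_ context.Context, m globalStatsMap) error {
	for range m {
		// merge partition-level histograms/TopN into table-level stats (elided)
	}
	return nil
}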
5 changes: 0 additions & 5 deletions ddl/db_partition_test.go
@@ -710,8 +710,6 @@ create table log_message_1 (
}

func TestPartitionRangeColumnsCollate(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create schema PartitionRangeColumnsCollate")
@@ -3650,9 +3648,6 @@ func TestPartitionListWithTimeType(t *testing.T) {
}

func TestPartitionListWithNewCollation(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test;")
15 changes: 5 additions & 10 deletions executor/analyze.go
@@ -107,11 +107,8 @@ func (e *AnalyzeExec) Next(ctx context.Context, _ *chunk.Chunk) error {
close(taskCh)
e.wg.Wait()
close(resultsCh)
-	pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
-	// needGlobalStats used to indicate whether we should merge the partition-level stats to global-level stats.
-	needGlobalStats := pruneMode == variable.Dynamic
	globalStatsMap := make(map[globalStatsKey]globalStatsInfo)
-	err = e.handleResultsError(ctx, concurrency, needGlobalStats, globalStatsMap, resultsCh)
+	err = e.handleResultsError(ctx, concurrency, globalStatsMap, resultsCh)
for _, task := range e.tasks {
if task.colExec != nil && task.colExec.memTracker != nil {
task.colExec.memTracker.Detach()
@@ -125,8 +122,7 @@ func (e *AnalyzeExec) Next(ctx context.Context, _ *chunk.Chunk) error {
dom.SysProcTracker().KillSysProcess(util.GetAutoAnalyzeProcID(dom.ServerID))
})

-	// If we enabled dynamic prune mode, then we need to generate global stats here for partition tables.
-	err = e.handleGlobalStats(ctx, needGlobalStats, globalStatsMap)
+	err = e.handleGlobalStats(ctx, globalStatsMap)
if err != nil {
return err
}
@@ -146,10 +142,9 @@ func (e *AnalyzeExec) saveV2AnalyzeOpts() error {
return nil
}
// only to save table options if dynamic prune mode
-	dynamicPrune := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load()) == variable.Dynamic
toSaveMap := make(map[int64]core.V2AnalyzeOptions)
for id, opts := range e.OptionsMap {
-		if !opts.IsPartition || !dynamicPrune {
+		if !opts.IsPartition {
toSaveMap[id] = opts
}
}
@@ -215,7 +210,7 @@ func (e *AnalyzeExec) recordHistoricalStats(tableID int64) error {
}

// handleResultsError will handle the error fetch from resultsCh and record it in log
-func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int, needGlobalStats bool,
+func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int,
globalStatsMap globalStatsMap, resultsCh <-chan *statistics.AnalyzeResults) error {
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
panicCnt := 0
@@ -235,7 +230,7 @@ func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int, n
finishJobWithLog(e.ctx, results.Job, err)
continue
}
-		if results.TableID.IsPartitionTable() && needGlobalStats {
+		if results.TableID.IsPartitionTable() {
for _, result := range results.Ars {
if result.IsIndex == 0 {
// If it does not belong to the statistics of index, we need to set it to -1 to distinguish.
5 changes: 1 addition & 4 deletions executor/analyze_global_stats.go
@@ -43,10 +43,7 @@ type globalStatsInfo struct {
// The meaning of value in map is some additional information needed to build global-level stats.
type globalStatsMap map[globalStatsKey]globalStatsInfo

-func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats bool, globalStatsMap globalStatsMap) error {
-	if !needGlobalStats {
-		return nil
-	}
+func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, globalStatsMap globalStatsMap) error {
globalStatsTableIDs := make(map[int64]struct{})
for globalStatsID := range globalStatsMap {
globalStatsTableIDs[globalStatsID.tableID] = struct{}{}
86 changes: 60 additions & 26 deletions executor/analyzetest/analyze_test.go
@@ -798,13 +798,25 @@ func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domai
tk.MustQuery("show stats_buckets").Check(testkit.Rows())
tk.MustExec("insert into t values (1,1)")
tk.MustExec("analyze incremental table t index")
tk.MustQuery("show warnings").Check(testkit.Rows()) // no warning
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 8244 Build table: `t` column: `b` global-level stats failed due to missing partition-level column stats, please run analyze table to refresh columns of all partitions"))
require.NoError(t, h.LoadNeededHistograms())
tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0"))
tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t global idx 1 0 1 1 1 1 0", "test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0"))
tk.MustExec(`analyze table t`)
tk.MustExec("insert into t values (2,2)")
tk.MustExec("analyze incremental table t index")
tk.MustQuery("show warnings").Check(testkit.Rows()) // no warnings
require.NoError(t, h.LoadNeededHistograms())
tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 a 0 1 2 1 2 2 0", "test t p0 idx 1 0 1 1 1 1 0", "test t p0 idx 1 1 2 1 2 2 0"))
tk.MustQuery("show stats_buckets").Sort().Check(testkit.Rows(
"test t global a 0 0 1 1 1 1 0",
"test t global a 0 1 2 1 1 2 0",
"test t global b 0 0 1 1 1 1 0",
"test t global idx 1 0 1 1 1 1 0",
"test t global idx 1 1 2 1 1 2 0",
"test t p0 a 0 0 1 1 1 1 0",
"test t p0 a 0 1 2 1 2 2 0",
"test t p0 b 0 0 1 1 1 1 0",
"test t p0 idx 1 0 1 1 1 1 0",
"test t p0 idx 1 1 2 1 2 2 0"))
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
tk.MustExec("insert into t values (11,11)")
err = tk.ExecToErr("analyze incremental table t index")
@@ -1801,6 +1813,7 @@ func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) {
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
require.NoError(t, err)
defs := tbl.Meta().Partition.Definitions
+	tblID := tbl.Meta().ID
p0ID := defs[0].ID
p1ID := defs[1].ID

@@ -1827,20 +1840,28 @@
}

rows := tk.MustQuery("show column_stats_usage where db_name = 'test' and table_name = 't' and last_analyzed_at is not null").Sort().Rows()
-	require.Equal(t, 4, len(rows))
-	require.Equal(t, []interface{}{"test", "t", "p0", "a"}, rows[0][:4])
-	require.Equal(t, []interface{}{"test", "t", "p0", "c"}, rows[1][:4])
-	require.Equal(t, []interface{}{"test", "t", "p1", "a"}, rows[2][:4])
-	require.Equal(t, []interface{}{"test", "t", "p1", "c"}, rows[3][:4])
+	require.Equal(t, 6, len(rows))
+	require.Equal(t, []interface{}{"test", "t", "global", "a"}, rows[0][:4])
+	require.Equal(t, []interface{}{"test", "t", "global", "c"}, rows[1][:4])
+	require.Equal(t, []interface{}{"test", "t", "p0", "a"}, rows[2][:4])
+	require.Equal(t, []interface{}{"test", "t", "p0", "c"}, rows[3][:4])
+	require.Equal(t, []interface{}{"test", "t", "p1", "a"}, rows[4][:4])
+	require.Equal(t, []interface{}{"test", "t", "p1", "c"}, rows[5][:4])

rows = tk.MustQuery("show stats_meta where db_name = 'test' and table_name = 't'").Sort().Rows()
-	require.Equal(t, 2, len(rows))
-	require.Equal(t, []interface{}{"test", "t", "p0", "0", "9"}, append(rows[0][:3], rows[0][4:]...))
-	require.Equal(t, []interface{}{"test", "t", "p1", "0", "11"}, append(rows[1][:3], rows[1][4:]...))
+	require.Equal(t, 3, len(rows))
+	require.Equal(t, []interface{}{"test", "t", "global", "0", "20"}, append(rows[0][:3], rows[0][4:]...))
+	require.Equal(t, []interface{}{"test", "t", "p0", "0", "9"}, append(rows[1][:3], rows[1][4:]...))
+	require.Equal(t, []interface{}{"test", "t", "p1", "0", "11"}, append(rows[2][:3], rows[2][4:]...))

tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't' and is_index = 0").Sort().Check(
// db, tbl, part, col, is_idx, value, count
testkit.Rows("test t p0 a 0 4 2",
testkit.Rows(
"test t global a 0 16 4",
"test t global a 0 5 3",
"test t global c 0 1 3",
"test t global c 0 14 3",
"test t p0 a 0 4 2",
"test t p0 a 0 5 3",
"test t p0 c 0 1 3",
"test t p0 c 0 2 2",
@@ -1851,14 +1872,22 @@ func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) {

tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't' and is_index = 1").Sort().Check(
// db, tbl, part, col, is_idx, value, count
testkit.Rows("test t p0 idx 1 1 3",
testkit.Rows(
"test t global idx 1 1 3",
"test t global idx 1 14 3",
"test t p0 idx 1 1 3",
"test t p0 idx 1 2 2",
"test t p1 idx 1 13 2",
"test t p1 idx 1 14 3"))

tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't' and is_index = 0").Sort().Check(
// db, tbl, part, col, is_index, bucket_id, count, repeats, lower, upper, ndv
testkit.Rows("test t p0 a 0 0 2 1 1 2 0",
testkit.Rows(
"test t global a 0 0 5 2 1 4 0",
"test t global a 0 1 12 2 17 17 0",
"test t global c 0 0 6 1 2 6 0",
"test t global c 0 1 14 2 13 13 0",
"test t p0 a 0 0 2 1 1 2 0",
"test t p0 a 0 1 3 1 3 3 0",
"test t p0 c 0 0 3 1 3 5 0",
"test t p0 c 0 1 4 1 6 6 0",
@@ -1869,20 +1898,27 @@

tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't' and is_index = 1").Sort().Check(
// db, tbl, part, col, is_index, bucket_id, count, repeats, lower, upper, ndv
testkit.Rows("test t p0 idx 1 0 3 1 3 5 0",
testkit.Rows(
"test t global idx 1 0 6 1 2 6 0",
"test t global idx 1 1 14 2 13 13 0",
"test t p0 idx 1 0 3 1 3 5 0",
"test t p0 idx 1 1 4 1 6 6 0",
"test t p1 idx 1 0 4 1 7 10 0",
"test t p1 idx 1 1 6 1 11 12 0"))

tk.MustQuery("select table_id, is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, truncate(correlation,2) from mysql.stats_histograms order by table_id, is_index, hist_id asc").Check(
-		testkit.Rows(fmt.Sprintf("%d 0 1 5 1 8 2 1", p0ID), // p0, a
-			fmt.Sprintf("%d 0 2 0 0 8 0 0", p0ID), // p0, b, not analyzed
-			fmt.Sprintf("%d 0 3 6 0 9 2 1", p0ID), // p0, c
-			fmt.Sprintf("%d 1 1 6 0 9 2 0", p0ID), // p0, idx
-			fmt.Sprintf("%d 0 1 7 0 11 2 1", p1ID), // p1, a
-			fmt.Sprintf("%d 0 2 0 0 11 0 0", p1ID), // p1, b, not analyzed
-			fmt.Sprintf("%d 0 3 8 0 11 2 1", p1ID), // p1, c
-			fmt.Sprintf("%d 1 1 8 0 11 2 0", p1ID), // p1, idx
+		testkit.Rows(
+			fmt.Sprintf("%d 0 1 12 1 19 2 0", tblID), // tbl, a
+			fmt.Sprintf("%d 0 3 14 0 20 2 0", tblID), // tbl, b, not analyzed
+			fmt.Sprintf("%d 1 1 14 0 0 2 0", tblID), // tbl, c
+			fmt.Sprintf("%d 0 1 5 1 8 2 1", p0ID), // p0, a
+			fmt.Sprintf("%d 0 2 0 0 8 0 0", p0ID), // p0, b, not analyzed
+			fmt.Sprintf("%d 0 3 6 0 9 2 1", p0ID), // p0, c
+			fmt.Sprintf("%d 1 1 6 0 9 2 0", p0ID), // p0, idx
+			fmt.Sprintf("%d 0 1 7 0 11 2 1", p1ID), // p1, a
+			fmt.Sprintf("%d 0 2 0 0 11 0 0", p1ID), // p1, b, not analyzed
+			fmt.Sprintf("%d 0 3 8 0 11 2 1", p1ID), // p1, c
+			fmt.Sprintf("%d 1 1 8 0 11 2 0", p1ID), // p1, idx
))
}(val)
}
@@ -2849,8 +2885,6 @@ PARTITION BY RANGE ( a ) (
}

func TestAnalyzePartitionStaticToDynamic(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store, dom := testkit.CreateMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
originalVal := tk.MustQuery("select @@tidb_persist_analyze_options").Rows()[0][0].(string)
@@ -2919,7 +2953,7 @@ PARTITION BY RANGE ( a ) (
tk.MustQuery("select * from t where a > 1 and b > 1 and c > 1 and d > 1")
require.NoError(t, h.LoadNeededHistograms())
tbl := h.GetTableStats(tableInfo)
-	require.Equal(t, 4, len(tbl.Columns))
+	require.Equal(t, 0, len(tbl.Columns))

// ignore both p0's 3 buckets, persisted-partition-options' 1 bucket, just use table-level 2 buckets
tk.MustExec("analyze table t partition p0")
18 changes: 9 additions & 9 deletions executor/builder.go
@@ -3369,7 +3369,7 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E
sctx := b.ctx.GetSessionVars().StmtCtx
sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID)

-	if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
return ret
}
// When isPartition is set, it means the union rewriting is done, so a partition reader is preferred.
@@ -3573,7 +3573,7 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea
e.feedback = statistics.NewQueryFeedback(0, nil, 0, is.Desc)
} else {
tblID := e.physicalTableID
-		if b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+		if b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
tblID = e.table.Meta().ID
}
e.feedback = statistics.NewQueryFeedback(tblID, is.Hist, int64(is.StatsCount()), is.Desc)
@@ -3616,7 +3616,7 @@ func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) E
sctx := b.ctx.GetSessionVars().StmtCtx
sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O)

-	if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
return ret
}
// When isPartition is set, it means the union rewriting is done, so a partition reader is preferred.
@@ -3792,7 +3792,7 @@ func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLoo
sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O)
sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID)

-	if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
return ret
}

@@ -3929,7 +3929,7 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg
sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID)
executorCounterIndexMergeReaderExecutor.Inc()

-	if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
return ret
}

@@ -4052,7 +4052,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
return nil, err
}
tbInfo := e.table.Meta()
-	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() {
if v.IsCommonHandle {
kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
if err != nil {
@@ -4300,7 +4300,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte
return nil, err
}
tbInfo := e.table.Meta()
-	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() {
kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memoryTracker, interruptSignal)
if err != nil {
return nil, err
@@ -4347,7 +4347,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
}

tbInfo := e.table.Meta()
-	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() {
e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
if err != nil {
return nil, err
@@ -4933,7 +4933,7 @@ func getPhysicalTableID(t table.Table) int64 {
}

func getFeedbackStatsTableID(ctx sessionctx.Context, t table.Table) int64 {
-	if p, ok := t.(table.PhysicalTable); ok && !ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if p, ok := t.(table.PhysicalTable); ok && !ctx.GetSessionVars().UseDynamicPartitionPrune() {
return p.GetPhysicalID()
}
return t.Meta().ID
2 changes: 0 additions & 2 deletions executor/distsql_test.go
@@ -291,8 +291,6 @@ func TestPushLimitDownIndexLookUpReader(t *testing.T) {
}

func TestPartitionTableIndexLookUpReader(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)

7 changes: 0 additions & 7 deletions executor/executor.go
@@ -1927,13 +1927,6 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
sc.StatsLoadStatus = make(map[model.TableItemID]string)
sc.IsSyncStatsFailed = false
sc.IsExplainAnalyzeDML = false
-	// Firstly we assume that UseDynamicPruneMode can be enabled according session variable, then we will check other conditions
-	// in PlanBuilder.buildDataSource
-	if ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-		sc.UseDynamicPruneMode = true
-	} else {
-		sc.UseDynamicPruneMode = false
-	}

sc.SysdateIsNow = ctx.GetSessionVars().SysdateIsNow

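The executor/builder.go call-site changes above pair with this executor.go hunk: the per-statement UseDynamicPruneMode flag that ResetContextOfStmt copied from the session variables is removed, and readers now ask the session variables directly. A condensed sketch of that relocation, again with hypothetical stand-ins rather than TiDB's real types:

package sketch2 // hypothetical stand-ins; the real types live in sessionctx/variable

type stmtCtx struct{ useDynamicPruneMode bool }

type sessionVars struct {
	stmtCtx   *stmtCtx
	pruneMode string // "static" or "dynamic"
}

func (sv *sessionVars) isDynamicPartitionPruneEnabled() bool {
	return sv.pruneMode == "dynamic"
}

// Before: ResetContextOfStmt staged a copy of the session setting on the
// statement context, and plan/executor builders consulted that copy.
func useDynamicPruneOld(sv *sessionVars) bool {
	return sv.stmtCtx.useDynamicPruneMode
}

// After: builders consult the session variables directly, so there is no
// per-statement copy to initialize or keep in sync.
func useDynamicPruneNew(sv *sessionVars) bool {
	return sv.isDynamicPartitionPruneEnabled()
}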
2 changes: 0 additions & 2 deletions executor/executor_issue_test.go
@@ -583,8 +583,6 @@ func TestFix31537(t *testing.T) {
}

func TestIssue30382(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
3 changes: 0 additions & 3 deletions executor/index_lookup_join_test.go
@@ -21,7 +21,6 @@ import (
"strings"
"testing"

"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
@@ -392,8 +391,6 @@ func TestIssue24547(t *testing.T) {
}

func TestIssue27138(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)

tk := testkit.NewTestKit(t, store)