diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go
index 633ea65b7f74b..d7ac0fd7f3a97 100644
--- a/ddl/db_partition_test.go
+++ b/ddl/db_partition_test.go
@@ -710,8 +710,6 @@ create table log_message_1 (
 }
 
 func TestPartitionRangeColumnsCollate(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("create schema PartitionRangeColumnsCollate")
@@ -3650,9 +3648,6 @@ func TestPartitionListWithTimeType(t *testing.T) {
 }
 
 func TestPartitionListWithNewCollation(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test;")
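[Editor's note] The recurring deletion across the test files in this patch is the forceDynamicPrune failpoint boilerplate. A minimal sketch of what each test dropped, using the real github.com/pingcap/failpoint API (the helper shape is hypothetical); it is dead code now because planner/core no longer injects this failpoint:

	import "github.com/pingcap/failpoint"

	// forceDynamicPrune shows the boilerplate removed from every test in this
	// patch. After the change the planner never reads the failpoint, so
	// enabling it has no effect.
	func forceDynamicPrune() (restore func()) {
		_ = failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
		return func() {
			_ = failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
		}
	}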
diff --git a/executor/analyze.go b/executor/analyze.go
index 6fccec8a9bf5b..cf2604251e2af 100644
--- a/executor/analyze.go
+++ b/executor/analyze.go
@@ -107,11 +107,8 @@ func (e *AnalyzeExec) Next(ctx context.Context, _ *chunk.Chunk) error {
 	close(taskCh)
 	e.wg.Wait()
 	close(resultsCh)
-	pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
-	// needGlobalStats used to indicate whether we should merge the partition-level stats to global-level stats.
-	needGlobalStats := pruneMode == variable.Dynamic
 	globalStatsMap := make(map[globalStatsKey]globalStatsInfo)
-	err = e.handleResultsError(ctx, concurrency, needGlobalStats, globalStatsMap, resultsCh)
+	err = e.handleResultsError(ctx, concurrency, globalStatsMap, resultsCh)
 	for _, task := range e.tasks {
 		if task.colExec != nil && task.colExec.memTracker != nil {
 			task.colExec.memTracker.Detach()
@@ -125,8 +122,7 @@ func (e *AnalyzeExec) Next(ctx context.Context, _ *chunk.Chunk) error {
 		dom.SysProcTracker().KillSysProcess(util.GetAutoAnalyzeProcID(dom.ServerID))
 	})
 
-	// If we enabled dynamic prune mode, then we need to generate global stats here for partition tables.
-	err = e.handleGlobalStats(ctx, needGlobalStats, globalStatsMap)
+	err = e.handleGlobalStats(ctx, globalStatsMap)
 	if err != nil {
 		return err
 	}
@@ -146,10 +142,9 @@ func (e *AnalyzeExec) saveV2AnalyzeOpts() error {
 		return nil
 	}
 	// only to save table options if dynamic prune mode
-	dynamicPrune := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load()) == variable.Dynamic
 	toSaveMap := make(map[int64]core.V2AnalyzeOptions)
 	for id, opts := range e.OptionsMap {
-		if !opts.IsPartition || !dynamicPrune {
+		if !opts.IsPartition {
 			toSaveMap[id] = opts
 		}
 	}
@@ -215,7 +210,7 @@ func (e *AnalyzeExec) recordHistoricalStats(tableID int64) error {
 }
 
 // handleResultsError will handle the error fetch from resultsCh and record it in log
-func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int, needGlobalStats bool,
+func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int,
 	globalStatsMap globalStatsMap, resultsCh <-chan *statistics.AnalyzeResults) error {
 	statsHandle := domain.GetDomain(e.ctx).StatsHandle()
 	panicCnt := 0
@@ -235,7 +230,7 @@ func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int, n
 			finishJobWithLog(e.ctx, results.Job, err)
 			continue
 		}
-		if results.TableID.IsPartitionTable() && needGlobalStats {
+		if results.TableID.IsPartitionTable() {
 			for _, result := range results.Ars {
 				if result.IsIndex == 0 {
 					// If it does not belong to the statistics of index, we need to set it to -1 to distinguish.
diff --git a/executor/analyze_global_stats.go b/executor/analyze_global_stats.go
index c9ff6217a195c..2795727ffba09 100644
--- a/executor/analyze_global_stats.go
+++ b/executor/analyze_global_stats.go
@@ -43,10 +43,7 @@ type globalStatsInfo struct {
 // The meaning of value in map is some additional information needed to build global-level stats.
 type globalStatsMap map[globalStatsKey]globalStatsInfo
 
-func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats bool, globalStatsMap globalStatsMap) error {
-	if !needGlobalStats {
-		return nil
-	}
+func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, globalStatsMap globalStatsMap) error {
 	globalStatsTableIDs := make(map[int64]struct{})
 	for globalStatsID := range globalStatsMap {
 		globalStatsTableIDs[globalStatsID.tableID] = struct{}{}
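[Editor's note] The executor change above condenses to one removed predicate: global stats used to be built only under dynamic prune mode, and handleGlobalStats now runs unconditionally. A sketch of the deleted gate, assuming only the variable package already used in the hunk:

	import "github.com/pingcap/tidb/sessionctx/variable"

	// needGlobalStats reproduces the pre-patch condition that gated the merge
	// of partition-level stats into global-level stats.
	func needGlobalStats(pruneMode string) bool {
		return variable.PartitionPruneMode(pruneMode) == variable.Dynamic
	}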
diff --git a/executor/analyzetest/analyze_test.go b/executor/analyzetest/analyze_test.go
index 9274fd62b423a..dbe4160af9d12 100644
--- a/executor/analyzetest/analyze_test.go
+++ b/executor/analyzetest/analyze_test.go
@@ -798,13 +798,25 @@ func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domai
 	tk.MustQuery("show stats_buckets").Check(testkit.Rows())
 	tk.MustExec("insert into t values (1,1)")
 	tk.MustExec("analyze incremental table t index")
-	tk.MustQuery("show warnings").Check(testkit.Rows()) // no warning
+	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 8244 Build table: `t` column: `b` global-level stats failed due to missing partition-level column stats, please run analyze table to refresh columns of all partitions"))
 	require.NoError(t, h.LoadNeededHistograms())
-	tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0"))
+	tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t global idx 1 0 1 1 1 1 0", "test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0"))
+	tk.MustExec(`analyze table t`)
 	tk.MustExec("insert into t values (2,2)")
 	tk.MustExec("analyze incremental table t index")
+	tk.MustQuery("show warnings").Check(testkit.Rows()) // no warnings
 	require.NoError(t, h.LoadNeededHistograms())
-	tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 a 0 1 2 1 2 2 0", "test t p0 idx 1 0 1 1 1 1 0", "test t p0 idx 1 1 2 1 2 2 0"))
+	tk.MustQuery("show stats_buckets").Sort().Check(testkit.Rows(
+		"test t global a 0 0 1 1 1 1 0",
+		"test t global a 0 1 2 1 1 2 0",
+		"test t global b 0 0 1 1 1 1 0",
+		"test t global idx 1 0 1 1 1 1 0",
+		"test t global idx 1 1 2 1 1 2 0",
+		"test t p0 a 0 0 1 1 1 1 0",
+		"test t p0 a 0 1 2 1 2 2 0",
+		"test t p0 b 0 0 1 1 1 1 0",
+		"test t p0 idx 1 0 1 1 1 1 0",
+		"test t p0 idx 1 1 2 1 2 2 0"))
 	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
 	tk.MustExec("insert into t values (11,11)")
 	err = tk.ExecToErr("analyze incremental table t index")
@@ -1801,6 +1813,7 @@ func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) {
 	tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
 	require.NoError(t, err)
 	defs := tbl.Meta().Partition.Definitions
+	tblID := tbl.Meta().ID
 	p0ID := defs[0].ID
 	p1ID := defs[1].ID
 
@@ -1827,20 +1840,28 @@ func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) {
 	}
 
 	rows := tk.MustQuery("show column_stats_usage where db_name = 'test' and table_name = 't' and last_analyzed_at is not null").Sort().Rows()
-	require.Equal(t, 4, len(rows))
-	require.Equal(t, []interface{}{"test", "t", "p0", "a"}, rows[0][:4])
-	require.Equal(t, []interface{}{"test", "t", "p0", "c"}, rows[1][:4])
-	require.Equal(t, []interface{}{"test", "t", "p1", "a"}, rows[2][:4])
-	require.Equal(t, []interface{}{"test", "t", "p1", "c"}, rows[3][:4])
+	require.Equal(t, 6, len(rows))
+	require.Equal(t, []interface{}{"test", "t", "global", "a"}, rows[0][:4])
+	require.Equal(t, []interface{}{"test", "t", "global", "c"}, rows[1][:4])
+	require.Equal(t, []interface{}{"test", "t", "p0", "a"}, rows[2][:4])
+	require.Equal(t, []interface{}{"test", "t", "p0", "c"}, rows[3][:4])
+	require.Equal(t, []interface{}{"test", "t", "p1", "a"}, rows[4][:4])
+	require.Equal(t, []interface{}{"test", "t", "p1", "c"}, rows[5][:4])
 
 	rows = tk.MustQuery("show stats_meta where db_name = 'test' and table_name = 't'").Sort().Rows()
-	require.Equal(t, 2, len(rows))
-	require.Equal(t, []interface{}{"test", "t", "p0", "0", "9"}, append(rows[0][:3], rows[0][4:]...))
-	require.Equal(t, []interface{}{"test", "t", "p1", "0", "11"}, append(rows[1][:3], rows[1][4:]...))
+	require.Equal(t, 3, len(rows))
+	require.Equal(t, []interface{}{"test", "t", "global", "0", "20"}, append(rows[0][:3], rows[0][4:]...))
+	require.Equal(t, []interface{}{"test", "t", "p0", "0", "9"}, append(rows[1][:3], rows[1][4:]...))
+	require.Equal(t, []interface{}{"test", "t", "p1", "0", "11"}, append(rows[2][:3], rows[2][4:]...))
 
 	tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't' and is_index = 0").Sort().Check(
 		// db, tbl, part, col, is_idx, value, count
-		testkit.Rows("test t p0 a 0 4 2",
+		testkit.Rows(
+			"test t global a 0 16 4",
+			"test t global a 0 5 3",
+			"test t global c 0 1 3",
+			"test t global c 0 14 3",
+			"test t p0 a 0 4 2",
 			"test t p0 a 0 5 3",
 			"test t p0 c 0 1 3",
 			"test t p0 c 0 2 2",
@@ -1851,14 +1872,22 @@ func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) {
 
 	tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't' and is_index = 1").Sort().Check(
 		// db, tbl, part, col, is_idx, value, count
-		testkit.Rows("test t p0 idx 1 1 3",
+		testkit.Rows(
+			"test t global idx 1 1 3",
+			"test t global idx 1 14 3",
+			"test t p0 idx 1 1 3",
 			"test t p0 idx 1 2 2",
 			"test t p1 idx 1 13 2",
 			"test t p1 idx 1 14 3"))
 
 	tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't' and is_index = 0").Sort().Check(
 		// db, tbl, part, col, is_index, bucket_id, count, repeats, lower, upper, ndv
-		testkit.Rows("test t p0 a 0 0 2 1 1 2 0",
+		testkit.Rows(
+			"test t global a 0 0 5 2 1 4 0",
+			"test t global a 0 1 12 2 17 17 0",
+			"test t global c 0 0 6 1 2 6 0",
+			"test t global c 0 1 14 2 13 13 0",
+			"test t p0 a 0 0 2 1 1 2 0",
 			"test t p0 a 0 1 3 1 3 3 0",
 			"test t p0 c 0 0 3 1 3 5 0",
 			"test t p0 c 0 1 4 1 6 6 0",
@@ -1869,20 +1898,27 @@ func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) {
 
 	tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't' and is_index = 1").Sort().Check(
 		// db, tbl, part, col, is_index, bucket_id, count, repeats, lower, upper, ndv
-		testkit.Rows("test t p0 idx 1 0 3 1 3 5 0",
+		testkit.Rows(
+			"test t global idx 1 0 6 1 2 6 0",
+			"test t global idx 1 1 14 2 13 13 0",
+			"test t p0 idx 1 0 3 1 3 5 0",
 			"test t p0 idx 1 1 4 1 6 6 0",
 			"test t p1 idx 1 0 4 1 7 10 0",
 			"test t p1 idx 1 1 6 1 11 12 0"))
 
 	tk.MustQuery("select table_id, is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, truncate(correlation,2) from mysql.stats_histograms order by table_id, is_index, hist_id asc").Check(
-		testkit.Rows(fmt.Sprintf("%d 0 1 5 1 8 2 1", p0ID), // p0, a
-			fmt.Sprintf("%d 0 2 0 0 8 0 0", p0ID), // p0, b, not analyzed
-			fmt.Sprintf("%d 0 3 6 0 9 2 1", p0ID), // p0, c
-			fmt.Sprintf("%d 1 1 6 0 9 2 0", p0ID), // p0, idx
-			fmt.Sprintf("%d 0 1 7 0 11 2 1", p1ID), // p1, a
-			fmt.Sprintf("%d 0 2 0 0 11 0 0", p1ID), // p1, b, not analyzed
-			fmt.Sprintf("%d 0 3 8 0 11 2 1", p1ID), // p1, c
-			fmt.Sprintf("%d 1 1 8 0 11 2 0", p1ID), // p1, idx
+		testkit.Rows(
19 2 0", tblID), // tbl, a + fmt.Sprintf("%d 0 3 14 0 20 2 0", tblID), // tbl, b, not analyzed + fmt.Sprintf("%d 1 1 14 0 0 2 0", tblID), // tbl, c + fmt.Sprintf("%d 0 1 5 1 8 2 1", p0ID), // p0, a + fmt.Sprintf("%d 0 2 0 0 8 0 0", p0ID), // p0, b, not analyzed + fmt.Sprintf("%d 0 3 6 0 9 2 1", p0ID), // p0, c + fmt.Sprintf("%d 1 1 6 0 9 2 0", p0ID), // p0, idx + fmt.Sprintf("%d 0 1 7 0 11 2 1", p1ID), // p1, a + fmt.Sprintf("%d 0 2 0 0 11 0 0", p1ID), // p1, b, not analyzed + fmt.Sprintf("%d 0 3 8 0 11 2 1", p1ID), // p1, c + fmt.Sprintf("%d 1 1 8 0 11 2 0", p1ID), // p1, idx )) }(val) } @@ -2849,8 +2885,6 @@ PARTITION BY RANGE ( a ) ( } func TestAnalyzePartitionStaticToDynamic(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune") store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) originalVal := tk.MustQuery("select @@tidb_persist_analyze_options").Rows()[0][0].(string) @@ -2919,7 +2953,7 @@ PARTITION BY RANGE ( a ) ( tk.MustQuery("select * from t where a > 1 and b > 1 and c > 1 and d > 1") require.NoError(t, h.LoadNeededHistograms()) tbl := h.GetTableStats(tableInfo) - require.Equal(t, 4, len(tbl.Columns)) + require.Equal(t, 0, len(tbl.Columns)) // ignore both p0's 3 buckets, persisted-partition-options' 1 bucket, just use table-level 2 buckets tk.MustExec("analyze table t partition p0") diff --git a/executor/builder.go b/executor/builder.go index efbaf0fab95a0..9f53b88eed5da 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -3369,7 +3369,7 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E sctx := b.ctx.GetSessionVars().StmtCtx sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) - if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() { return ret } // When isPartition is set, it means the union rewriting is done, so a partition reader is preferred. @@ -3573,7 +3573,7 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea e.feedback = statistics.NewQueryFeedback(0, nil, 0, is.Desc) } else { tblID := e.physicalTableID - if b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + if b.ctx.GetSessionVars().UseDynamicPartitionPrune() { tblID = e.table.Meta().ID } e.feedback = statistics.NewQueryFeedback(tblID, is.Hist, int64(is.StatsCount()), is.Desc) @@ -3616,7 +3616,7 @@ func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) E sctx := b.ctx.GetSessionVars().StmtCtx sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) - if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() { return ret } // When isPartition is set, it means the union rewriting is done, so a partition reader is preferred. 
@@ -3792,7 +3792,7 @@ func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLoo
 	sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O)
 	sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID)
 
-	if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return ret
 	}
 
@@ -3929,7 +3929,7 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg
 	sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID)
 	executorCounterIndexMergeReaderExecutor.Inc()
 
-	if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return ret
 	}
 
@@ -4052,7 +4052,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
 		return nil, err
 	}
 	tbInfo := e.table.Meta()
-	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		if v.IsCommonHandle {
 			kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
 			if err != nil {
@@ -4300,7 +4300,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte
 		return nil, err
 	}
 	tbInfo := e.table.Meta()
-	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memoryTracker, interruptSignal)
 		if err != nil {
 			return nil, err
@@ -4347,7 +4347,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
 	}
 
 	tbInfo := e.table.Meta()
-	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
 		if err != nil {
 			return nil, err
@@ -4933,7 +4933,7 @@ func getPhysicalTableID(t table.Table) int64 {
 }
 
 func getFeedbackStatsTableID(ctx sessionctx.Context, t table.Table) int64 {
-	if p, ok := t.(table.PhysicalTable); ok && !ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if p, ok := t.(table.PhysicalTable); ok && !ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return p.GetPhysicalID()
 	}
 	return t.Meta().ID
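[Editor's note] Every builder.go hunk above is the same mechanical migration: the dynamic-prune decision moves from the per-statement context to the session variables. Sketch of the rewrite (sctx stands in for any sessionctx.Context):

	import "github.com/pingcap/tidb/sessionctx"

	// useDynamic shows the call-site rewrite applied throughout this file:
	//   before: sctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune()
	//   after:  sctx.GetSessionVars().UseDynamicPartitionPrune()
	func useDynamic(sctx sessionctx.Context) bool {
		return sctx.GetSessionVars().UseDynamicPartitionPrune()
	}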
diff --git a/executor/distsql_test.go b/executor/distsql_test.go
index 1f895a6703420..a0fc642a020d7 100644
--- a/executor/distsql_test.go
+++ b/executor/distsql_test.go
@@ -291,8 +291,6 @@ func TestPushLimitDownIndexLookUpReader(t *testing.T) {
 }
 
 func TestPartitionTableIndexLookUpReader(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
diff --git a/executor/executor.go b/executor/executor.go
index 5e13783a489a3..e9c3a2a3a6a07 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -1927,13 +1927,6 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
 	sc.StatsLoadStatus = make(map[model.TableItemID]string)
 	sc.IsSyncStatsFailed = false
 	sc.IsExplainAnalyzeDML = false
-	// Firstly we assume that UseDynamicPruneMode can be enabled according session variable, then we will check other conditions
-	// in PlanBuilder.buildDataSource
-	if ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-		sc.UseDynamicPruneMode = true
-	} else {
-		sc.UseDynamicPruneMode = false
-	}
 
 	sc.SysdateIsNow = ctx.GetSessionVars().SysdateIsNow
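[Editor's note] With StmtCtx.UseDynamicPruneMode gone, ResetContextOfStmt no longer pre-computes anything; call sites read the session variable directly. Incidentally, the deleted if/else was always equivalent to a single assignment:

	// The removed block, minus its redundant branch:
	sc.UseDynamicPruneMode = ctx.GetSessionVars().IsDynamicPartitionPruneEnabled()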
diff --git a/executor/executor_issue_test.go b/executor/executor_issue_test.go
index febdbd6031823..8e8b4d28f0fa3 100644
--- a/executor/executor_issue_test.go
+++ b/executor/executor_issue_test.go
@@ -583,8 +583,6 @@ func TestFix31537(t *testing.T) {
 }
 
 func TestIssue30382(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
diff --git a/executor/index_lookup_join_test.go b/executor/index_lookup_join_test.go
index 24168058353ec..dfd9f04a69140 100644
--- a/executor/index_lookup_join_test.go
+++ b/executor/index_lookup_join_test.go
@@ -21,7 +21,6 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/testkit"
 	"github.com/stretchr/testify/require"
 )
@@ -392,8 +391,6 @@ func TestIssue24547(t *testing.T) {
 }
 
 func TestIssue27138(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go
index 1f70c5bc04630..8daef02d4b53e 100644
--- a/executor/partition_table_test.go
+++ b/executor/partition_table_test.go
@@ -21,7 +21,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/infoschema"
@@ -64,8 +63,6 @@ func TestSetPartitionPruneMode(t *testing.T) {
 }
 
 func TestFourReader(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -562,8 +559,6 @@ func TestView(t *testing.T) {
 }
 
 func TestDirectReadingwithIndexJoin(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -676,8 +671,6 @@ func TestDirectReadingwithIndexJoin(t *testing.T) {
 }
 
 func TestDynamicPruningUnderIndexJoin(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -980,8 +973,6 @@ func TestGlobalStatsAndSQLBinding(t *testing.T) {
 }
 
 func TestPartitionTableWithDifferentJoin(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -1770,8 +1761,6 @@ func TestDynamicPruneModeWithExpression(t *testing.T) {
 }
 
 func TestAddDropPartitions(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -1805,8 +1794,6 @@
 }
 
 func TestMPPQueryExplainInfo(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -1836,8 +1823,6 @@
 }
 
 func TestPartitionPruningInTransaction(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -2071,8 +2056,6 @@ func TestSubqueries(t *testing.T) {
 }
 
 func TestSplitRegion(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -2109,9 +2092,6 @@ func TestSplitRegion(t *testing.T) {
 }
 
 func TestParallelApply(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -2619,8 +2599,6 @@ func TestDirectReadingWithAgg(t *testing.T) {
 }
 
 func TestDynamicModeByDefault(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -2816,8 +2794,6 @@ func TestIssue25309(t *testing.T) {
 }
 
 func TestGlobalIndexScan(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -2838,8 +2814,6 @@ partition p2 values less than (10))`)
 }
 
 func TestGlobalIndexDoubleRead(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
diff --git a/executor/show_stats.go b/executor/show_stats.go
index 058b3ed7c62da..3388b90e27b44 100644
--- a/executor/show_stats.go
+++ b/executor/show_stats.go
@@ -108,18 +108,12 @@ func (e *ShowExec) fetchShowStatsMeta() error {
 	for _, db := range dbs {
 		for _, tbl := range db.Tables {
 			pi := tbl.GetPartitionInfo()
-			if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-				partitionName := ""
-				if pi != nil {
-					partitionName = "global"
-				}
-				e.appendTableForStatsMeta(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl))
-				if pi != nil {
-					for _, def := range pi.Definitions {
-						e.appendTableForStatsMeta(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID))
-					}
-				}
-			} else {
+			partitionName := ""
+			if pi != nil {
+				partitionName = "global"
+			}
+			e.appendTableForStatsMeta(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl))
+			if pi != nil {
 				for _, def := range pi.Definitions {
 					e.appendTableForStatsMeta(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID))
 				}
@@ -150,18 +144,12 @@ func (e *ShowExec) fetchShowStatsHistogram() error {
 	for _, db := range dbs {
 		for _, tbl := range db.Tables {
 			pi := tbl.GetPartitionInfo()
-			if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-				partitionName := ""
-				if pi != nil {
-					partitionName = "global"
-				}
-				e.appendTableForStatsHistograms(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl))
-				if pi != nil {
-					for _, def := range pi.Definitions {
-						e.appendTableForStatsHistograms(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID))
-					}
-				}
-			} else {
+			partitionName := ""
+			if pi != nil {
+				partitionName = "global"
+			}
+			e.appendTableForStatsHistograms(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl))
+			if pi != nil {
 				for _, def := range pi.Definitions {
 					e.appendTableForStatsHistograms(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID))
 				}
@@ -224,22 +212,14 @@ func (e *ShowExec) fetchShowStatsBuckets() error {
 	for _, db := range dbs {
 		for _, tbl := range db.Tables {
 			pi := tbl.GetPartitionInfo()
-			if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-				partitionName := ""
-				if pi != nil {
-					partitionName = "global"
-				}
-				if err := e.appendTableForStatsBuckets(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl)); err != nil {
-					return err
-				}
-				if pi != nil {
-					for _, def := range pi.Definitions {
-						if err := e.appendTableForStatsBuckets(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID)); err != nil {
-							return err
-						}
-					}
-				}
-			} else {
+			partitionName := ""
+			if pi != nil {
+				partitionName = "global"
+			}
+			if err := e.appendTableForStatsBuckets(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl)); err != nil {
+				return err
+			}
+			if pi != nil {
 				for _, def := range pi.Definitions {
 					if err := e.appendTableForStatsBuckets(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID)); err != nil {
 						return err
@@ -283,22 +263,14 @@ func (e *ShowExec) fetchShowStatsTopN() error {
 	for _, db := range dbs {
 		for _, tbl := range db.Tables {
 			pi := tbl.GetPartitionInfo()
-			if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-				partitionName := ""
-				if pi != nil {
-					partitionName = "global"
-				}
-				if err := e.appendTableForStatsTopN(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl)); err != nil {
-					return err
-				}
-				if pi != nil {
-					for _, def := range pi.Definitions {
-						if err := e.appendTableForStatsTopN(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID)); err != nil {
-							return err
-						}
-					}
-				}
-			} else {
+			partitionName := ""
+			if pi != nil {
+				partitionName = "global"
+			}
+			if err := e.appendTableForStatsTopN(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl)); err != nil {
+				return err
+			}
+			if pi != nil {
 				for _, def := range pi.Definitions {
 					if err := e.appendTableForStatsTopN(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID)); err != nil {
 						return err
@@ -415,18 +387,12 @@ func (e *ShowExec) fetchShowStatsHealthy() {
 	for _, db := range dbs {
 		for _, tbl := range db.Tables {
 			pi := tbl.GetPartitionInfo()
-			if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() {
-				partitionName := ""
-				if pi != nil {
-					partitionName = "global"
-				}
-				e.appendTableForStatsHealthy(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl))
-				if pi != nil {
-					for _, def := range pi.Definitions {
-						e.appendTableForStatsHealthy(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID))
-					}
-				}
-			} else {
+			partitionName := ""
+			if pi != nil {
+				partitionName = "global"
+			}
+			e.appendTableForStatsHealthy(db.Name.O, tbl.Name.O, partitionName, h.GetTableStats(tbl))
+			if pi != nil {
 				for _, def := range pi.Definitions {
 					e.appendTableForStatsHealthy(db.Name.O, tbl.Name.O, def.Name.O, h.GetPartitionStats(tbl, def.ID))
 				}
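[Editor's note] All five fetchShowStats* hunks above collapse the same two-branch loop into one. The shared shape, with appendRow as a hypothetical stand-in for the various appendTableFor* helpers:

	import "github.com/pingcap/tidb/parser/model"

	// showStatsRows emits one table-level row (labelled "global" for
	// partitioned tables) followed by one row per partition — now in every
	// prune mode rather than only in dynamic mode.
	func showStatsRows(pi *model.PartitionInfo, appendRow func(partitionName string)) {
		partitionName := ""
		if pi != nil {
			partitionName = "global"
		}
		appendRow(partitionName)
		if pi != nil {
			for _, def := range pi.Definitions {
				appendRow(def.Name.O)
			}
		}
	}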
diff --git a/executor/show_stats_test.go b/executor/show_stats_test.go
index cb8bdebcad8ac..bf15298c33fce 100644
--- a/executor/show_stats_test.go
+++ b/executor/show_stats_test.go
@@ -201,25 +201,32 @@ func TestShowPartitionStats(t *testing.T) {
 		tk.MustExec("analyze table t")
 
 		result := tk.MustQuery("show stats_meta")
-		require.Len(t, result.Rows(), 1)
+		require.Len(t, result.Rows(), 2)
 		require.Equal(t, "test", result.Rows()[0][0])
 		require.Equal(t, "t", result.Rows()[0][1])
-		require.Equal(t, "p0", result.Rows()[0][2])
+		require.Equal(t, "global", result.Rows()[0][2])
+		require.Equal(t, "test", result.Rows()[1][0])
+		require.Equal(t, "t", result.Rows()[1][1])
+		require.Equal(t, "p0", result.Rows()[1][2])
 
 		result = tk.MustQuery("show stats_histograms").Sort()
-		require.Len(t, result.Rows(), 3)
-		require.Equal(t, "p0", result.Rows()[0][2])
-		require.Equal(t, "a", result.Rows()[0][3])
-		require.Equal(t, "p0", result.Rows()[1][2])
-		require.Equal(t, "b", result.Rows()[1][3])
+		require.Len(t, result.Rows(), 5)
+		require.Equal(t, "global", result.Rows()[0][2])
+		require.Equal(t, "b", result.Rows()[0][3])
+		require.Equal(t, "global", result.Rows()[1][2])
+		require.Equal(t, "idx", result.Rows()[1][3])
 		require.Equal(t, "p0", result.Rows()[2][2])
-		require.Equal(t, "idx", result.Rows()[2][3])
+		require.Equal(t, "a", result.Rows()[2][3])
+		require.Equal(t, "p0", result.Rows()[3][2])
+		require.Equal(t, "b", result.Rows()[3][3])
+		require.Equal(t, "p0", result.Rows()[4][2])
+		require.Equal(t, "idx", result.Rows()[4][3])
 
 		result = tk.MustQuery("show stats_buckets").Sort()
-		result.Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 b 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0"))
+		result.Check(testkit.Rows("test t global b 0 0 1 1 1 1 0", "test t global idx 1 0 1 1 1 1 0", "test t p0 a 0 0 1 1 1 1 0", "test t p0 b 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0"))
 
 		result = tk.MustQuery("show stats_healthy")
-		result.Check(testkit.Rows("test t p0 100"))
+		result.Check(testkit.Rows("test t global 100", "test t p0 100"))
 	})
 }
diff --git a/executor/tiflashtest/tiflash_test.go b/executor/tiflashtest/tiflash_test.go
index 37bcc7272575e..56bb5fa04eb7f 100644
--- a/executor/tiflashtest/tiflash_test.go
+++ b/executor/tiflashtest/tiflash_test.go
@@ -457,8 +457,6 @@ func TestTiFlashPartitionTableReader(t *testing.T) {
 }
 
 func TestPartitionTable(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t, withMockTiFlash(2))
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
diff --git a/executor/write_test.go b/executor/write_test.go
index ab3ec6813f5c7..0c28d521c61a1 100644
--- a/executor/write_test.go
+++ b/executor/write_test.go
@@ -21,7 +21,6 @@ import (
 	"strconv"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/executor"
 	"github.com/pingcap/tidb/kv"
@@ -1192,8 +1191,6 @@ func TestGeneratedColumnForInsert(t *testing.T) {
 }
 
 func TestPartitionedTableReplace(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -1452,8 +1449,6 @@ func TestHashPartitionedTableReplace(t *testing.T) {
 }
 
 func TestPartitionedTableUpdate(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -1724,8 +1719,6 @@ func TestDelete(t *testing.T) {
 }
 
 func TestPartitionedTableDelete(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	createTable := `CREATE TABLE test.t (id int not null default 1, name varchar(255), index(id))
 			  PARTITION BY RANGE ( id ) (
 			  PARTITION p0 VALUES LESS THAN (6),
@@ -3244,8 +3237,6 @@ func TestWriteListPartitionTable1(t *testing.T) {
 }
 
 // TestWriteListPartitionTable2 test for write list partition when the partition expression is complicated and contain generated column.
 func TestWriteListPartitionTable2(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -3368,8 +3359,6 @@ func TestWriteListPartitionTable2(t *testing.T) {
 }
 
 func TestWriteListColumnsPartitionTable1(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -3941,8 +3930,6 @@ func testEqualDatumsAsBinary(t *testing.T, a []interface{}, b []interface{}, sam
 }
 
 func TestUpdate(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -4213,8 +4200,6 @@ func TestUpdate(t *testing.T) {
 }
 
 func TestListColumnsPartitionWithGlobalIndex(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
diff --git a/planner/cascades/integration_test.go b/planner/cascades/integration_test.go
index ebb80b192a383..e3a7b750373ec 100644
--- a/planner/cascades/integration_test.go
+++ b/planner/cascades/integration_test.go
@@ -18,7 +18,6 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/planner/cascades"
 	"github.com/pingcap/tidb/sessionctx/variable"
 	"github.com/pingcap/tidb/testkit"
@@ -376,8 +375,6 @@ func TestTopN(t *testing.T) {
 }
 
 func TestCascadePlannerHashedPartTable(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
diff --git a/planner/core/access_object.go b/planner/core/access_object.go
index c9994efe3de86..22c8deb777d50 100644
--- a/planner/core/access_object.go
+++ b/planner/core/access_object.go
@@ -352,7 +352,7 @@ func (p *BatchPointGetPlan) AccessObject() AccessObject {
 
 func getDynamicAccessPartition(sctx sessionctx.Context, tblInfo *model.TableInfo, partitionInfo *PartitionInfo, asName string) (res *DynamicPartitionAccessObject) {
 	pi := tblInfo.GetPartitionInfo()
-	if pi == nil || !sctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if pi == nil || !sctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return nil
 	}
 
@@ -392,7 +392,7 @@ func getDynamicAccessPartition(sctx sessionctx.Context, tblInfo *model.TableInfo
 }
 
 func (p *PhysicalTableReader) accessObject(sctx sessionctx.Context) AccessObject {
-	if !sctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !sctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return DynamicPartitionAccessObjects(nil)
 	}
 	if len(p.PartitionInfos) == 0 {
@@ -444,7 +444,7 @@ func (p *PhysicalTableReader) accessObject(sctx sessionctx.Context) AccessObject
 }
 
 func (p *PhysicalIndexReader) accessObject(sctx sessionctx.Context) AccessObject {
-	if !sctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !sctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return DynamicPartitionAccessObjects(nil)
 	}
 	is := p.IndexPlans[0].(*PhysicalIndexScan)
@@ -460,7 +460,7 @@ func (p *PhysicalIndexReader) accessObject(sctx sessionctx.Context) AccessObject
 }
 
 func (p *PhysicalIndexLookUpReader) accessObject(sctx sessionctx.Context) AccessObject {
-	if !sctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !sctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return DynamicPartitionAccessObjects(nil)
 	}
 	ts := p.TablePlans[0].(*PhysicalTableScan)
@@ -476,7 +476,7 @@ func (p *PhysicalIndexLookUpReader) accessObject(sctx sessionctx.Context) Access
 }
 
 func (p *PhysicalIndexMergeReader) accessObject(sctx sessionctx.Context) AccessObject {
-	if !sctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if !sctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return DynamicPartitionAccessObjects(nil)
 	}
 	ts := p.TablePlans[0].(*PhysicalTableScan)
diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go
index 31ba6bfeb3e07..d8d46c04d2aaa 100644
--- a/planner/core/cbo_test.go
+++ b/planner/core/cbo_test.go
@@ -23,7 +23,6 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/executor"
 	"github.com/pingcap/tidb/parser/model"
@@ -820,8 +819,6 @@ func TestLimitIndexEstimation(t *testing.T) {
 }
 
 func TestBatchPointGetTablePartition(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	testKit := testkit.NewTestKit(t, store)
 	testKit.MustExec("use test")
diff --git a/planner/core/collect_column_stats_usage_test.go b/planner/core/collect_column_stats_usage_test.go
index 38d246ff8bfd7..d494235bc66fc 100644
--- a/planner/core/collect_column_stats_usage_test.go
+++ b/planner/core/collect_column_stats_usage_test.go
@@ -20,7 +20,6 @@ import (
 	"sort"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/util/hint"
@@ -273,8 +272,6 @@ func TestCollectPredicateColumns(t *testing.T) {
 }
 
 func TestCollectHistNeededColumns(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	tests := []struct {
 		pruneMode string
 		sql       string
diff --git a/planner/core/explain.go b/planner/core/explain.go
index 706f96c7a047b..3e5b7e7d1b8df 100644
--- a/planner/core/explain.go
+++ b/planner/core/explain.go
@@ -214,7 +214,7 @@ func (p *PhysicalTableScan) OperatorInfo(normalized bool) string {
 	if p.stats.StatsVersion == statistics.PseudoVersion && !normalized {
 		buffer.WriteString(", stats:pseudo")
 	}
-	if p.StoreType == kv.TiFlash && p.Table.GetPartitionInfo() != nil && p.IsMPPOrBatchCop && p.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if p.StoreType == kv.TiFlash && p.Table.GetPartitionInfo() != nil && p.IsMPPOrBatchCop && p.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		buffer.WriteString(", PartitionTableScan:true")
 	}
 	return buffer.String()
diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go
index ad66f366b9842..5a9521f577515 100644
--- a/planner/core/find_best_task.go
+++ b/planner/core/find_best_task.go
@@ -933,7 +933,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter
 	var hashPartColName *ast.ColumnName
 	if tblInfo := ds.table.Meta(); canConvertPointGet && tblInfo.GetPartitionInfo() != nil {
 		// We do not build [batch] point get for dynamic table partitions now. This can be optimized.
-		if ds.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+		if ds.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 			canConvertPointGet = false
 		}
 		if canConvertPointGet && len(path.Ranges) > 1 {
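[Editor's note] find_best_task.go keeps its guard unchanged in substance; only the source of the setting moves. The retained logic, restated as a fragment with the names from the hunk above:

	// Point get is still not built for partitioned tables under dynamic
	// prune; the check now reads SessionVars instead of StmtCtx.
	if tblInfo := ds.table.Meta(); canConvertPointGet && tblInfo.GetPartitionInfo() != nil {
		if ds.ctx.GetSessionVars().UseDynamicPartitionPrune() {
			canConvertPointGet = false
		}
	}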
diff --git a/planner/core/integration_partition_test.go b/planner/core/integration_partition_test.go
index f3e1dc96195f1..3e41d5a48111a 100644
--- a/planner/core/integration_partition_test.go
+++ b/planner/core/integration_partition_test.go
@@ -23,7 +23,6 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/errno"
 	"github.com/pingcap/tidb/parser/auth"
 	"github.com/pingcap/tidb/planner/core"
@@ -110,9 +109,6 @@ func TestListColVariousTypes(t *testing.T) {
 }
 
 func TestListPartitionPruning(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go
index 17d265be0490b..d67c9bff4ef30 100644
--- a/planner/core/integration_test.go
+++ b/planner/core/integration_test.go
@@ -1478,8 +1478,6 @@ func TestPartitionTableStats(t *testing.T) {
 }
 
 func TestPartitionPruningForInExpr(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -1505,8 +1503,6 @@ func TestPartitionPruningForInExpr(t *testing.T) {
 }
 
 func TestPartitionPruningWithDateType(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -3439,8 +3435,6 @@ func TestExplainAnalyzeDML2(t *testing.T) {
 }
 
 func TestPartitionExplain(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -3615,8 +3609,6 @@ func TestPartitionUnionWithPPruningColumn(t *testing.T) {
 }
 
 func TestIssue20139(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
@@ -5094,8 +5086,6 @@ func TestIncrementalAnalyzeStatsVer2(t *testing.T) {
 }
 
 func TestConflictReadFromStorage(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -6589,9 +6579,6 @@ func TestIssue32632(t *testing.T) {
 }
 
 func TestTiFlashPartitionTableScan(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
 	store, dom := testkit.CreateMockStoreAndDomain(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -7253,29 +7240,23 @@ func TestPartitionTableFallBackStatic(t *testing.T) {
 	tk.MustExec("analyze table t2")
 
 	tk.MustExec("set @@tidb_partition_prune_mode='dynamic'")
+	rows = [][]interface{}{
+		{"TableReader", "partition:all", "data:TableFullScan"},
+		{"└─TableFullScan", "table:t", "keep order:false"},
+	}
 	// use static plan in dynamic mode due to having not global stats
 	tk.MustQuery("explain format='brief' select * from t").CheckAt([]int{0, 3, 4}, rows)
 	tk.MustExec("analyze table t")
 	// use dynamic plan in dynamic mode with global stats
-	rows = [][]interface{}{
-		{"TableReader", "partition:all", "data:TableFullScan"},
-		{"└─TableFullScan", "table:t", "keep order:false"},
-	}
 	tk.MustQuery("explain format='brief' select * from t").CheckAt([]int{0, 3, 4}, rows)
 
 	rows = [][]interface{}{
 		{"Union", "", ""},
-		{"├─PartitionUnion", "", ""},
-		{"│ ├─TableReader", "", "data:TableFullScan"},
-		{"│ │ └─TableFullScan", "table:t, partition:p0", "keep order:false"},
-		{"│ └─TableReader", "", "data:TableFullScan"},
-		{"│   └─TableFullScan", "table:t, partition:p1", "keep order:false"},
-		{"└─PartitionUnion", "", ""},
-		{"  ├─TableReader", "", "data:TableFullScan"},
-		{"  │ └─TableFullScan", "table:t2, partition:p0", "keep order:false"},
-		{"  └─TableReader", "", "data:TableFullScan"},
-		{"    └─TableFullScan", "table:t2, partition:p1", "keep order:false"},
+		{"├─TableReader", "partition:all", "data:TableFullScan"},
+		{"│ └─TableFullScan", "table:t", "keep order:false"},
+		{"└─TableReader", "partition:all", "data:TableFullScan"},
+		{"  └─TableFullScan", "table:t2", "keep order:false"},
 	}
 	// use static plan in dynamic mode due to t2 has no global stats
 	tk.MustQuery("explain format='brief' select * from t union all select * from t2;").CheckAt([]int{0, 3, 4}, rows)
diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go
index 462448cfa4fbe..60e24516e9eba 100644
--- a/planner/core/logical_plan_builder.go
+++ b/planner/core/logical_plan_builder.go
@@ -26,7 +26,6 @@ import (
 	"unicode"
 
 	"github.com/pingcap/errors"
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/expression/aggregation"
@@ -4181,7 +4180,7 @@ func getStatsTable(ctx sessionctx.Context, tblInfo *model.TableInfo, pid int64)
 	}
 
 	var statsTbl *statistics.Table
-	if pid == tblInfo.ID || ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if pid == tblInfo.ID || ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		statsTbl = statsHandle.GetTableStats(tblInfo, handle.WithTableStatsByQuery())
 	} else {
 		statsTbl = statsHandle.GetPartitionStats(tblInfo, pid, handle.WithTableStatsByQuery())
@@ -4403,28 +4402,9 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as
 	}
 
 	if tableInfo.GetPartitionInfo() != nil {
-		h := domain.GetDomain(b.ctx).StatsHandle()
-		tblStats := h.GetTableStats(tableInfo)
-		isDynamicEnabled := b.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled()
-		globalStatsReady := tblStats.IsInitialized()
-		// If dynamic partition prune isn't enabled or global stats is not ready, we won't enable dynamic prune mode in query
-		usePartitionProcessor := !isDynamicEnabled || !globalStatsReady
-
-		failpoint.Inject("forceDynamicPrune", func(val failpoint.Value) {
-			if val.(bool) {
-				if isDynamicEnabled {
-					usePartitionProcessor = false
-				}
-			}
-		})
-
-		if usePartitionProcessor {
+		// Use the new partition implementation, clean up the code here when it's fully implemented.
+		if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 			b.optFlag = b.optFlag | flagPartitionProcessor
-			b.ctx.GetSessionVars().StmtCtx.UseDynamicPruneMode = false
-			if isDynamicEnabled {
-				b.ctx.GetSessionVars().StmtCtx.AppendWarning(
-					fmt.Errorf("disable dynamic pruning due to %s has no global stats", tableInfo.Name.String()))
-			}
 		}
 
 		pt := tbl.(table.PartitionedTable)
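[Editor's note] The buildDataSource hunk above is the center of the patch. The planner previously downgraded a dynamic-mode query to static pruning (with a warning) whenever global stats were missing, which is exactly what the forceDynamicPrune failpoint existed to bypass in tests. Since ANALYZE now produces global stats in every prune mode, the readiness probe, the downgrade, and the failpoint all disappear, leaving:

	// Post-patch decision, as in the hunk above: static mode expands
	// partitions via the partition processor; dynamic mode relies on the
	// global stats that ANALYZE now always generates.
	if tableInfo.GetPartitionInfo() != nil {
		if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() {
			b.optFlag = b.optFlag | flagPartitionProcessor
		}
	}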
diff --git a/planner/core/partition_pruner_test.go b/planner/core/partition_pruner_test.go
index fffef54373bd8..d3db641a8efd9 100644
--- a/planner/core/partition_pruner_test.go
+++ b/planner/core/partition_pruner_test.go
@@ -21,7 +21,6 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	plannercore "github.com/pingcap/tidb/planner/core"
 	"github.com/pingcap/tidb/sessionctx/variable"
 	"github.com/pingcap/tidb/testkit"
@@ -30,8 +29,6 @@ import (
 )
 
 func TestHashPartitionPruner(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("create database test_partition")
@@ -253,8 +250,6 @@ func TestRangeColumnPartitionPruningForInString(t *testing.T) {
 }
 
 func TestListPartitionPruner(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("drop database if exists test_partition;")
@@ -326,8 +321,6 @@ func TestListPartitionPruner(t *testing.T) {
 }
 
 func TestListColumnsPartitionPruner(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("set @@session.tidb_enable_list_partition = ON")
@@ -539,8 +532,6 @@ func TestListColumnsPartitionPrunerRandom(t *testing.T) {
 }
 
 func TestIssue22635(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("USE test;")
diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go
index 7acb189f3060f..8f741b1977aa0 100644
--- a/planner/core/physical_plan_test.go
+++ b/planner/core/physical_plan_test.go
@@ -21,7 +21,6 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/executor"
 	"github.com/pingcap/tidb/infoschema"
@@ -1317,8 +1316,6 @@ func doTestPushdownDistinct(t *testing.T, vars, input []string, output []struct
 }
 
 func TestGroupConcatOrderby(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	var (
 		input  []string
 		output []struct {
@@ -2127,9 +2124,6 @@ func TestHJBuildAndProbeHint4StaticPartitionTable(t *testing.T) {
 }
 
 func TestHJBuildAndProbeHint4DynamicPartitionTable(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
 	var (
 		input  []string
 		output []struct {
diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go
index 5b296acf79a4b..d9bfd2b5e659f 100644
--- a/planner/core/plan_to_pb.go
+++ b/planner/core/plan_to_pb.go
@@ -185,7 +185,7 @@ func (p *PhysicalLimit) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*t
 
 // ToPB implements PhysicalPlan ToPB interface.
 func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
-	if storeType == kv.TiFlash && p.Table.GetPartitionInfo() != nil && p.IsMPPOrBatchCop && p.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+	if storeType == kv.TiFlash && p.Table.GetPartitionInfo() != nil && p.IsMPPOrBatchCop && p.ctx.GetSessionVars().UseDynamicPartitionPrune() {
 		return p.partitionTableScanToPBForFlash(ctx)
 	}
 	tsExec := tables.BuildTableScanFromInfos(p.Table, p.Columns)
diff --git a/planner/core/point_get_plan_test.go b/planner/core/point_get_plan_test.go
index 3a92d25719c09..71804ae0fc00e 100644
--- a/planner/core/point_get_plan_test.go
+++ b/planner/core/point_get_plan_test.go
@@ -21,7 +21,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/metrics"
 	"github.com/pingcap/tidb/planner"
@@ -654,8 +653,6 @@ func TestBatchPointGetPartition(t *testing.T) {
 }
 
 func TestBatchPointGetPartitionForAccessObject(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
diff --git a/planner/core/rule_join_reorder_test.go b/planner/core/rule_join_reorder_test.go
index a0c136d8d4a2f..a981495807d3d 100644
--- a/planner/core/rule_join_reorder_test.go
+++ b/planner/core/rule_join_reorder_test.go
@@ -17,7 +17,6 @@ package core_test
 import (
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/parser/model"
 	plannercore "github.com/pingcap/tidb/planner/core"
@@ -234,8 +233,6 @@ func TestJoinOrderHint4StaticPartitionTable(t *testing.T) {
 }
 
 func TestJoinOrderHint4DynamicPartitionTable(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 
diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go
index 81905683dc0ab..bdc1d797ed283 100644
--- a/sessionctx/stmtctx/stmtctx.go
+++ b/sessionctx/stmtctx/stmtctx.go
@@ -318,8 +318,6 @@ type StatementContext struct {
 	StatsLoadStatus map[model.TableItemID]string
 	// IsSyncStatsFailed indicates whether any failure happened during sync stats
 	IsSyncStatsFailed bool
-	// UseDynamicPruneMode indicates whether use UseDynamicPruneMode in query stmt
-	UseDynamicPruneMode bool
 	// ColRefFromPlan mark the column ref used by assignment in update statement.
 	ColRefFromUpdatePlan []int64
 
@@ -1005,11 +1003,6 @@ func (sc *StatementContext) RecordRangeFallback(rangeMaxSize int64) {
 	}
 }
 
-// UseDynamicPartitionPrune indicates whether dynamic partition is used during the query
-func (sc *StatementContext) UseDynamicPartitionPrune() bool {
-	return sc.UseDynamicPruneMode
-}
-
// CopTasksDetails collects some useful information of cop-tasks during execution.
 type CopTasksDetails struct {
 	NumCopTasks int
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index db3606baf60b3..19af25d6a4fe0 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -1359,10 +1359,8 @@ func (s *SessionVars) CheckAndGetTxnScope() string {
 	return kv.GlobalTxnScope
 }
 
-// IsDynamicPartitionPruneEnabled indicates whether dynamic partition prune enabled
-// Note that: IsDynamicPartitionPruneEnabled only indicates whether dynamic partition prune mode is enabled according to
-// session variable, it isn't guaranteed to be used during query due to other conditions checking.
-func (s *SessionVars) IsDynamicPartitionPruneEnabled() bool {
+// UseDynamicPartitionPrune indicates whether the new dynamic partition prune mode is in use.
+func (s *SessionVars) UseDynamicPartitionPrune() bool {
 	return PartitionPruneMode(s.PartitionPruneMode.Load()) == Dynamic
 }
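[Editor's note] The rename above carries a semantic shift: IsDynamicPartitionPruneEnabled was only a hint that buildDataSource could still override per statement, whereas UseDynamicPartitionPrune is now the final answer, re-read at each call site. A standalone equivalent of the method, assuming only the exported SessionVars field it uses:

	import "github.com/pingcap/tidb/sessionctx/variable"

	// useDynamicPartitionPrune mirrors the renamed method: a pure function
	// of the tidb_partition_prune_mode session variable.
	func useDynamicPartitionPrune(s *variable.SessionVars) bool {
		return variable.PartitionPruneMode(s.PartitionPruneMode.Load()) == variable.Dynamic
	}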
diff --git a/statistics/handle/dump_test.go b/statistics/handle/dump_test.go
index 0e3006604e5cc..f4bc176c7b300 100644
--- a/statistics/handle/dump_test.go
+++ b/statistics/handle/dump_test.go
@@ -133,13 +133,19 @@ func TestDumpGlobalStats(t *testing.T) {
 	tk.MustExec("insert into t values (1), (2)")
 	tk.MustExec("analyze table t")
 
-	// global-stats is not existed
+	// global-stats is also generated even in static prune mode
 	stats := getStatsJSON(t, dom, "test", "t")
 	require.NotNil(t, stats.Partitions["p0"])
 	require.NotNil(t, stats.Partitions["p1"])
+	require.NotNil(t, stats.Partitions["global"])
+
+	tk.MustExec(`DROP STATS t`)
+	stats = getStatsJSON(t, dom, "test", "t")
+	require.Nil(t, stats.Partitions["p0"])
+	require.Nil(t, stats.Partitions["p1"])
 	require.Nil(t, stats.Partitions["global"])
 
-	// global-stats is existed
+	// global-stats is also created for dynamic prune mode
 	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
 	tk.MustExec("analyze table t")
 	stats = getStatsJSON(t, dom, "test", "t")
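[Editor's note] The statistics test updates above and below all assert the same new invariant: ANALYZE writes a table-level ("global") stats row regardless of prune mode, and only DROP STATS removes it. A minimal testkit sketch of that invariant (hypothetical test name; APIs as used throughout this patch):

	func TestGlobalStatsExistInStaticModeSketch(t *testing.T) {
		store := testkit.CreateMockStore(t)
		tk := testkit.NewTestKit(t, store)
		tk.MustExec("use test")
		tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
		tk.MustExec("create table t (a int) partition by hash(a) partitions 2")
		tk.MustExec("insert into t values (1), (2), (3), (4)")
		tk.MustExec("analyze table t")
		// One 'global' row plus one row per partition, even in static mode.
		require.Len(t, tk.MustQuery("show stats_meta where partition_name = 'global'").Rows(), 1)
		require.Len(t, tk.MustQuery("show stats_meta").Rows(), 3)
	}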
diff --git a/statistics/handle/gc_test.go b/statistics/handle/gc_test.go
index 66c4df5260a9a..7df0763798ba1 100644
--- a/statistics/handle/gc_test.go
+++ b/statistics/handle/gc_test.go
@@ -70,23 +70,23 @@ func TestGCPartition(t *testing.T) {
 	testKit.MustExec("insert into t values (1,2),(2,3),(3,4),(4,5),(5,6)")
 	testKit.MustExec("analyze table t")
 
-	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("6"))
-	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("15"))
+	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("9"))
+	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("30"))
 	h := dom.StatsHandle()
 	ddlLease := time.Duration(0)
 	testKit.MustExec("alter table t drop index idx")
 	require.Nil(t, h.GCStats(dom.InfoSchema(), ddlLease))
-	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("4"))
-	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("10"))
+	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("6"))
+	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("20"))
 	testKit.MustExec("alter table t drop column b")
 	require.Nil(t, h.GCStats(dom.InfoSchema(), ddlLease))
-	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("2"))
-	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("5"))
+	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("3"))
+	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("10"))
 	testKit.MustExec("drop table t")
 	require.Nil(t, h.GCStats(dom.InfoSchema(), ddlLease))
-	testKit.MustQuery("select count(*) from mysql.stats_meta").Check(testkit.Rows("2"))
+	testKit.MustQuery("select count(*) from mysql.stats_meta").Check(testkit.Rows("3"))
 	testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("0"))
 	testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("0"))
 	require.Nil(t, h.GCStats(dom.InfoSchema(), ddlLease))
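The renumbering above is internally consistent once you account for the extra stats "table": every per-column and per-index histogram now exists for the two partitions plus the merged global level, and the `stats_meta` count after `drop table` grows from 2 to 3 for the same reason. A quick back-of-the-envelope check (reviewer sketch; assumes one `stats_histograms` row per column or index, per stats table):

```go
package main

import "fmt"

func main() {
	const levels = 2 + 1 // p0, p1, and (under this PR) the global level
	cols, idxs := 2, 1   // TestGCPartition uses t(a, b) with one index

	fmt.Println(levels * (cols + idxs)) // 9: right after analyze
	fmt.Println(levels * cols)          // 6: after `drop index idx`
	fmt.Println(levels * (cols - 1))    // 3: after `drop column b`
}
```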
diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go
index f8f73b933bd9a..335d49c943c3f 100644
--- a/statistics/handle/handle_test.go
+++ b/statistics/handle/handle_test.go
@@ -791,17 +791,18 @@ func TestShowGlobalStats(t *testing.T) {
 	tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
 	tk.MustExec("insert into t values (1), (2), (3), (4)")
 	tk.MustExec("analyze table t with 1 buckets")
-	require.Len(t, tk.MustQuery("show stats_meta").Rows(), 2)
-	require.Len(t, tk.MustQuery("show stats_meta where partition_name='global'").Rows(), 0)
-	require.Len(t, tk.MustQuery("show stats_buckets").Rows(), 4) // 2 partitions * (1 for the column_a and 1 for the index_a)
-	require.Len(t, tk.MustQuery("show stats_buckets where partition_name='global'").Rows(), 0)
-	require.Len(t, tk.MustQuery("show stats_histograms").Rows(), 4)
-	require.Len(t, tk.MustQuery("show stats_histograms where partition_name='global'").Rows(), 0)
-	require.Len(t, tk.MustQuery("show stats_healthy").Rows(), 2)
-	require.Len(t, tk.MustQuery("show stats_healthy where partition_name='global'").Rows(), 0)
+	require.Len(t, tk.MustQuery("show stats_meta").Rows(), 3)
+	require.Len(t, tk.MustQuery("show stats_meta where partition_name='global'").Rows(), 1)
+	require.Len(t, tk.MustQuery("show stats_buckets").Rows(), 6) // (2 partitions + 1 global) * (1 for column a and 1 for index a)
+	require.Len(t, tk.MustQuery("show stats_buckets where partition_name='global'").Rows(), 2)
+	require.Len(t, tk.MustQuery("show stats_histograms").Rows(), 6)
+	require.Len(t, tk.MustQuery("show stats_histograms where partition_name='global'").Rows(), 2)
+	require.Len(t, tk.MustQuery("show stats_healthy").Rows(), 3)
+	require.Len(t, tk.MustQuery("show stats_healthy where partition_name='global'").Rows(), 1)
 
 	tk.MustExec("set @@tidb_analyze_version = 2")
 	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
+	tk.MustExec(`drop stats t`)
 	tk.MustExec("analyze table t with 0 topn, 1 buckets")
 	require.Len(t, tk.MustQuery("show stats_meta").Rows(), 3)
 	require.Len(t, tk.MustQuery("show stats_meta where partition_name='global'").Rows(), 1)
@@ -828,12 +829,13 @@ func TestBuildGlobalLevelStats(t *testing.T) {
 	testKit.MustExec("create index idx_t_b on t(b);")
 	testKit.MustExec("analyze table t, t1;")
 	result := testKit.MustQuery("show stats_meta where table_name = 't';").Sort()
-	require.Len(t, result.Rows(), 3)
-	require.Equal(t, "1", result.Rows()[0][5])
-	require.Equal(t, "2", result.Rows()[1][5])
+	require.Len(t, result.Rows(), 4)
+	require.Equal(t, "5", result.Rows()[0][5])
+	require.Equal(t, "1", result.Rows()[1][5])
 	require.Equal(t, "2", result.Rows()[2][5])
+	require.Equal(t, "2", result.Rows()[3][5])
 	result = testKit.MustQuery("show stats_histograms where table_name = 't';").Sort()
-	require.Len(t, result.Rows(), 15)
+	require.Len(t, result.Rows(), 20)
 
 	result = testKit.MustQuery("show stats_meta where table_name = 't1';").Sort()
 	require.Len(t, result.Rows(), 1)
@@ -843,6 +845,8 @@ func TestBuildGlobalLevelStats(t *testing.T) {
 
 	// Test the 'dynamic' mode
 	testKit.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
+	testKit.MustExec("drop stats t1")
+	testKit.MustExec("drop stats t")
 	testKit.MustExec("analyze table t, t1;")
 	result = testKit.MustQuery("show stats_meta where table_name = 't'").Sort()
 	require.Len(t, result.Rows(), 4)
@@ -1665,7 +1669,7 @@ partition by range (a) (
 	tk.MustExec("set @@tidb_partition_prune_mode='static'")
 	tk.MustExec("set @@session.tidb_analyze_version=1")
 	tk.MustExec("analyze table t") // both p0 and p1 are in ver1
-	require.Len(t, tk.MustQuery("show stats_meta").Rows(), 2)
+	require.Len(t, tk.MustQuery("show stats_meta").Rows(), 3)
 
 	tk.MustExec("set @@tidb_partition_prune_mode='dynamic'")
 	tk.MustExec("set @@session.tidb_analyze_version=1")
@@ -2089,16 +2093,16 @@ func TestStaticPartitionPruneMode(t *testing.T) {
 		partition p1 values less than (22))`)
 	tk.MustExec(`insert into t values (1), (2), (3), (10), (11)`)
 	tk.MustExec(`analyze table t`)
-	require.True(t, tk.MustNoGlobalStats("t"))
+	require.True(t, tk.MustGlobalStats("t"))
 	tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Dynamic) + "'")
-	require.True(t, tk.MustNoGlobalStats("t"))
+	require.True(t, tk.MustGlobalStats("t"))
 
 	tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Static) + "'")
 	tk.MustExec(`insert into t values (4), (5), (6)`)
 	tk.MustExec(`analyze table t partition p0`)
-	require.True(t, tk.MustNoGlobalStats("t"))
+	require.True(t, tk.MustGlobalStats("t"))
 	tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Dynamic) + "'")
-	require.True(t, tk.MustNoGlobalStats("t"))
+	require.True(t, tk.MustGlobalStats("t"))
 
 	tk.MustExec("set @@tidb_partition_prune_mode='" + string(variable.Static) + "'")
 }
@@ -2149,9 +2153,6 @@ func TestAnalyzeWithDynamicPartitionPruneMode(t *testing.T) {
 }
 
 func TestPartitionPruneModeSessionVariable(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
-
 	store := testkit.CreateMockStore(t)
 	tk1 := testkit.NewTestKit(t, store)
 	tk1.MustExec("use test")
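A note on the new `drop stats` calls: because the earlier analyze now leaves global stats behind even in static mode, a later re-analyze under different settings would otherwise build on top of what is already there and shift the asserted counts. The PR does not state this rationale, so it is inferred; a hedged sketch of the reset pattern the updated tests follow (hypothetical standalone test):

```go
package example_test

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
)

// TestResetStatsBetweenPruneModes sketches the reset-before-reanalyze
// pattern used above, so the counts asserted after the dynamic-mode
// analyze start from a clean slate.
func TestResetStatsBetweenPruneModes(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
	tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
	tk.MustExec("analyze table t with 1 buckets") // leaves partition-level and global stats behind

	tk.MustExec("set @@tidb_analyze_version = 2")
	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
	tk.MustExec("drop stats t") // discard the earlier stats before re-analyzing under the new settings
	tk.MustExec("analyze table t with 0 topn, 1 buckets")
}
```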
diff --git a/statistics/integration_test.go b/statistics/integration_test.go
index c4e4d315c7dbe..deb98f481c5fa 100644
--- a/statistics/integration_test.go
+++ b/statistics/integration_test.go
@@ -300,8 +300,6 @@ func TestExpBackoffEstimation(t *testing.T) {
 }
 
 func TestGlobalStats(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
@@ -344,10 +342,8 @@ func TestGlobalStats(t *testing.T) {
-	// When we set the mode to `static`, using analyze will not report an error and will not generate global-stats.
+	// When we set the mode to `static`, analyze still runs without error and now also generates global-stats.
 	// In addition, when using explain to view the plan of the related query, it was found that `Union` was used.
 	tk.MustExec("analyze table t;")
-	result := tk.MustQuery("show stats_meta where table_name = 't'").Sort()
-	require.Len(t, result.Rows(), 2)
-	require.Equal(t, "2", result.Rows()[0][5])
-	require.Equal(t, "3", result.Rows()[1][5])
+	tk.MustQuery("show stats_meta where table_name = 't'").Sort().CheckAt([]int{2, 4, 5},
+		[][]interface{}{{"global", "0", "5"}, {"p0", "0", "2"}, {"p1", "0", "3"}})
 	tk.MustQuery("explain format = 'brief' select a from t where a > 3;").Check(testkit.Rows(
 		"PartitionUnion 2.00 root ",
 		"├─IndexReader 1.00 root index:IndexRangeScan",
@@ -355,16 +351,15 @@ func TestGlobalStats(t *testing.T) {
 		"└─IndexReader 1.00 root index:IndexRangeScan",
 		"  └─IndexRangeScan 1.00 cop[tikv] table:t, partition:p1, index:a(a) range:(3,+inf], keep order:false"))
 
-	// When we turned on the switch, we found that pseudo-stats will be used in the plan instead of `Union`.
 	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';")
 	tk.MustQuery("explain format = 'brief' select a from t where a > 3;").Check(testkit.Rows(
-		"IndexReader 3333.33 root partition:all index:IndexRangeScan",
-		"└─IndexRangeScan 3333.33 cop[tikv] table:t, index:a(a) range:(3,+inf], keep order:false, stats:pseudo"))
+		"IndexReader 2.00 root partition:all index:IndexRangeScan",
+		"└─IndexRangeScan 2.00 cop[tikv] table:t, index:a(a) range:(3,+inf], keep order:false"))
 
 	// Execute analyze again without error and can generate global-stats.
 	// And when executing related queries, neither Union nor pseudo-stats are used.
 	tk.MustExec("analyze table t;")
-	result = tk.MustQuery("show stats_meta where table_name = 't'").Sort()
+	result := tk.MustQuery("show stats_meta where table_name = 't'").Sort()
 	require.Len(t, result.Rows(), 3)
 	require.Equal(t, "5", result.Rows()[0][5])
 	require.Equal(t, "2", result.Rows()[1][5])
diff --git a/table/tables/partition_test.go b/table/tables/partition_test.go
index cc8dd90a44737..f559a5b057175 100644
--- a/table/tables/partition_test.go
+++ b/table/tables/partition_test.go
@@ -18,7 +18,6 @@ import (
 	"context"
 	"testing"
 
-	"github.com/pingcap/failpoint"
 	mysql "github.com/pingcap/tidb/errno"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
@@ -392,8 +391,6 @@ func TestLocatePartitionSingleColumn(t *testing.T) {
 }
 
 func TestLocatePartition(t *testing.T) {
-	failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
-	defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
diff --git a/testkit/testkit.go b/testkit/testkit.go
index 1617b743efa65..460b7a319544f 100644
--- a/testkit/testkit.go
+++ b/testkit/testkit.go
@@ -463,6 +463,23 @@ func (tk *TestKit) MustNoGlobalStats(table string) bool {
 	return true
 }
 
+// MustGlobalStats checks that global stats exist for the given table.
+func (tk *TestKit) MustGlobalStats(table string) bool {
+	if !containGlobal(tk.MustQuery("show stats_meta where table_name like '" + table + "'")) {
+		return false
+	}
+	// stats_buckets may be empty for the global stats, so it is not checked here.
+	/*
+		if !containGlobal(tk.MustQuery("show stats_buckets where table_name like '" + table + "'")) {
+			return false
+		}
+	*/
+	if !containGlobal(tk.MustQuery("show stats_histograms where table_name like '" + table + "'")) {
+		return false
+	}
+	return true
+}
+
 // CheckLastMessage checks last message after executing MustExec
 func (tk *TestKit) CheckLastMessage(msg string) {
 	tk.require.Equal(tk.Session().LastMessage(), msg)
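For completeness, a hypothetical usage of the new helper alongside its existing `MustNoGlobalStats` counterpart (not part of the PR; the table and values are illustrative):

```go
package example_test

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
	"github.com/stretchr/testify/require"
)

// TestMustGlobalStatsUsage: after this PR, analyze should leave global
// stats behind in either prune mode, and dropping stats removes them.
func TestMustGlobalStatsUsage(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t (a int) partition by range (a) (
		partition p0 values less than (10),
		partition p1 values less than (20))`)
	tk.MustExec("insert into t values (1), (11)")
	tk.MustExec("analyze table t")
	require.True(t, tk.MustGlobalStats("t"))

	tk.MustExec("drop stats t")
	require.True(t, tk.MustNoGlobalStats("t"))
}
```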