planner, executor: support index merge's order prop push down in the normal way #43881

Merged · 16 commits · Aug 1, 2023

Changes from 8 commits
63 changes: 32 additions & 31 deletions cmd/explaintest/r/index_merge.result
@@ -390,15 +390,15 @@ Delete_11 N/A root N/A
└─Sort_15 4056.68 root test.t1.c1
└─SelectLock_17 4056.68 root for update 0
└─HashJoin_33 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)]
├─HashAgg_36(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1
│ └─IndexMerge_45 2248.30 root type: union
│ ├─IndexRangeScan_41(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
│ ├─IndexRangeScan_42(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
│ └─Selection_44(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10)))
│ └─TableRowIDScan_43 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_48(Probe) 9990.00 root data:Selection_47
└─Selection_47 9990.00 cop[tikv] not(isnull(test.t1.c1))
└─TableFullScan_46 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
├─HashAgg_35(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1
│ └─IndexMerge_41 2248.30 root type: union
│ ├─IndexRangeScan_37(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
│ ├─IndexRangeScan_38(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
│ └─Selection_40(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10)))
│ └─TableRowIDScan_39 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_49(Probe) 9990.00 root data:Selection_48
└─Selection_48 9990.00 cop[tikv] not(isnull(test.t1.c1))
└─TableFullScan_47 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1;
select * from t1;
c1 c2 c3
@@ -408,15 +408,15 @@ id estRows task access object operator info
Update_10 N/A root N/A
└─SelectLock_14 4056.68 root for update 0
└─HashJoin_30 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)]
├─HashAgg_33(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1
│ └─IndexMerge_42 2248.30 root type: union
│ ├─IndexRangeScan_38(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
│ ├─IndexRangeScan_39(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
│ └─Selection_41(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10)))
│ └─TableRowIDScan_40 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_45(Probe) 9990.00 root data:Selection_44
└─Selection_44 9990.00 cop[tikv] not(isnull(test.t1.c1))
└─TableFullScan_43 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
├─HashAgg_32(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1
│ └─IndexMerge_38 2248.30 root type: union
│ ├─IndexRangeScan_34(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
│ ├─IndexRangeScan_35(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
│ └─Selection_37(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10)))
│ └─TableRowIDScan_36 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
└─TableReader_46(Probe) 9990.00 root data:Selection_45
└─Selection_45 9990.00 cop[tikv] not(isnull(test.t1.c1))
└─TableFullScan_44 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10);
select * from t1;
c1 c2 c3
@@ -469,26 +469,27 @@ create table t1(c1 int, c2 int, c3 int, key(c1), key(c2));
insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5);
explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and c3 < 10 order by 1 limit 1 offset 2;
id estRows task access object operator info
TopN_10 1.00 root test.t1.c1, offset:2, count:1
└─IndexMerge_23 1841.86 root type: union
├─IndexRangeScan_19(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
├─IndexRangeScan_20(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
└─Selection_22(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10)
└─TableRowIDScan_21 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
TopN_9 1.00 root test.t1.c1, offset:2, count:1
└─IndexMerge_18 3.00 root type: union
├─IndexRangeScan_13(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
├─IndexRangeScan_14(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
└─TopN_17(Probe) 3.00 cop[tikv] test.t1.c1, offset:0, count:3
└─Selection_16 1841.86 cop[tikv] lt(test.t1.c3, 10)
└─TableRowIDScan_15 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and c3 < 10 order by 1 limit 1 offset 2;
c1 c2 c3
3 3 3
///// GROUP BY
explain select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < 10) and c3 < 10 group by c1 order by 1;
id estRows task access object operator info
Sort_6 1473.49 root Column#5
└─HashAgg_11 1473.49 root group by:Column#13, funcs:sum(Column#12)->Column#5
└─Projection_22 1841.86 root cast(test.t1.c1, decimal(10,0) BINARY)->Column#12, test.t1.c1->Column#13
└─IndexMerge_20 1841.86 root type: union
├─IndexRangeScan_16(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
├─IndexRangeScan_17(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
└─Selection_19(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10)
└─TableRowIDScan_18 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
└─HashAgg_10 1473.49 root group by:Column#13, funcs:sum(Column#12)->Column#5
└─Projection_23 1841.86 root cast(test.t1.c1, decimal(10,0) BINARY)->Column#12, test.t1.c1->Column#13
└─IndexMerge_16 1841.86 root type: union
├─IndexRangeScan_12(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
├─IndexRangeScan_13(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
└─Selection_15(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10)
└─TableRowIDScan_14 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < 10) and c3 < 10 group by c1 order by 1;
sum(c1)
1
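Note on the LIMIT/OFFSET plan in the result file above: with the order property pushed through the index merge, the new plan keeps the root TopN_9 (offset:2, count:1) and adds a cop-side TopN_17 with offset:0, count:3 — the pushed copy folds the offset into its count and leaves the actual skipping to the root. A minimal sketch of that folding, using an assumed topN struct rather than TiDB's real PhysicalTopN:

package main

import "fmt"

// topN is a simplified stand-in for a physical TopN operator.
type topN struct {
	Offset, Count uint64
}

// pushDown builds the copy sent below the index merge: it must return the
// first Offset+Count rows in order, so the root can still discard Offset rows.
func pushDown(root topN) topN {
	return topN{Offset: 0, Count: root.Offset + root.Count}
}

func main() {
	root := topN{Offset: 2, Count: 1} // ORDER BY c1 LIMIT 1 OFFSET 2
	fmt.Printf("root %+v -> pushed %+v\n", root, pushDown(root))
	// root {Offset:2 Count:1} -> pushed {Offset:0 Count:3}, matching TopN_17 above
}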
2 changes: 1 addition & 1 deletion executor/builder.go
@@ -4191,7 +4191,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
dataReaderBuilder: readerBuilder,
feedbacks: feedbacks,
paging: paging,
handleCols: ts.HandleCols,
handleCols: v.HandleCols,
isCorColInPartialFilters: isCorColInPartialFilters,
isCorColInTableFilter: isCorColInTableFilter,
isCorColInPartialAccess: isCorColInPartialAccess,
72 changes: 56 additions & 16 deletions executor/index_merge_reader.go
@@ -150,7 +150,7 @@
// parTblIdx are only used in indexMergeProcessWorker.fetchLoopIntersection.
parTblIdx int

// partialPlanID are only used for indexMergeProcessWorker.fetchLoopUnionWithOrderByAndPushedLimit.
// partialPlanID are only used for indexMergeProcessWorker.fetchLoopUnionWithOrderBy.
partialPlanID int
}

@@ -296,9 +296,12 @@
util.WithRecovery(
func() {
if e.isIntersection {
if e.pushedLimit != nil || e.keepOrder {
panic("Not support intersection with pushedLimit or keepOrder = true")
}
idxMergeProcessWorker.fetchLoopIntersection(ctx, fetch, workCh, e.resultCh, e.finished)
} else if e.pushedLimit != nil && len(e.byItems) != 0 {
idxMergeProcessWorker.fetchLoopUnionWithOrderByAndPushedLimit(ctx, fetch, workCh, e.resultCh, e.finished)
} else if len(e.byItems) != 0 {
idxMergeProcessWorker.fetchLoopUnionWithOrderBy(ctx, fetch, workCh, e.resultCh, e.finished)
} else {
idxMergeProcessWorker.fetchLoopUnion(ctx, fetch, workCh, e.resultCh, e.finished)
}
@@ -340,6 +343,7 @@
util.WithRecovery(
func() {
failpoint.Inject("testIndexMergePanicPartialIndexWorker", nil)
is := e.partialPlans[workID][0].(*plannercore.PhysicalIndexScan)
worker := &partialIndexWorker{
stats: e.stats,
idxID: e.getPartitalPlanID(workID),
@@ -352,10 +356,9 @@
memTracker: e.memTracker,
partitionTableMode: e.partitionTableMode,
prunedPartitions: e.prunedPartitions,
byItems: e.byItems,
Review comment (Member):
Looks like we don't need to change this here. We already set this via PhysicalIndexMergeReader.ByItems (in Init()) and then IndexMergeReaderExecutor.byItems (in buildNoRangeIndexMergeReader), so the two end up the same.

Reply (Member, Author):
Yeah, I want to do the remaining part in a later pull.
byItems: is.ByItems,
pushedLimit: e.pushedLimit,
}

if e.isCorColInPartialFilters[workID] {
// We got correlated column, so need to refresh Selection operator.
var err error
@@ -470,7 +473,7 @@
ranges: e.ranges[workID],
netDataSize: e.partialNetDataSizes[workID],
keepOrder: ts.KeepOrder,
byItems: e.byItems,
byItems: ts.ByItems,
}

worker := &partialTableWorker{
@@ -483,7 +486,7 @@
memTracker: e.memTracker,
partitionTableMode: e.partitionTableMode,
prunedPartitions: e.prunedPartitions,
byItems: e.byItems,
byItems: ts.ByItems,
pushedLimit: e.pushedLimit,
}
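Sketch for the byItems changes in the two worker hunks above (reviewer aid, not part of the diff): each partial worker now reads the ByItems of its own partial plan (is.ByItems / ts.ByItems) instead of the reader-level e.byItems. As the review note says, the two are currently identical — the planner fills PhysicalIndexMergeReader.ByItems in Init() and buildNoRangeIndexMergeReader copies it to the executor — but the per-plan field leaves room for partial plans to carry different orderings later. The toy below illustrates the wiring; the structs are simplified assumptions, not TiDB's real types.

package main

import "fmt"

type byItem struct{ col string }

// Simplified stand-ins for the planner/executor types named in this diff.
type physicalIndexScan struct{ ByItems []byItem }

type indexMergeReader struct {
	byItems      []byItem             // reader-level order, filled by the planner
	partialPlans []*physicalIndexScan // one partial plan per index
}

// buildPartialWorker mirrors the change: take the partial plan's own ByItems.
func buildPartialWorker(e *indexMergeReader, workID int) []byItem {
	is := e.partialPlans[workID]
	return is.ByItems // previously: e.byItems
}

func main() {
	e := &indexMergeReader{
		byItems:      []byItem{{"c1"}},
		partialPlans: []*physicalIndexScan{{ByItems: []byItem{{"c1"}}}, {ByItems: []byItem{{"c1"}}}},
	}
	fmt.Println(buildPartialWorker(e, 0), buildPartialWorker(e, 1)) // today both equal the reader-level slice
}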

@@ -923,7 +926,9 @@
}

type handleHeap struct {
// requiredCnt == 0 means need all handles
requiredCnt uint64
tracker *memory.Tracker
taskMap map[int][]*indexMergeTableTask

idx []rowIdx
@@ -960,23 +965,34 @@
func (h *handleHeap) Push(x interface{}) {
idx := x.(rowIdx)
h.idx = append(h.idx, idx)
if h.tracker != nil {
h.tracker.Consume(int64(unsafe.Sizeof(h.idx)))
}
}

func (h *handleHeap) Pop() interface{} {
idxRet := h.idx[len(h.idx)-1]
h.idx = h.idx[:len(h.idx)-1]
if h.tracker != nil {
h.tracker.Consume(-int64(unsafe.Sizeof(h.idx)))
}
return idxRet
}

func (w *indexMergeProcessWorker) NewHandleHeap(taskMap map[int][]*indexMergeTableTask) *handleHeap {
func (w *indexMergeProcessWorker) NewHandleHeap(taskMap map[int][]*indexMergeTableTask, memTracker *memory.Tracker) *handleHeap {
compareFuncs := make([]chunk.CompareFunc, 0, len(w.indexMerge.byItems))
for _, item := range w.indexMerge.byItems {
keyType := item.Expr.GetType()
compareFuncs = append(compareFuncs, chunk.GetCompareFunc(keyType))
}
requiredCnt := w.indexMerge.pushedLimit.Count + w.indexMerge.pushedLimit.Offset

requiredCnt := uint64(0)
if w.indexMerge.pushedLimit != nil {
requiredCnt = mathutil.Max(requiredCnt, w.indexMerge.pushedLimit.Count+w.indexMerge.pushedLimit.Offset)
}
return &handleHeap{
requiredCnt: requiredCnt,
tracker: memTracker,
taskMap: taskMap,
idx: make([]rowIdx, 0, requiredCnt),
compareFunc: compareFuncs,
@@ -1003,7 +1019,7 @@
}
}
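For orientation: handleHeap above is a bounded top-K heap. Every distinct handle is pushed, and whenever the heap grows past requiredCnt the current maximum under the byItems order is popped, so only the requiredCnt best handles survive; requiredCnt == 0 (no pushed limit) keeps everything. Below is a self-contained sketch of that pattern over plain ints — the real heap orders rowIdx entries through compareFunc:

package main

import (
	"container/heap"
	"fmt"
)

// topK keeps the requiredCnt smallest keys by holding them in a max-heap and
// evicting the current maximum once the size exceeds requiredCnt.
type topK struct {
	requiredCnt int // 0 means keep all keys
	keys        []int
}

func (h *topK) Len() int           { return len(h.keys) }
func (h *topK) Less(i, j int) bool { return h.keys[i] > h.keys[j] } // max-heap
func (h *topK) Swap(i, j int)      { h.keys[i], h.keys[j] = h.keys[j], h.keys[i] }
func (h *topK) Push(x interface{}) { h.keys = append(h.keys, x.(int)) }
func (h *topK) Pop() interface{} {
	last := h.keys[len(h.keys)-1]
	h.keys = h.keys[:len(h.keys)-1]
	return last
}

func main() {
	h := &topK{requiredCnt: 3} // e.g. LIMIT 1 OFFSET 2 needs the 3 smallest handles
	for _, k := range []int{9, 1, 8, 2, 7, 3} {
		heap.Push(h, k)
		if h.requiredCnt != 0 && h.Len() > h.requiredCnt {
			heap.Pop(h) // the largest key can never make the final result
		}
	}
	fmt.Println(h.keys) // the three smallest keys, in heap order
}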

func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderByAndPushedLimit(ctx context.Context, fetchCh <-chan *indexMergeTableTask,
func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderBy(ctx context.Context, fetchCh <-chan *indexMergeTableTask,
workCh chan<- *indexMergeTableTask, resultCh chan<- *indexMergeTableTask, finished <-chan struct{}) {
memTracker := memory.NewTracker(w.indexMerge.ID(), -1)
memTracker.AttachTo(w.indexMerge.memTracker)
@@ -1020,8 +1036,7 @@
distinctHandles := kv.NewHandleMap()
taskMap := make(map[int][]*indexMergeTableTask)
uselessMap := make(map[int]struct{})
taskHeap := w.NewHandleHeap(taskMap)
memTracker.Consume(int64(taskHeap.requiredCnt) * int64(unsafe.Sizeof(rowIdx{0, 0, 0})))
taskHeap := w.NewHandleHeap(taskMap, memTracker)

for task := range fetchCh {
select {
@@ -1037,15 +1052,15 @@
continue
}
if _, ok := taskMap[task.partialPlanID]; !ok {
taskMap[task.partialPlanID] = make([]*indexMergeTableTask, 0)
taskMap[task.partialPlanID] = make([]*indexMergeTableTask, 0, 1)
}
w.pruneTableWorkerTaskIdxRows(task)
taskMap[task.partialPlanID] = append(taskMap[task.partialPlanID], task)
for i, h := range task.handles {
if _, ok := distinctHandles.Get(h); !ok {
distinctHandles.Set(h, true)
heap.Push(taskHeap, rowIdx{task.partialPlanID, len(taskMap[task.partialPlanID]) - 1, i})
if taskHeap.Len() > int(taskHeap.requiredCnt) {
if int(taskHeap.requiredCnt) != 0 && taskHeap.Len() > int(taskHeap.requiredCnt) {
top := heap.Pop(taskHeap).(rowIdx)
if top.partialID == task.partialPlanID && top.taskID == len(taskMap[task.partialPlanID])-1 && top.rowID == i {
uselessMap[task.partialPlanID] = struct{}{}
@@ -1067,7 +1082,10 @@
}
}

needCount := mathutil.Max(0, taskHeap.Len()-int(w.indexMerge.pushedLimit.Offset))
needCount := taskHeap.Len()
if w.indexMerge.pushedLimit != nil {
needCount = mathutil.Max(0, taskHeap.Len()-int(w.indexMerge.pushedLimit.Offset))
}
if needCount == 0 {
return
}
@@ -1125,10 +1143,17 @@
defer close(workCh)
failpoint.Inject("testIndexMergePanicProcessWorkerUnion", nil)

var pushedLimit *plannercore.PushedDownLimit
if w.indexMerge.pushedLimit != nil {
pushedLimit = w.indexMerge.pushedLimit.Clone()
}
distinctHandles := make(map[int64]*kv.HandleMap)
for {
var ok bool
var task *indexMergeTableTask
if pushedLimit != nil && pushedLimit.Count == 0 {
return
}
select {
case <-ctx.Done():
return
Expand Down Expand Up @@ -1175,6 +1200,21 @@
if len(fhs) == 0 {
continue
}
if pushedLimit != nil {
fhsLen := uint64(len(fhs))
if fhsLen <= pushedLimit.Offset {
pushedLimit.Offset -= fhsLen
continue
}
fhs = fhs[pushedLimit.Offset:]
pushedLimit.Offset = 0

fhsLen = uint64(len(fhs))
if fhsLen > pushedLimit.Count {
fhs = fhs[:pushedLimit.Count]
}
pushedLimit.Count -= mathutil.Min(pushedLimit.Count, fhsLen)
}
task = &indexMergeTableTask{
lookupTableTask: lookupTableTask{
handles: fhs,
@@ -1748,7 +1788,7 @@
if err != nil {
return err
}
if physicalTableIDIdx != -1 {
if w.indexMergeExec.partitionTableMode && physicalTableIDIdx != -1 {
handle = kv.NewPartitionHandle(row.GetInt64(physicalTableIDIdx), handle)
}
rowIdx, _ := task.indexOrder.Get(handle)
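The per-batch limit accounting added to fetchLoopUnion in this file can be read in isolation: a cloned PushedDownLimit is consumed batch by batch — whole batches are dropped while Offset remains, then each batch is truncated to the Count still needed, and the loop exits once Count reaches zero. A runnable sketch under stated assumptions (pushedDownLimit stands in for plannercore.PushedDownLimit, handles are plain ints, and min is Go 1.21's built-in):

package main

import "fmt"

type pushedDownLimit struct{ Offset, Count uint64 }

// trimBatch applies the remaining offset/count budget to one batch of handles.
func trimBatch(fhs []int, l *pushedDownLimit) []int {
	fhsLen := uint64(len(fhs))
	if fhsLen <= l.Offset { // the offset swallows the whole batch
		l.Offset -= fhsLen
		return nil
	}
	fhs = fhs[l.Offset:]
	l.Offset = 0
	fhsLen = uint64(len(fhs))
	if fhsLen > l.Count {
		fhs = fhs[:l.Count]
	}
	l.Count -= min(l.Count, fhsLen)
	return fhs
}

func main() {
	l := &pushedDownLimit{Offset: 3, Count: 4} // LIMIT 4 OFFSET 3
	for _, batch := range [][]int{{1, 2}, {3, 4, 5}, {6, 7, 8}, {9}} {
		fmt.Println(trimBatch(batch, l), *l)
	}
	// [] {1 4}, then [4 5] {0 2}, then [6 7] {0 0}, then [] {0 0}
}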
2 changes: 1 addition & 1 deletion executor/test/indexmergereadtest/BUILD.bazel
@@ -9,7 +9,7 @@ go_test(
],
flaky = True,
race = "on",
shard_count = 30,
shard_count = 32,
deps = [
"//config",
"//meta/autoid",
50 changes: 50 additions & 0 deletions executor/test/indexmergereadtest/index_merge_reader_test.go
@@ -1164,3 +1164,53 @@ func TestProcessInfoRaceWithIndexScan(t *testing.T) {
}
wg.Wait()
}

func TestIndexMergeLimitNotPushedOnPartialSideButKeepOrder(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, b int, c int, index idx(a, c), index idx2(b, c), index idx3(a, b, c))")
valsInsert := make([]string, 0, 1000)
for i := 0; i < 1000; i++ {
valsInsert = append(valsInsert, fmt.Sprintf("(%v, %v, %v)", rand.Intn(100), rand.Intn(100), rand.Intn(100)))
}
tk.MustExec("analyze table t")
tk.MustExec("insert into t values " + strings.Join(valsInsert, ","))
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceIndexMergeKeepOrder", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceIndexMergeKeepOrder")
for i := 0; i < 100; i++ {
valA, valB, valC, offset := rand.Intn(100), rand.Intn(100), rand.Intn(50), rand.Intn(100)+1
maxEle := tk.MustQuery(fmt.Sprintf("select ifnull(max(c), 100) from (select c from t use index(idx3) where (a = %d or b = %d) and c >= %d order by c limit %d) t", valA, valB, valC, offset)).Rows()[0][0]
queryWithIndexMerge := fmt.Sprintf("select /*+ USE_INDEX_MERGE(t, idx, idx2) */ * from t where (a = %d or b = %d) and c >= %d and c < greatest(%d, %v) order by c limit %d", valA, valB, valC, valC+1, maxEle, offset)
queryWithNormalIndex := fmt.Sprintf("select * from t use index(idx3) where (a = %d or b = %d) and c >= %d and c < greatest(%d, %v) order by c limit %d", valA, valB, valC, valC+1, maxEle, offset)
require.True(t, tk.HasPlan(queryWithIndexMerge, "IndexMerge"))
require.True(t, tk.HasPlan(queryWithIndexMerge, "Limit"))
normalResult := tk.MustQuery(queryWithNormalIndex).Sort().Rows()
tk.MustQuery(queryWithIndexMerge).Sort().Check(normalResult)
}
for i := 0; i < 100; i++ {
valA, valB, valC, limit, offset := rand.Intn(100), rand.Intn(100), rand.Intn(50), rand.Intn(100)+1, rand.Intn(20)
maxEle := tk.MustQuery(fmt.Sprintf("select ifnull(max(c), 100) from (select c from t use index(idx3) where (a = %d or b = %d) and c >= %d order by c limit %d offset %d) t", valA, valB, valC, limit, offset)).Rows()[0][0]
queryWithIndexMerge := fmt.Sprintf("select /*+ USE_INDEX_MERGE(t, idx, idx2) */ c from t where (a = %d or b = %d) and c >= %d and c < greatest(%d, %v) order by c limit %d offset %d", valA, valB, valC, valC+1, maxEle, limit, offset)
queryWithNormalIndex := fmt.Sprintf("select c from t use index(idx3) where (a = %d or b = %d) and c >= %d and c < greatest(%d, %v) order by c limit %d offset %d", valA, valB, valC, valC+1, maxEle, limit, offset)
require.True(t, tk.HasPlan(queryWithIndexMerge, "IndexMerge"))
require.True(t, tk.HasPlan(queryWithIndexMerge, "Limit"))
normalResult := tk.MustQuery(queryWithNormalIndex).Sort().Rows()
tk.MustQuery(queryWithIndexMerge).Sort().Check(normalResult)
}
}

func TestIndexMergeNoOrderLimitPushed(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, b int, c int, index idx(a, c), index idx2(b, c))")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2)")
sql := "select /*+ USE_INDEX_MERGE(t, idx, idx2) */ * from t where a = 1 or b = 1 limit 1"
require.True(t, tk.HasPlan(sql, "IndexMerge"))
require.True(t, tk.HasPlan(sql, "Limit"))
// 6 means that IndexMerge(embedded limit){Limit->PartialIndexScan, Limit->PartialIndexScan, FinalTableScan}
require.Equal(t, 6, len(tk.MustQuery("explain "+sql).Rows()))
// The result is not stable. So we just check that it can run successfully.
tk.MustQuery(sql)
}