MDEV-24353: Adding GROUP BY slows down a query
A heuristic in best_access_path says that if a ref access on an index
uses at least as many key parts as a range access on the same index,
then range access should not be considered.
This assumption does not hold when the range optimizer has chosen
the group-by min-max optimization, so the fix is to skip the
heuristic when the range optimizer picked the group-by min-max
optimization.
Varun Gupta committed Dec 11, 2020
1 parent be4d266 commit d79c3f3
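
For context, a minimal sketch of the kind of query this fix affects, mirroring the regression test added in group_min_max.test below (it assumes the sequence engine providing seq_0_to_1000 is available):

CREATE TABLE t1 (p int NOT NULL, a int NOT NULL, PRIMARY KEY (p,a));
INSERT INTO t1 SELECT 2, seq FROM seq_0_to_1000;
-- With the fix, EXPLAIN is expected to show range access with
-- "Using index for group-by" rather than the ref plan the old
-- heuristic preferred (see the updated group_min_max.result below).
EXPLAIN SELECT MIN(a) FROM t1 WHERE p = 2 GROUP BY p;
DROP TABLE t1;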
Showing 3 changed files with 45 additions and 3 deletions.
23 changes: 22 additions & 1 deletion mysql-test/main/group_min_max.result
@@ -2664,7 +2664,7 @@ a b
3 13
explain extended select sql_buffer_result a, max(b)+1 from t1 where a = 0 group by a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref a,index a 5 const 15 20.00 Using index; Using temporary
1 SIMPLE t1 range a,index a 5 NULL 3 100.00 Using where; Using index for group-by; Using temporary
Warnings:
Note 1003 select sql_buffer_result `test`.`t1`.`a` AS `a`,max(`test`.`t1`.`b`) + 1 AS `max(b)+1` from `test`.`t1` where `test`.`t1`.`a` = 0 group by `test`.`t1`.`a`
drop table t1;
@@ -4027,3 +4027,24 @@ drop table t1;
#
# End of 10.1 tests
#
#
# MDEV-24353: Adding GROUP BY slows down a query
#
CREATE TABLE t1 (p int NOT NULL, a int NOT NULL, PRIMARY KEY (p,a));
insert into t1 select 2,seq from seq_0_to_1000;
EXPLAIN select MIN(a) from t1 where p = 2 group by p;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 10 Using where; Using index for group-by
SELECT MIN(a) from t1 where p = 2 group by p;
MIN(a)
0
EXPLAIN select MIN(a) from t1 group by p;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL PRIMARY 4 NULL 10 Using index for group-by
SELECT MIN(a) from t1 where p = 2;
MIN(a)
0
drop table t1;
#
# End of 10.6 tests
#
18 changes: 18 additions & 0 deletions mysql-test/main/group_min_max.test
@@ -4,6 +4,7 @@
#

--source include/default_optimizer_switch.inc
--source include/have_sequence.inc

#
# TODO:
@@ -1689,3 +1690,20 @@ drop table t1;
--echo #
--echo # End of 10.1 tests
--echo #

--echo #
--echo # MDEV-24353: Adding GROUP BY slows down a query
--echo #

CREATE TABLE t1 (p int NOT NULL, a int NOT NULL, PRIMARY KEY (p,a));
insert into t1 select 2,seq from seq_0_to_1000;

EXPLAIN select MIN(a) from t1 where p = 2 group by p;
SELECT MIN(a) from t1 where p = 2 group by p;
EXPLAIN select MIN(a) from t1 group by p;
SELECT MIN(a) from t1 where p = 2;
drop table t1;

--echo #
--echo # End of 10.6 tests
--echo #
7 changes: 5 additions & 2 deletions sql/sql_select.cc
@@ -7926,7 +7926,8 @@ best_access_path(JOIN *join,
access is to use the same index IDX, with the same or more key parts.
(note: it is not clear how this rule is/should be extended to
index_merge quick selects). Also if we have a hash join we prefer that
over a table scan
over a table scan. This heuristic doesn't apply if the quick select
uses the group-by min-max optimization.
(3) See above note about InnoDB.
(4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access
path, but there is no quick select)
@@ -7944,7 +7945,9 @@
Json_writer_object trace_access_scan(thd);
if ((records >= s->found_records || best > s->read_time) && // (1)
!(best_key && best_key->key == MAX_KEY) && // (2)
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
!(s->quick &&
s->quick->get_type() != QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX && // (2)
best_key && s->quick->index == best_key->key && // (2)
best_max_key_part >= s->table->opt_range[best_key->key].key_parts) &&// (2)
!((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
! s->table->covering_keys.is_clear_all() && best_key && !s->quick) &&// (3)