Skip to content

Commit

Permalink
Bug #27753193 ASSERTION `PREBUILT->TRX->ERROR_KEY_NUM <
Browse files Browse the repository at this point in the history
HA_ALTER_INFO->KEY_COUNT'

Problem:
If a duplicate entry is inserted concurrently while an OPTIMIZE
TABLE operation is in progress, the in-place ALTER that implements
the OPTIMIZE TABLE catches the duplicate-entry error while applying
the row logs. If the table has an InnoDB-generated GEN_CLUST_INDEX
and the duplicate-entry error occurs on the last unique key, then
ha_innobase::inplace_alter_table() hits the assertion
(m_prebuilt->trx->error_key_num < ha_alter_info->key_count).

Fix:
Added a check inside ha_innobase::inplace_alter_table(): if the
table has an InnoDB-generated GEN_CLUST_INDEX, then
m_prebuilt->trx->error_key_num and ha_alter_info->key_count can
legitimately be equal, so the assertion must allow that case.

RB: 20081
Reviewed by : jimmy.yang@oracle.com
  • Loading branch information
sachinagarwal1111 committed Jul 16, 2018
1 parent 496e613 commit 1452ca7
Show file tree
Hide file tree
Showing 3 changed files with 74 additions and 8 deletions.
20 changes: 20 additions & 0 deletions mysql-test/suite/innodb/r/innodb-alter-debug.result
Expand Up @@ -52,3 +52,23 @@ SET DEBUG_SYNC = 'now SIGNAL s2';
/* reap */ alter table t1 add b int, ALGORITHM=inplace;
ERROR 23000: Duplicate entry '1' for key 'uk'
drop table t1;
#
# Bug #27753193 ASSERTION `PREBUILT->TRX->ERROR_KEY_NUM <
# HA_ALTER_INFO->KEY_COUNT'
CREATE TABLE t1 (a INT, UNIQUE KEY(a));
INSERT INTO t1 VALUES (1);
SET DEBUG_SYNC = 'row_log_table_apply1_before signal S1 WAIT_FOR S2';
OPTIMIZE TABLE t1;;
SET DEBUG_SYNC = 'now WAIT_FOR S1';
INSERT INTO t1 VALUES (1);
ERROR 23000: Duplicate entry '1' for key 'a'
SET DEBUG_SYNC = 'now SIGNAL S2';
/* reap */ OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
test.t1 optimize error Duplicate entry '1' for key 'a'
test.t1 optimize status Operation failed
Warnings:
Error 1062 Duplicate entry '1' for key 'a'
SET DEBUG_SYNC='RESET';
DROP TABLE t1;
28 changes: 28 additions & 0 deletions mysql-test/suite/innodb/t/innodb-alter-debug.test
Expand Up @@ -75,3 +75,31 @@ drop table t1;

# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc

--echo #
--echo # Bug #27753193 ASSERTION `PREBUILT->TRX->ERROR_KEY_NUM <
--echo # HA_ALTER_INFO->KEY_COUNT'

# Table with no user-defined PRIMARY KEY: InnoDB adds a hidden
# GEN_CLUST_INDEX, which is the precondition for this bug.
CREATE TABLE t1 (a INT, UNIQUE KEY(a));
INSERT INTO t1 VALUES (1);

# Pause the rebuilding ALTER (for InnoDB, OPTIMIZE is
# "recreate + analyze") just before it applies the row log, so a
# concurrent duplicate insert can be recorded in the log first.
SET DEBUG_SYNC = 'row_log_table_apply1_before signal S1 WAIT_FOR S2';
--send OPTIMIZE TABLE t1;

# From a second connection, insert a duplicate on the unique key
# while the rebuild is paused; applying the row log must later
# report this duplicate instead of tripping the assertion on
# error_key_num.
CONNECT (con1,localhost,root,,);
CONNECTION con1;
SET DEBUG_SYNC = 'now WAIT_FOR S1';
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES (1);
SET DEBUG_SYNC = 'now SIGNAL S2';

# Back on the original connection: reap OPTIMIZE TABLE, which is
# expected to fail with a duplicate-entry error (see the .result
# file), not crash on the DBUG_ASSERT.
CONNECTION default;
--echo /* reap */ OPTIMIZE TABLE t1;
--reap
DISCONNECT con1;
SET DEBUG_SYNC='RESET';

DROP TABLE t1;

# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
34 changes: 26 additions & 8 deletions storage/innobase/handler/handler0alter.cc
Expand Up @@ -4014,10 +4014,19 @@ ha_innobase::inplace_alter_table(
reporting a bogus duplicate key error. */
dup_key = NULL;
} else {
DBUG_ASSERT(prebuilt->trx->error_key_num
< ha_alter_info->key_count);
dup_key = &ha_alter_info->key_info_buffer[
prebuilt->trx->error_key_num];
/* If InnoDB built more indexes than MySQL requested, the table
has an InnoDB-generated clustered index (GEN_CLUST_INDEX), so
error_key_num is offset by one relative to key_info_buffer[]. */
if (ctx->num_to_add_index > ha_alter_info->key_count) {
DBUG_ASSERT(prebuilt->trx->error_key_num
<= ha_alter_info->key_count);
dup_key = &ha_alter_info->key_info_buffer[
prebuilt->trx->error_key_num - 1];
}
else {
DBUG_ASSERT(prebuilt->trx->error_key_num
< ha_alter_info->key_count);
dup_key = &ha_alter_info->key_info_buffer[
prebuilt->trx->error_key_num];
}
}
print_keydup_error(altered_table, dup_key, MYF(0));
break;
Expand Down Expand Up @@ -4938,11 +4947,20 @@ commit_try_rebuild(
FTS_DOC_ID. */
dup_key = NULL;
} else {
DBUG_ASSERT(err_key <
ha_alter_info->key_count);
dup_key = &ha_alter_info
->key_info_buffer[err_key];
if (ctx->num_to_add_index > ha_alter_info->key_count) {
DBUG_ASSERT(err_key <=
ha_alter_info->key_count);
dup_key = &ha_alter_info
->key_info_buffer[err_key - 1];
}
else {
DBUG_ASSERT(err_key <
ha_alter_info->key_count);
dup_key = &ha_alter_info
->key_info_buffer[err_key];
}
}

print_keydup_error(altered_table, dup_key, MYF(0));
DBUG_RETURN(true);
case DB_ONLINE_LOG_TOO_BIG:
Expand Down

0 comments on commit 1452ca7

Please sign in to comment.