MDEV-16515 InnoDB: Failing assertion: ++retries < 10000 in file dict0dict.cc

buf_LRU_drop_page_hash_for_tablespace(): Return whether any adaptive
hash index entries existed. If yes, the caller should keep retrying to
drop the adaptive hash index.

row_import_for_mysql(), row_truncate_table_for_mysql(),
row_drop_table_for_mysql(): Ensure that the adaptive hash index was
entirely dropped for the table.
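
In short, buf_LRU_drop_page_hash_for_tablespace() now reports whether any
entries may remain, and every call site loops until it returns false. A
condensed sketch of the retry idiom this patch installs at each caller
(err and trx are locals of the enclosing function; the TRUNCATE and DROP
paths use goto funct_exit instead of break):

	/* Keep retrying while adaptive hash index entries may remain
	for the table; give up if the transaction is interrupted or
	the server is shutting down. */
	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
		if (trx_is_interrupted(trx)
		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
			err = DB_INTERRUPTED;
			break;
		}
	}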
dr-m committed Jun 26, 2018
1 parent c09a8b5 commit c4eb4bc
Showing 10 changed files with 118 additions and 73 deletions.
11 changes: 7 additions & 4 deletions storage/innobase/buf/buf0lru.cc
@@ -356,9 +356,10 @@ buf_LRU_drop_page_hash_for_tablespace(
 	ut_free(page_arr);
 }
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 {
 	for (dict_index_t* index = dict_table_get_first_index(table);
 	     index != NULL;
@@ -369,13 +370,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 		}
 	}
 
-	return;
+	return false;
 drop_ahi:
 	ulint	id = table->space;
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
 						      id);
 	}
+
+	return true;
 }
 
 /******************************************************************//**
5 changes: 2 additions & 3 deletions storage/innobase/dict/dict0dict.cc
@@ -2719,12 +2719,11 @@ dict_index_remove_from_cache_low(
 	zero. See also: dict_table_can_be_evicted() */
 
 	do {
-		if (!btr_search_info_get_ref_count(info)) {
+		if (!btr_search_info_get_ref_count(info)
+		    || !buf_LRU_drop_page_hash_for_tablespace(table)) {
 			break;
 		}
 
-		buf_LRU_drop_page_hash_for_tablespace(table);
-
 		ut_a(++retries < 10000);
 	} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
 
8 changes: 5 additions & 3 deletions storage/innobase/include/buf0lru.h
@@ -53,9 +53,11 @@ These are low-level functions
 /** Minimum LRU list length for which the LRU_old pointer is defined */
 #define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+	MY_ATTRIBUTE((warn_unused_result,nonnull));
 
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id	tablespace identifier
28 changes: 17 additions & 11 deletions storage/innobase/row/row0import.cc
@@ -3983,6 +3983,23 @@ row_import_for_mysql(
 	DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
 			err = DB_TOO_MANY_CONCURRENT_TRXS;);
 
+	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
+	index entries. If we replaced the discarded tablespace with a
+	smaller one here, there could still be some adaptive hash
+	index entries that point to cached garbage pages in the buffer
+	pool, because PageConverter::operator() only evicted those
+	pages that were replaced by the imported pages. We must
+	discard all remaining adaptive hash index entries, because the
+	adaptive hash index must be a subset of the table contents;
+	false positives are not tolerated. */
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			break;
+		}
+	}
+
 	if (err != DB_SUCCESS) {
 		char	table_name[MAX_FULL_NAME_LEN + 1];
 
@@ -4000,17 +4017,6 @@ row_import_for_mysql(
 		return(row_import_cleanup(prebuilt, trx, err));
 	}
 
-	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
-	index entries. If we replaced the discarded tablespace with a
-	smaller one here, there could still be some adaptive hash
-	index entries that point to cached garbage pages in the buffer
-	pool, because PageConverter::operator() only evicted those
-	pages that were replaced by the imported pages. We must
-	discard all remaining adaptive hash index entries, because the
-	adaptive hash index must be a subset of the table contents;
-	false positives are not tolerated. */
-	buf_LRU_drop_page_hash_for_tablespace(table);
-
 	row_mysql_lock_data_dictionary(trx);
 
 	/* If the table is stored in a remote tablespace, we need to
44 changes: 28 additions & 16 deletions storage/innobase/row/row0mysql.cc
@@ -3516,7 +3516,13 @@ row_truncate_table_for_mysql(
 		fil_space_release(space);
 	}
 
-	buf_LRU_drop_page_hash_for_tablespace(table);
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			goto funct_exit;
+		}
+	}
 
 	if (flags != ULINT_UNDEFINED
 	    && fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4172,6 +4178,27 @@ row_drop_table_for_mysql(
 
 	ut_a(!lock_table_has_locks(table));
 
+	if (table->space != TRX_SYS_SPACE) {
+		/* On DISCARD TABLESPACE, we would not drop the
+		adaptive hash index entries. If the tablespace is
+		missing here, delete-marking the record in SYS_INDEXES
+		would not free any pages in the buffer pool. Thus,
+		dict_index_remove_from_cache() would hang due to
+		adaptive hash index entries existing in the buffer
+		pool. To prevent this hang, and also to guarantee
+		that btr_search_drop_page_hash_when_freed() will avoid
+		calling btr_search_drop_page_hash_index() while we
+		hold the InnoDB dictionary lock, we will drop any
+		adaptive hash index entries upfront. */
+		while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+			if (trx_is_interrupted(trx)
+			    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+				err = DB_INTERRUPTED;
+				goto funct_exit;
+			}
+		}
+	}
+
 	switch (trx_get_dict_operation(trx)) {
 	case TRX_DICT_OP_NONE:
 		trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4211,21 +4238,6 @@
 		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 
-	if (table->space != TRX_SYS_SPACE) {
-		/* On DISCARD TABLESPACE, we would not drop the
-		adaptive hash index entries. If the tablespace is
-		missing here, delete-marking the record in SYS_INDEXES
-		would not free any pages in the buffer pool. Thus,
-		dict_index_remove_from_cache() would hang due to
-		adaptive hash index entries existing in the buffer
-		pool. To prevent this hang, and also to guarantee
-		that btr_search_drop_page_hash_when_freed() will avoid
-		calling btr_search_drop_page_hash_index() while we
-		hold the InnoDB dictionary lock, we will drop any
-		adaptive hash index entries upfront. */
-		buf_LRU_drop_page_hash_for_tablespace(table);
-	}
-
 	/* We use the private SQL parser of Innobase to generate the
 	query graphs needed in deleting the dictionary data from system
 	tables in Innobase. Deleting a row from SYS_INDEXES table also
11 changes: 7 additions & 4 deletions storage/xtradb/buf/buf0lru.cc
@@ -354,9 +354,10 @@ buf_LRU_drop_page_hash_for_tablespace(
 	ut_free(page_arr);
 }
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 {
 	for (dict_index_t* index = dict_table_get_first_index(table);
 	     index != NULL;
@@ -367,13 +368,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 		}
 	}
 
-	return;
+	return false;
 drop_ahi:
 	ulint	id = table->space;
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
 						      id);
 	}
+
+	return true;
 }
 
 /******************************************************************//**
4 changes: 2 additions & 2 deletions storage/xtradb/dict/dict0dict.cc
@@ -2729,11 +2729,11 @@ dict_index_remove_from_cache_low(
 	zero. See also: dict_table_can_be_evicted() */
 
 	do {
-		if (!btr_search_info_get_ref_count(info, index)) {
+		if (!btr_search_info_get_ref_count(info, index)
+		    || !buf_LRU_drop_page_hash_for_tablespace(table)) {
 			break;
 		}
 
-		buf_LRU_drop_page_hash_for_tablespace(table);
 		ut_a(++retries < 10000);
 	} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
 
8 changes: 5 additions & 3 deletions storage/xtradb/include/buf0lru.h
@@ -55,9 +55,11 @@ These are low-level functions
 /** Minimum LRU list length for which the LRU_old pointer is defined */
 #define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+	MY_ATTRIBUTE((warn_unused_result,nonnull));
 
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id	tablespace identifier
28 changes: 17 additions & 11 deletions storage/xtradb/row/row0import.cc
@@ -3982,6 +3982,23 @@ row_import_for_mysql(
 	DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
 			err = DB_TOO_MANY_CONCURRENT_TRXS;);
 
+	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
+	index entries. If we replaced the discarded tablespace with a
+	smaller one here, there could still be some adaptive hash
+	index entries that point to cached garbage pages in the buffer
+	pool, because PageConverter::operator() only evicted those
+	pages that were replaced by the imported pages. We must
+	discard all remaining adaptive hash index entries, because the
+	adaptive hash index must be a subset of the table contents;
+	false positives are not tolerated. */
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			break;
+		}
+	}
+
 	if (err != DB_SUCCESS) {
 		char	table_name[MAX_FULL_NAME_LEN + 1];
 
@@ -3999,17 +4016,6 @@ row_import_for_mysql(
 		return(row_import_cleanup(prebuilt, trx, err));
 	}
 
-	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
-	index entries. If we replaced the discarded tablespace with a
-	smaller one here, there could still be some adaptive hash
-	index entries that point to cached garbage pages in the buffer
-	pool, because PageConverter::operator() only evicted those
-	pages that were replaced by the imported pages. We must
-	discard all remaining adaptive hash index entries, because the
-	adaptive hash index must be a subset of the table contents;
-	false positives are not tolerated. */
-	buf_LRU_drop_page_hash_for_tablespace(table);
-
 	row_mysql_lock_data_dictionary(trx);
 
 	/* If the table is stored in a remote tablespace, we need to
44 changes: 28 additions & 16 deletions storage/xtradb/row/row0mysql.cc
@@ -3540,7 +3540,13 @@ row_truncate_table_for_mysql(
 		fil_space_release(space);
 	}
 
-	buf_LRU_drop_page_hash_for_tablespace(table);
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			goto funct_exit;
+		}
+	}
 
 	if (flags != ULINT_UNDEFINED
 	    && fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4202,6 +4208,27 @@ row_drop_table_for_mysql(
 
 	ut_a(!lock_table_has_locks(table));
 
+	if (table->space != TRX_SYS_SPACE) {
+		/* On DISCARD TABLESPACE, we would not drop the
+		adaptive hash index entries. If the tablespace is
+		missing here, delete-marking the record in SYS_INDEXES
+		would not free any pages in the buffer pool. Thus,
+		dict_index_remove_from_cache() would hang due to
+		adaptive hash index entries existing in the buffer
+		pool. To prevent this hang, and also to guarantee
+		that btr_search_drop_page_hash_when_freed() will avoid
+		calling btr_search_drop_page_hash_index() while we
+		hold the InnoDB dictionary lock, we will drop any
+		adaptive hash index entries upfront. */
+		while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+			if (trx_is_interrupted(trx)
+			    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+				err = DB_INTERRUPTED;
+				goto funct_exit;
+			}
+		}
+	}
+
 	switch (trx_get_dict_operation(trx)) {
 	case TRX_DICT_OP_NONE:
 		trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4241,21 +4268,6 @@
 		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 
-	if (table->space != TRX_SYS_SPACE) {
-		/* On DISCARD TABLESPACE, we would not drop the
-		adaptive hash index entries. If the tablespace is
-		missing here, delete-marking the record in SYS_INDEXES
-		would not free any pages in the buffer pool. Thus,
-		dict_index_remove_from_cache() would hang due to
-		adaptive hash index entries existing in the buffer
-		pool. To prevent this hang, and also to guarantee
-		that btr_search_drop_page_hash_when_freed() will avoid
-		calling btr_search_drop_page_hash_index() while we
-		hold the InnoDB dictionary lock, we will drop any
-		adaptive hash index entries upfront. */
-		buf_LRU_drop_page_hash_for_tablespace(table);
-	}
-
 	/* We use the private SQL parser of Innobase to generate the
 	query graphs needed in deleting the dictionary data from system
 	tables in Innobase. Deleting a row from SYS_INDEXES table also
