Skip to content

Commit 1b4ac07

Browse files
committed
Merge 10.1 into 10.2
2 parents 7d0d934 + c4eb4bc commit 1b4ac07

File tree

10 files changed

+119
-77
lines changed

10 files changed

+119
-77
lines changed

storage/innobase/buf/buf0lru.cc

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -356,9 +356,10 @@ buf_LRU_drop_page_hash_for_tablespace(
356356
ut_free(page_arr);
357357
}
358358

359-
/** Drop the adaptive hash index for a tablespace.
360-
@param[in,out] table table */
361-
void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
359+
/** Try to drop the adaptive hash index for a tablespace.
360+
@param[in,out] table table
361+
@return whether anything was dropped */
362+
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
362363
{
363364
for (dict_index_t* index = dict_table_get_first_index(table);
364365
index != NULL;
@@ -369,13 +370,15 @@ void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
369370
}
370371
}
371372

372-
return;
373+
return false;
373374
drop_ahi:
374375
ulint id = table->space;
375376
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
376377
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
377378
id);
378379
}
380+
381+
return true;
379382
}
380383
#endif /* BTR_CUR_HASH_ADAPT */
381384

storage/innobase/dict/dict0dict.cc

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2653,12 +2653,11 @@ dict_index_remove_from_cache_low(
26532653
zero. See also: dict_table_can_be_evicted() */
26542654

26552655
do {
2656-
if (!btr_search_info_get_ref_count(info, index)) {
2656+
if (!btr_search_info_get_ref_count(info, index)
2657+
|| !buf_LRU_drop_page_hash_for_tablespace(table)) {
26572658
break;
26582659
}
26592660

2660-
buf_LRU_drop_page_hash_for_tablespace(table);
2661-
26622661
ut_a(++retries < 10000);
26632662
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
26642663
#endif /* BTR_CUR_HASH_ADAPT */

storage/innobase/include/buf0lru.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,9 +52,11 @@ These are low-level functions
5252

5353
#ifdef BTR_CUR_HASH_ADAPT
5454
struct dict_table_t;
55-
/** Drop the adaptive hash index for a tablespace.
56-
@param[in,out] table table */
57-
void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
55+
/** Try to drop the adaptive hash index for a tablespace.
56+
@param[in,out] table table
57+
@return whether anything was dropped */
58+
bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
59+
MY_ATTRIBUTE((warn_unused_result,nonnull));
5860
#else
5961
# define buf_LRU_drop_page_hash_for_tablespace(table)
6062
#endif /* BTR_CUR_HASH_ADAPT */

storage/innobase/row/row0import.cc

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3886,6 +3886,23 @@ row_import_for_mysql(
38863886
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
38873887
err = DB_TOO_MANY_CONCURRENT_TRXS;);
38883888

3889+
/* On DISCARD TABLESPACE, we did not drop any adaptive hash
3890+
index entries. If we replaced the discarded tablespace with a
3891+
smaller one here, there could still be some adaptive hash
3892+
index entries that point to cached garbage pages in the buffer
3893+
pool, because PageConverter::operator() only evicted those
3894+
pages that were replaced by the imported pages. We must
3895+
discard all remaining adaptive hash index entries, because the
3896+
adaptive hash index must be a subset of the table contents;
3897+
false positives are not tolerated. */
3898+
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
3899+
if (trx_is_interrupted(trx)
3900+
|| srv_shutdown_state != SRV_SHUTDOWN_NONE) {
3901+
err = DB_INTERRUPTED;
3902+
break;
3903+
}
3904+
}
3905+
38893906
if (err != DB_SUCCESS) {
38903907
char table_name[MAX_FULL_NAME_LEN + 1];
38913908

@@ -3904,17 +3921,6 @@ row_import_for_mysql(
39043921
return(row_import_cleanup(prebuilt, trx, err));
39053922
}
39063923

3907-
/* On DISCARD TABLESPACE, we did not drop any adaptive hash
3908-
index entries. If we replaced the discarded tablespace with a
3909-
smaller one here, there could still be some adaptive hash
3910-
index entries that point to cached garbage pages in the buffer
3911-
pool, because PageConverter::operator() only evicted those
3912-
pages that were replaced by the imported pages. We must
3913-
discard all remaining adaptive hash index entries, because the
3914-
adaptive hash index must be a subset of the table contents;
3915-
false positives are not tolerated. */
3916-
buf_LRU_drop_page_hash_for_tablespace(table);
3917-
39183924
row_mysql_lock_data_dictionary(trx);
39193925

39203926
/* If the table is stored in a remote tablespace, we need to

storage/innobase/row/row0mysql.cc

Lines changed: 29 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,7 @@ Created 9/17/2000 Heikki Tuuri
6363
#include "trx0rec.h"
6464
#include "trx0roll.h"
6565
#include "trx0undo.h"
66+
#include "srv0start.h"
6667
#include "row0ext.h"
6768
#include "ut0new.h"
6869

@@ -3422,12 +3423,35 @@ row_drop_table_for_mysql(
34223423
/* make sure background stats thread is not running on the table */
34233424
ut_ad(!(table->stats_bg_flag & BG_STAT_IN_PROGRESS));
34243425

3425-
/* Delete the link file if used. */
3426-
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
3427-
RemoteDatafile::delete_link_file(name);
3428-
}
3429-
34303426
if (!dict_table_is_temporary(table)) {
3427+
if (table->space != TRX_SYS_SPACE) {
3428+
/* On DISCARD TABLESPACE, we would not drop the
3429+
adaptive hash index entries. If the tablespace is
3430+
missing here, delete-marking the record in SYS_INDEXES
3431+
would not free any pages in the buffer pool. Thus,
3432+
dict_index_remove_from_cache() would hang due to
3433+
adaptive hash index entries existing in the buffer
3434+
pool. To prevent this hang, and also to guarantee
3435+
that btr_search_drop_page_hash_when_freed() will avoid
3436+
calling btr_search_drop_page_hash_index() while we
3437+
hold the InnoDB dictionary lock, we will drop any
3438+
adaptive hash index entries upfront. */
3439+
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
3440+
if (trx_is_interrupted(trx)
3441+
|| srv_shutdown_state
3442+
!= SRV_SHUTDOWN_NONE) {
3443+
err = DB_INTERRUPTED;
3444+
table->to_be_dropped = false;
3445+
dict_table_close(table, true, false);
3446+
goto funct_exit;
3447+
}
3448+
}
3449+
3450+
/* Delete the link file if used. */
3451+
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
3452+
RemoteDatafile::delete_link_file(name);
3453+
}
3454+
}
34313455

34323456
dict_stats_recalc_pool_del(table);
34333457
dict_stats_defrag_pool_del(table, NULL);
@@ -3626,21 +3650,6 @@ row_drop_table_for_mysql(
36263650
/* As we don't insert entries to SYSTEM TABLES for temp-tables
36273651
we need to avoid running removal of these entries. */
36283652
if (!dict_table_is_temporary(table)) {
3629-
if (table->space != TRX_SYS_SPACE) {
3630-
/* On DISCARD TABLESPACE, we would not drop the
3631-
adaptive hash index entries. If the tablespace is
3632-
missing here, delete-marking the record in SYS_INDEXES
3633-
would not free any pages in the buffer pool. Thus,
3634-
dict_index_remove_from_cache() would hang due to
3635-
adaptive hash index entries existing in the buffer
3636-
pool. To prevent this hang, and also to guarantee
3637-
that btr_search_drop_page_hash_when_freed() will avoid
3638-
calling btr_search_drop_page_hash_index() while we
3639-
hold the InnoDB dictionary lock, we will drop any
3640-
adaptive hash index entries upfront. */
3641-
buf_LRU_drop_page_hash_for_tablespace(table);
3642-
}
3643-
36443653
/* We use the private SQL parser of Innobase to generate the
36453654
query graphs needed in deleting the dictionary data from system
36463655
tables in Innobase. Deleting a row from SYS_INDEXES table also

storage/xtradb/buf/buf0lru.cc

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -354,9 +354,10 @@ buf_LRU_drop_page_hash_for_tablespace(
354354
ut_free(page_arr);
355355
}
356356

357-
/** Drop the adaptive hash index for a tablespace.
358-
@param[in,out] table table */
359-
UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
357+
/** Try to drop the adaptive hash index for a tablespace.
358+
@param[in,out] table table
359+
@return whether anything was dropped */
360+
UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
360361
{
361362
for (dict_index_t* index = dict_table_get_first_index(table);
362363
index != NULL;
@@ -367,13 +368,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
367368
}
368369
}
369370

370-
return;
371+
return false;
371372
drop_ahi:
372373
ulint id = table->space;
373374
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
374375
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
375376
id);
376377
}
378+
379+
return true;
377380
}
378381

379382
/******************************************************************//**

storage/xtradb/dict/dict0dict.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2729,11 +2729,11 @@ dict_index_remove_from_cache_low(
27292729
zero. See also: dict_table_can_be_evicted() */
27302730

27312731
do {
2732-
if (!btr_search_info_get_ref_count(info, index)) {
2732+
if (!btr_search_info_get_ref_count(info, index)
2733+
|| !buf_LRU_drop_page_hash_for_tablespace(table)) {
27332734
break;
27342735
}
27352736

2736-
buf_LRU_drop_page_hash_for_tablespace(table);
27372737
ut_a(++retries < 10000);
27382738
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
27392739

storage/xtradb/include/buf0lru.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -55,9 +55,11 @@ These are low-level functions
5555
/** Minimum LRU list length for which the LRU_old pointer is defined */
5656
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
5757

58-
/** Drop the adaptive hash index for a tablespace.
59-
@param[in,out] table table */
60-
UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
58+
/** Try to drop the adaptive hash index for a tablespace.
59+
@param[in,out] table table
60+
@return whether anything was dropped */
61+
UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
62+
MY_ATTRIBUTE((warn_unused_result,nonnull));
6163

6264
/** Empty the flush list for all pages belonging to a tablespace.
6365
@param[in] id tablespace identifier

storage/xtradb/row/row0import.cc

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3982,6 +3982,23 @@ row_import_for_mysql(
39823982
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
39833983
err = DB_TOO_MANY_CONCURRENT_TRXS;);
39843984

3985+
/* On DISCARD TABLESPACE, we did not drop any adaptive hash
3986+
index entries. If we replaced the discarded tablespace with a
3987+
smaller one here, there could still be some adaptive hash
3988+
index entries that point to cached garbage pages in the buffer
3989+
pool, because PageConverter::operator() only evicted those
3990+
pages that were replaced by the imported pages. We must
3991+
discard all remaining adaptive hash index entries, because the
3992+
adaptive hash index must be a subset of the table contents;
3993+
false positives are not tolerated. */
3994+
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
3995+
if (trx_is_interrupted(trx)
3996+
|| srv_shutdown_state != SRV_SHUTDOWN_NONE) {
3997+
err = DB_INTERRUPTED;
3998+
break;
3999+
}
4000+
}
4001+
39854002
if (err != DB_SUCCESS) {
39864003
char table_name[MAX_FULL_NAME_LEN + 1];
39874004

@@ -3999,17 +4016,6 @@ row_import_for_mysql(
39994016
return(row_import_cleanup(prebuilt, trx, err));
40004017
}
40014018

4002-
/* On DISCARD TABLESPACE, we did not drop any adaptive hash
4003-
index entries. If we replaced the discarded tablespace with a
4004-
smaller one here, there could still be some adaptive hash
4005-
index entries that point to cached garbage pages in the buffer
4006-
pool, because PageConverter::operator() only evicted those
4007-
pages that were replaced by the imported pages. We must
4008-
discard all remaining adaptive hash index entries, because the
4009-
adaptive hash index must be a subset of the table contents;
4010-
false positives are not tolerated. */
4011-
buf_LRU_drop_page_hash_for_tablespace(table);
4012-
40134019
row_mysql_lock_data_dictionary(trx);
40144020

40154021
/* If the table is stored in a remote tablespace, we need to

storage/xtradb/row/row0mysql.cc

Lines changed: 28 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -3540,7 +3540,13 @@ row_truncate_table_for_mysql(
35403540
fil_space_release(space);
35413541
}
35423542

3543-
buf_LRU_drop_page_hash_for_tablespace(table);
3543+
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
3544+
if (trx_is_interrupted(trx)
3545+
|| srv_shutdown_state != SRV_SHUTDOWN_NONE) {
3546+
err = DB_INTERRUPTED;
3547+
goto funct_exit;
3548+
}
3549+
}
35443550

35453551
if (flags != ULINT_UNDEFINED
35463552
&& fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4202,6 +4208,27 @@ row_drop_table_for_mysql(
42024208

42034209
ut_a(!lock_table_has_locks(table));
42044210

4211+
if (table->space != TRX_SYS_SPACE) {
4212+
/* On DISCARD TABLESPACE, we would not drop the
4213+
adaptive hash index entries. If the tablespace is
4214+
missing here, delete-marking the record in SYS_INDEXES
4215+
would not free any pages in the buffer pool. Thus,
4216+
dict_index_remove_from_cache() would hang due to
4217+
adaptive hash index entries existing in the buffer
4218+
pool. To prevent this hang, and also to guarantee
4219+
that btr_search_drop_page_hash_when_freed() will avoid
4220+
calling btr_search_drop_page_hash_index() while we
4221+
hold the InnoDB dictionary lock, we will drop any
4222+
adaptive hash index entries upfront. */
4223+
while (buf_LRU_drop_page_hash_for_tablespace(table)) {
4224+
if (trx_is_interrupted(trx)
4225+
|| srv_shutdown_state != SRV_SHUTDOWN_NONE) {
4226+
err = DB_INTERRUPTED;
4227+
goto funct_exit;
4228+
}
4229+
}
4230+
}
4231+
42054232
switch (trx_get_dict_operation(trx)) {
42064233
case TRX_DICT_OP_NONE:
42074234
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4241,21 +4268,6 @@ row_drop_table_for_mysql(
42414268
rw_lock_x_unlock(dict_index_get_lock(index));
42424269
}
42434270

4244-
if (table->space != TRX_SYS_SPACE) {
4245-
/* On DISCARD TABLESPACE, we would not drop the
4246-
adaptive hash index entries. If the tablespace is
4247-
missing here, delete-marking the record in SYS_INDEXES
4248-
would not free any pages in the buffer pool. Thus,
4249-
dict_index_remove_from_cache() would hang due to
4250-
adaptive hash index entries existing in the buffer
4251-
pool. To prevent this hang, and also to guarantee
4252-
that btr_search_drop_page_hash_when_freed() will avoid
4253-
calling btr_search_drop_page_hash_index() while we
4254-
hold the InnoDB dictionary lock, we will drop any
4255-
adaptive hash index entries upfront. */
4256-
buf_LRU_drop_page_hash_for_tablespace(table);
4257-
}
4258-
42594271
/* We use the private SQL parser of Innobase to generate the
42604272
query graphs needed in deleting the dictionary data from system
42614273
tables in Innobase. Deleting a row from SYS_INDEXES table also

0 commit comments

Comments (0)