MDEV-14511 Use fewer transactions for updating InnoDB persistent statistics

dict_stats_exec_sql(): Expect the caller to always provide a transaction.
Remove some redundant assertions. The caller must hold dict_sys->mutex,
but holding dict_operation_lock is only necessary for accessing
data dictionary tables, which we are not accessing.
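
A minimal sketch of the resulting call pattern, assuming the existing
(pars_info_t*, const char*, trx_t*) signature; the procedure text and the
bind variable below are illustrative only, not the actual statements:

	pars_info_t*	pinfo = pars_info_create();
	pars_info_add_str_literal(pinfo, "table_name", table->name.m_name);

	mutex_enter(&dict_sys->mutex);
	/* The caller owns trx; dict_stats_exec_sql() neither starts
	nor commits it. dict_operation_lock is not acquired, because
	no SYS_* data dictionary tables are accessed here. */
	dberr_t	err = dict_stats_exec_sql(
		pinfo,
		"PROCEDURE DELETE_STATS () IS\n"
		"BEGIN\n"
		"DELETE FROM \"mysql/innodb_table_stats\"\n"
		"WHERE table_name = :table_name;\n"
		"END;\n", trx);
	mutex_exit(&dict_sys->mutex);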

dict_stats_save_index_stat(): Acquire dict_sys->mutex
for invoking dict_stats_exec_sql().

dict_stats_save(), dict_stats_update_for_index(), dict_stats_update(),
dict_stats_drop_index(), dict_stats_delete_from_table_stats(),
dict_stats_delete_from_index_stats(), dict_stats_drop_table(),
dict_stats_rename_in_table_stats(), dict_stats_rename_in_index_stats(),
dict_stats_rename_table(): Use a single caller-provided
transaction that is started and committed or rolled back by the caller.
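
A minimal sketch of the caller's side, assuming (as in the hunks below) that
these functions take the transaction as their last parameter and that
dict_stats_save() keeps its index filter argument:

	trx_t*	trx = trx_allocate_for_background();
	++trx->will_lock;

	/* The callee may execute several internal SQL statements,
	all inside this single transaction. */
	dberr_t	err = dict_stats_save(table, NULL, trx);

	if (err != DB_SUCCESS) {
		trx_rollback_to_savepoint(trx, NULL);
	} else if (trx->state != TRX_STATE_NOT_STARTED) {
		trx_commit_for_mysql(trx);
	}

	trx_free_for_background(trx);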

dict_stats_process_entry_from_recalc_pool(): Let the caller provide
a transaction object.

ha_innobase::open(): Pass a transaction to dict_stats_init().

ha_innobase::create(), ha_innobase::discard_or_import_tablespace():
Pass a transaction to dict_stats_update().

ha_innobase::rename_table(): Pass a transaction to
dict_stats_rename_table(). We do not use the same transaction
as the one that updated the data dictionary tables, because
we already released the dict_operation_lock. (FIXME: there is
a race condition; a lock wait on SYS_* tables could occur
in another DDL transaction until the data dictionary transaction
is committed.)

ha_innobase::info_low(): Pass a transaction to dict_stats_update()
when calculating persistent statistics.

alter_stats_norebuild(), alter_stats_rebuild(): Update the
persistent statistics as well. In this way, a single transaction
will be used for updating the statistics of a whole table, even
for partitioned tables.
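
Schematically (the surrounding handler code is not shown in this excerpt;
n_partitions and part_table are assumed names, and dict_stats_update() is
assumed to take the transaction as its last parameter):

	trx_t*	trx = trx_allocate_for_background();
	++trx->will_lock;

	/* One transaction covers the statistics of every partition. */
	for (uint i = 0; i < n_partitions; i++) {
		dict_stats_update(part_table[i],
				  DICT_STATS_RECALC_PERSISTENT, trx);
	}

	if (trx->state != TRX_STATE_NOT_STARTED) {
		trx_commit_for_mysql(trx);
	}
	trx_free_for_background(trx);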

ha_innobase::commit_inplace_alter_table(): Drop statistics for
all partitions when adding or dropping virtual columns, so that
the statistics will be recalculated on the next handler::open().
This is a refactored version of the Oracle Bug#22469660 fix.

RecLock::add_to_waitq(), lock_table_enqueue_waiting():
Do not allow a lock wait to occur for updating statistics
in a data dictionary transaction, such as DROP TABLE. Instead,
return the previously unused error code DB_QUE_THR_SUSPENDED.
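
A schematic version of the added check, assuming the dictionary transaction
is recognized through trx_get_dict_operation():

	if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE) {
		/* A DDL transaction holding dict_operation_lock must
		not be suspended on a lock wait for the statistics
		tables; report the condition to the caller instead. */
		return(DB_QUE_THR_SUSPENDED);
	}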

row_merge_lock_table(), row_mysql_lock_table(): Remove dead code
for handling DB_QUE_THR_SUSPENDED.

row_drop_table_for_mysql(), row_truncate_table_for_mysql():
Drop the statistics as part of the data dictionary transaction.
After TRUNCATE TABLE, the statistics will be recalculated on
subsequent ha_innobase::open(), similar to the logic of the
above-mentioned Oracle Bug#22469660 fix in
ha_innobase::commit_inplace_alter_table().
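
Schematically, inside the existing data dictionary transaction (the exact
parameter list of dict_stats_drop_table() with the added transaction
argument is an assumption of this sketch):

	char	errstr[1024];

	if (dict_stats_drop_table(name, errstr, sizeof errstr, trx)
	    != DB_SUCCESS) {
		ib::warn() << errstr;
	}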

btr_defragment_thread(): Use a single transaction object for
updating defragmentation statistics.

dict_stats_save_defrag_stats(), dict_stats_save_defrag_summary(),
dict_stats_process_entry_from_defrag_pool(),
dict_defrag_process_entries_from_defrag_pool():
Add a parameter for the transaction.
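
The resulting declarations, consistent with the dict0defrag_bg.cc hunk
below (the header file itself is not shown in this excerpt):

	void	dict_defrag_process_entries_from_defrag_pool(trx_t* trx);
	dberr_t	dict_stats_save_defrag_summary(dict_index_t* index, trx_t* trx);
	dberr_t	dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx);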

dict_stats_empty_table(): Make public. This will be called by
row_truncate_table_for_mysql() after dropping persistent statistics,
to clear the memory-based statistics as well.
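
A sketch of the intended use in row_truncate_table_for_mysql(), after the
persistent statistics rows have been deleted; the second argument, which
also resets the defragmentation counters, is an assumption of this sketch:

	/* Clear the in-memory statistics so that they will be
	recalculated on the next ha_innobase::open(). */
	dict_stats_empty_table(table, true);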
dr-m committed Dec 6, 2017
1 parent 2c1e4d4 commit 7dc6066
Showing 15 changed files with 627 additions and 690 deletions.
12 changes: 12 additions & 0 deletions mysql-test/suite/innodb/r/innodb_stats_debug.result
@@ -0,0 +1,12 @@
call mtr.add_suppression("InnoDB: Cannot save (table|index) statistics for table `test`\\.`t1`.*: Persistent statistics do not exist");
CREATE TABLE t1 (a INT, KEY(a)) ENGINE=INNODB STATS_PERSISTENT=1;
SET @save_debug= @@SESSION.debug_dbug;
SET debug_dbug= '+d,stats_index_error';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Operation failed
SET debug_dbug= @save_debug;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
DROP TABLE t1;
13 changes: 13 additions & 0 deletions mysql-test/suite/innodb/t/innodb_stats_debug.test
@@ -0,0 +1,13 @@
--source include/have_innodb.inc
--source include/have_debug.inc

call mtr.add_suppression("InnoDB: Cannot save (table|index) statistics for table `test`\\.`t1`.*: Persistent statistics do not exist");

CREATE TABLE t1 (a INT, KEY(a)) ENGINE=INNODB STATS_PERSISTENT=1;
SET @save_debug= @@SESSION.debug_dbug;
SET debug_dbug= '+d,stats_index_error';
ANALYZE TABLE t1;
SET debug_dbug= @save_debug;
ANALYZE TABLE t1;

DROP TABLE t1;
2 changes: 1 addition & 1 deletion mysql-test/suite/innodb/t/innodb_stats_drop_locked.test
@@ -57,5 +57,5 @@ SELECT table_name FROM mysql.innodb_index_stats
WHERE table_name='innodb_stats_drop_locked';

--disable_query_log
call mtr.add_suppression("Unable to delete statistics for table test.innodb_stats_drop_locked: Lock wait timeout. They can be deleted later using DELETE FROM mysql.innodb_index_stats WHERE database_name");
call mtr.add_suppression("Unable to delete statistics for table test\\.innodb_stats_drop_locked: Lock wait");
--enable_query_log
32 changes: 18 additions & 14 deletions storage/innobase/btr/btr0defragment.cc
Expand Up @@ -751,6 +751,8 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
buf_block_t* first_block;
buf_block_t* last_block;

trx_t* trx = trx_allocate_for_background();

while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
ut_ad(btr_defragment_thread_active);

@@ -826,31 +828,33 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
/* Update the last_processed time of this index. */
item->last_processed = now;
} else {
dberr_t err = DB_SUCCESS;
mtr_commit(&mtr);
/* Reaching the end of the index. */
dict_stats_empty_defrag_stats(index);
err = dict_stats_save_defrag_stats(index);
++trx->will_lock;
dberr_t err = dict_stats_save_defrag_stats(index, trx);
if (err == DB_SUCCESS) {
err = dict_stats_save_defrag_summary(
index, trx);
}

if (err != DB_SUCCESS) {
trx_rollback_to_savepoint(trx, NULL);
ib::error() << "Saving defragmentation stats for table "
<< index->table->name.m_name
<< " index " << index->name()
<< " failed with error " << err;
} else {
err = dict_stats_save_defrag_summary(index);

if (err != DB_SUCCESS) {
ib::error() << "Saving defragmentation summary for table "
<< index->table->name.m_name
<< " index " << index->name()
<< " failed with error " << err;
}
<< index->table->name
<< " index " << index->name
<< " failed with error "
<< ut_strerr(err);
} else if (trx->state != TRX_STATE_NOT_STARTED) {
trx_commit_for_mysql(trx);
}

btr_defragment_remove_item(item);
}
}

trx_free_for_background(trx);

btr_defragment_thread_active = false;
os_thread_exit();
OS_THREAD_DUMMY_RETURN;
136 changes: 62 additions & 74 deletions storage/innobase/dict/dict0defrag_bg.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (c) 2016, MariaDB Corporation. All Rights Reserved.
Copyright (c) 2016, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,6 +29,7 @@ Created 25/08/2016 Jan Lindström
#include "dict0defrag_bg.h"
#include "row0mysql.h"
#include "srv0start.h"
#include "trx0roll.h"
#include "ut0new.h"

#include <vector>
@@ -201,12 +202,12 @@ dict_stats_defrag_pool_del(
mutex_exit(&defrag_pool_mutex);
}

/*****************************************************************//**
Get the first index that has been added for updating persistent defrag
stats and eventually save its stats. */
/** Get the first index that has been added for updating persistent defrag
stats and eventually save its stats.
@param[in,out] trx transaction that will be started and committed */
static
void
dict_stats_process_entry_from_defrag_pool()
dict_stats_process_entry_from_defrag_pool(trx_t* trx)
{
table_id_t table_id;
index_id_t index_id;
@@ -240,63 +241,58 @@
return;
}

dict_stats_save_defrag_stats(index);
++trx->will_lock;
dberr_t err = dict_stats_save_defrag_stats(index, trx);

if (err != DB_SUCCESS) {
trx_rollback_to_savepoint(trx, NULL);
ib::error() << "Saving defragmentation status for table "
<< index->table->name
<< " index " << index->name
<< " failed " << err;
} else if (trx->state != TRX_STATE_NOT_STARTED) {
trx_commit_for_mysql(trx);
}

dict_table_close(table, FALSE, FALSE);
}

/*****************************************************************//**
Get the first index that has been added for updating persistent defrag
stats and eventually save its stats. */
/** Process indexes that have been scheduled for defragmenting.
@param[in,out] trx transaction that will be started and committed */
void
dict_defrag_process_entries_from_defrag_pool()
/*==========================================*/
dict_defrag_process_entries_from_defrag_pool(trx_t* trx)
{
while (defrag_pool->size() && !dict_stats_start_shutdown) {
dict_stats_process_entry_from_defrag_pool();
dict_stats_process_entry_from_defrag_pool(trx);
}
}

/*********************************************************************//**
Save defragmentation result.
/** Save defragmentation result.
@param[in] index index that was defragmented
@param[in,out] trx transaction
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_defrag_summary(
/*============================*/
dict_index_t* index) /*!< in: index */
dict_stats_save_defrag_summary(dict_index_t* index, trx_t* trx)
{
dberr_t ret=DB_SUCCESS;
lint now = (lint) ut_time();

if (dict_index_is_ibuf(index)) {
return DB_SUCCESS;
}

rw_lock_x_lock(dict_operation_lock);
mutex_enter(&dict_sys->mutex);

ret = dict_stats_save_index_stat(index, now, "n_pages_freed",
index->stat_defrag_n_pages_freed,
NULL,
"Number of pages freed during"
" last defragmentation run.",
NULL);

mutex_exit(&dict_sys->mutex);
rw_lock_x_unlock(dict_operation_lock);

return (ret);
return dict_stats_save_index_stat(index, ut_time(), "n_pages_freed",
index->stat_defrag_n_pages_freed,
NULL,
"Number of pages freed during"
" last defragmentation run.",
trx);
}

/*********************************************************************//**
Save defragmentation stats for a given index.
/** Save defragmentation stats for a given index.
@param[in] index index that is being defragmented
@param[in,out] trx transaction
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_defrag_stats(
/*============================*/
dict_index_t* index) /*!< in: index */
dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx)
{
dberr_t ret;

if (dict_index_is_ibuf(index)) {
return DB_SUCCESS;
}
@@ -305,7 +301,6 @@ dict_stats_save_defrag_stats(
return dict_stats_report_error(index->table, true);
}

lint now = (lint) ut_time();
mtr_t mtr;
ulint n_leaf_pages;
ulint n_leaf_reserved;
@@ -322,40 +317,33 @@
return DB_SUCCESS;
}

rw_lock_x_lock(dict_operation_lock);

mutex_enter(&dict_sys->mutex);
ret = dict_stats_save_index_stat(index, now, "n_page_split",
index->stat_defrag_n_page_split,
NULL,
"Number of new page splits on leaves"
" since last defragmentation.",
NULL);
if (ret != DB_SUCCESS) {
goto end;
}

ret = dict_stats_save_index_stat(
index, now, "n_leaf_pages_defrag",
n_leaf_pages,
lint now = ut_time();
dberr_t err = dict_stats_save_index_stat(
index, now, "n_page_split",
index->stat_defrag_n_page_split,
NULL,
"Number of leaf pages when this stat is saved to disk",
NULL);
if (ret != DB_SUCCESS) {
goto end;
"Number of new page splits on leaves"
" since last defragmentation.",
trx);
if (err == DB_SUCCESS) {
err = dict_stats_save_index_stat(
index, now, "n_leaf_pages_defrag",
n_leaf_pages,
NULL,
"Number of leaf pages when this stat is saved to disk",
trx);
}

ret = dict_stats_save_index_stat(
index, now, "n_leaf_pages_reserved",
n_leaf_reserved,
NULL,
"Number of pages reserved for this index leaves when this stat "
"is saved to disk",
NULL);

end:
mutex_exit(&dict_sys->mutex);
rw_lock_x_unlock(dict_operation_lock);
if (err == DB_SUCCESS) {
err = dict_stats_save_index_stat(
index, now, "n_leaf_pages_reserved",
n_leaf_reserved,
NULL,
"Number of pages reserved for this "
"index leaves when this stat "
"is saved to disk",
trx);
}

return (ret);
return err;
}