MDEV-19585 Assertion with S3 table and flush_tables
The problem was twofold:
- flush_tables() wrongly gave errors when failing to open read-only tables
- backup_block_ddl() didn't properly ignore errors from flush_tables()

The test case for this will be pushed in 10.5 as the test involves
S3 tables.
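
For readers outside the MariaDB tree, here is a minimal standalone sketch of the pattern the patch uses: push an error handler around the flush, swallow the expected "table is read only" error, and count anything else as a real failure. The server types (THD, Internal_error_handler, Sql_condition) and the flush loop are replaced with simplified stand-ins, so the names, signatures, and error code below are illustrative only; the actual implementation is in the diff that follows.

// Standalone sketch of the error-suppression pattern in this commit.
// Stand-in types only; compiles with any C++11 compiler.
#include <cstdio>
#include <vector>

static const int ER_OPEN_AS_READONLY= 1036;   // stand-in value for the server's error code

struct Flush_error_handler                    // stand-in for Internal_error_handler
{
  int handled_errors= 0;
  int unhandled_errors= 0;

  // Return true to swallow the condition, false to let it propagate.
  bool handle_condition(int sql_errno)
  {
    if (sql_errno == ER_OPEN_AS_READONLY)
    {
      handled_errors++;                       // expected for S3 / read-only tables
      return true;
    }
    unhandled_errors++;                       // anything else is a genuine error
    return false;
  }
  bool got_fatal_error() const { return unhandled_errors > 0; }
};

// Stand-in for flush_tables(): each element is the error code (0 = success)
// returned when opening one table share for flushing.
static bool flush_tables(const std::vector<int> &open_results)
{
  Flush_error_handler handler;                // push_internal_handler() in the server
  for (int err : open_results)
    if (err)
      handler.handle_condition(err);
  return handler.got_fatal_error();           // only non-read-only errors fail the flush
}

int main()
{
  // One S3 table opens read only, one table opens normally:
  // the flush must still succeed, as backup_block_ddl() expects.
  bool failed= flush_tables({ER_OPEN_AS_READONLY, 0});
  std::printf("flush_tables: %s\n", failed ? "failed" : "ok");
  return failed;
}
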
montywi committed Jun 26, 2019
1 parent 12ce066 commit 4dbe677
Showing 2 changed files with 51 additions and 7 deletions.
7 changes: 6 additions & 1 deletion sql/backup.cc
@@ -253,6 +253,7 @@ static bool backup_block_ddl(THD *thd)
    backup stage even if we got an error.
  */
  (void) flush_tables(thd, FLUSH_NON_TRANS_TABLES);
  thd->clear_error();

  /*
    block new DDL's, in addition to all previous blocks
@@ -286,7 +287,11 @@ static bool backup_block_commit(THD *thd)
                                           MDL_BACKUP_WAIT_COMMIT,
                                           thd->variables.lock_wait_timeout))
    DBUG_RETURN(1);
  flush_tables(thd, FLUSH_SYS_TABLES);

  /* We can ignore errors from flush_tables () */
  (void) flush_tables(thd, FLUSH_SYS_TABLES);
  thd->clear_error();

  DBUG_RETURN(0);
}

51 changes: 45 additions & 6 deletions sql/sql_base.cc
@@ -504,6 +504,43 @@ static my_bool tc_collect_used_shares(TDC_element *element,
}


/*
  Ignore errors from opening read only tables
*/

class flush_tables_error_handler : public Internal_error_handler
{
public:
  int handled_errors;
  int unhandled_errors;
  flush_tables_error_handler() : handled_errors(0), unhandled_errors(0)
  {}

  bool handle_condition(THD *thd,
                        uint sql_errno,
                        const char* sqlstate,
                        Sql_condition::enum_warning_level *level,
                        const char* msg,
                        Sql_condition ** cond_hdl)
  {
    *cond_hdl= NULL;
    if (sql_errno == ER_OPEN_AS_READONLY)
    {
      handled_errors++;
      return TRUE;
    }
    if (*level == Sql_condition::WARN_LEVEL_ERROR)
      unhandled_errors++;
    return FALSE;
  }

  bool got_fatal_error()
  {
    return unhandled_errors > 0;
  }
};


/**
  Flush cached table as part of global read lock
@@ -520,9 +557,9 @@ static my_bool tc_collect_used_shares(TDC_element *element,
bool flush_tables(THD *thd, flush_tables_type flag)
{
  bool result= TRUE;
  uint open_errors= 0;
  tc_collect_arg collect_arg;
  TABLE *tmp_table;
  flush_tables_error_handler error_handler;
  DBUG_ENTER("flush_tables");

  purge_tables(false); /* Flush unused tables and shares */
@@ -555,6 +592,8 @@ bool flush_tables(THD *thd, flush_tables_type flag)
  }

  /* Call HA_EXTRA_FLUSH on all found shares */

  thd->push_internal_handler(&error_handler);
  for (uint i= 0 ; i < collect_arg.shares.elements ; i++)
  {
    TABLE_SHARE *share= *dynamic_element(&collect_arg.shares, i,
@@ -584,14 +623,14 @@ bool flush_tables(THD *thd, flush_tables_type flag)
        */
        closefrm(tmp_table);
      }
      else
        open_errors++;
    }
    tdc_release_share(share);
  }

  result= open_errors ? TRUE : FALSE;
  DBUG_PRINT("note", ("open_errors: %u", open_errors));
  thd->pop_internal_handler();
  result= error_handler.got_fatal_error();
  DBUG_PRINT("note", ("open_errors: %u %u",
                      error_handler.handled_errors,
                      error_handler.unhandled_errors));
err:
  my_free(tmp_table);
  delete_dynamic(&collect_arg.shares);
