Merge 10.2 into 10.3
dr-m committed May 20, 2020
2 parents d4f97e2 + e380f44 commit f4f0ef3
Showing 15 changed files with 92 additions and 112 deletions.
4 changes: 3 additions & 1 deletion mysql-test/lib/mtr_report.pm
@@ -473,7 +473,7 @@ sub mtr_report_stats ($$$$) {
$comment =~ s/[\"]//g;

# if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'}) {
if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'} > 0) {
my $logcontents = $test->{'logfile-failed'} || $test->{'logfile'};

$xml_report .= qq(>\n\t\t\t<failure message="" type="MTR_RES_FAILED">\n<![CDATA[$logcontents]]>\n\t\t\t</failure>\n\t\t</testcase>\n);
@@ -639,6 +639,8 @@ sub mtr_error (@) {
}
else
{
use Carp qw(cluck);
cluck "Error happened" if $verbose > 0;
exit(1);
}
}
10 changes: 8 additions & 2 deletions mysql-test/mysql-test-run.pl
@@ -947,8 +947,14 @@ ($$$)
if ( $result->is_failed() ) {
my $worker_logdir= $result->{savedir};
my $log_file_name=dirname($worker_logdir)."/".$result->{shortname}.".log";
$result->{'logfile-failed'} = mtr_lastlinesfromfile($log_file_name, 20);
rename $log_file_name,$log_file_name.".failed";

if (-e $log_file_name) {
$result->{'logfile-failed'} = mtr_lastlinesfromfile($log_file_name, 20);
} else {
$result->{'logfile-failed'} = "";
}

rename $log_file_name, $log_file_name.".failed";
}
delete($result->{result});
$result->{retries}= $retries+1;
20 changes: 9 additions & 11 deletions mysql-test/suite/galera/r/galera_toi_truncate.result
@@ -1,24 +1,22 @@
connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
connection node_2;
SET SESSION wsrep_retry_autocommit = 0;
INSERT INTO t1(f1) SELECT 1 FROM ten as a1, ten AS a2;
set debug_sync='ha_commit_trans_after_prepare WAIT_FOR go';
INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6;;
INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6, ten AS a7, ten AS a8;
connection node_1;
TRUNCATE TABLE t1;;
connection node_1;
connection node_2;
ERROR 40001: Deadlock: wsrep aborted transaction
connection node_1;
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
SELECT COUNT(*) AS EXPECT_0 FROM t1;
EXPECT_0
0
connection node_1;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
SELECT COUNT(*) AS EXPECT_0 FROM t1;
EXPECT_0
0
DROP TABLE t1;
DROP TABLE ten;
26 changes: 9 additions & 17 deletions mysql-test/suite/galera/t/galera_toi_truncate.test
@@ -4,50 +4,42 @@
#

--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/not_embedded.inc
--source include/have_debug.inc

#
# INSERT and TRUNCATE on different nodes
#

--connection node_1
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);

CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;

# Insert 1m rows
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 10 FROM ten;
--source include/wait_condition.inc

# Prevent autocommit retrying from masking the deadlock error we expect to get
SET SESSION wsrep_retry_autocommit = 0;
INSERT INTO t1(f1) SELECT 1 FROM ten as a1, ten AS a2;

set debug_sync='ha_commit_trans_after_prepare WAIT_FOR go';
--send INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6;
--send INSERT INTO t1 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6, ten AS a7, ten AS a8

--connection node_1
# Wait for the above insert to start
--let $wait_condition = SELECT COUNT(*) >= 100 from t1;
--source include/wait_condition.inc

--send TRUNCATE TABLE t1;

--connection node_1
--reap

--connection node_2
--error ER_LOCK_DEADLOCK
--reap

--connection node_1
--reap

--connection node_2
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) AS EXPECT_0 FROM t1;

--connection node_1
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) AS EXPECT_0 FROM t1;

DROP TABLE t1;
DROP TABLE ten;
3 changes: 1 addition & 2 deletions mysql-test/suite/rpl/t/rpl_fail_register.test
@@ -17,13 +17,12 @@ set global debug_dbug=@old_dbug;

connection master;

### Dump thread is hanging despite slave has gracefully exited.
let $id=`SELECT id from information_schema.processlist where command='Binlog Dump'`;

if ($id) {
replace_result $id DUMP_THREAD;
eval kill $id;
let $wait_condition= SELECT count(*)=0 from information_schema.processlist where command='Binlog Dump';
let $wait_condition= SELECT count(*)=0 from information_schema.processlist where command='Killed';
source include/wait_condition.inc;
}

2 changes: 1 addition & 1 deletion mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -736,7 +736,7 @@ VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Number of threads performing background key rotation and scrubbing
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
NUMERIC_MAX_VALUE 255
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
5 changes: 1 addition & 4 deletions storage/innobase/fsp/fsp0fsp.cc
@@ -2991,10 +2991,7 @@ fseg_free_extent(
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
ut_d(space->modify_check(*mtr));

#if defined BTR_CUR_HASH_ADAPT || defined UNIV_DEBUG
const ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
#endif /* BTR_CUR_HASH_ADAPT || UNIV_DEBUG */
ut_d(ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE));

if (xdes_is_full(descr, mtr)) {
flst_remove(seg_inode + FSEG_FULL,
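
Note on the fsp0fsp.cc hunk above: InnoDB's ut_d() macro compiles its argument only in debug builds (UNIV_DEBUG) and expands to nothing otherwise, so the single ut_d() declaration replaces the old preprocessor guard and keeps first_page_in_extent out of non-debug builds. A minimal standalone sketch of that pattern, using simplified stand-in macros rather than the real InnoDB definitions:

// Simplified stand-ins for InnoDB's ut_d()/ut_ad(); not the real definitions.
// In a debug build (UNIV_DEBUG defined) the argument is compiled; otherwise it vanishes.
#include <cassert>

#ifdef UNIV_DEBUG
# define ut_d(EXPR)  EXPR
# define ut_ad(COND) assert(COND)
#else
# define ut_d(EXPR)
# define ut_ad(COND)
#endif

typedef unsigned long ulint;

// Hypothetical helper in the spirit of fseg_free_extent(): the variable only
// exists in debug builds, where it feeds debug-only checks.
void check_extent_bounds(ulint page, ulint extent_size)
{
	(void) page; (void) extent_size;  // silence unused-parameter warnings in release builds
	ut_d(ulint first_page_in_extent = page - (page % extent_size));
	ut_ad(first_page_in_extent <= page);
	ut_ad(page - first_page_in_extent < extent_size);
}
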
28 changes: 14 additions & 14 deletions storage/innobase/fts/fts0fts.cc
@@ -583,7 +583,7 @@ fts_cache_init(

mutex_enter((ib_mutex_t*) &cache->deleted_lock);
cache->deleted_doc_ids = ib_vector_create(
cache->sync_heap, sizeof(fts_update_t), 4);
cache->sync_heap, sizeof(doc_id_t), 4);
mutex_exit((ib_mutex_t*) &cache->deleted_lock);

/* Reset the cache data for all the FTS indexes. */
@@ -2610,11 +2610,11 @@ dberr_t
fts_cmp_set_sync_doc_id(
/*====================*/
const dict_table_t* table, /*!< in: table */
doc_id_t doc_id_cmp, /*!< in: Doc ID to compare */
doc_id_t cmp_doc_id, /*!< in: Doc ID to compare */
ibool read_only, /*!< in: TRUE if read the
synced_doc_id only */
doc_id_t* doc_id) /*!< out: larger document id
after comparing "doc_id_cmp"
after comparing "cmp_doc_id"
to the one stored in CONFIG
table */
{
@@ -2685,10 +2685,10 @@ fts_cmp_set_sync_doc_id(
goto func_exit;
}

if (doc_id_cmp == 0 && *doc_id) {
if (cmp_doc_id == 0 && *doc_id) {
cache->synced_doc_id = *doc_id - 1;
} else {
cache->synced_doc_id = ut_max(doc_id_cmp, *doc_id);
cache->synced_doc_id = ut_max(cmp_doc_id, *doc_id);
}

mutex_enter(&cache->doc_id_lock);
@@ -2699,7 +2699,7 @@
}
mutex_exit(&cache->doc_id_lock);

if (doc_id_cmp > *doc_id) {
if (cmp_doc_id > *doc_id) {
error = fts_update_sync_doc_id(
table, cache->synced_doc_id, trx);
}
@@ -2821,7 +2821,7 @@ fts_doc_ids_create(void)
fts_doc_ids->self_heap = ib_heap_allocator_create(heap);

fts_doc_ids->doc_ids = static_cast<ib_vector_t*>(ib_vector_create(
fts_doc_ids->self_heap, sizeof(fts_update_t), 32));
fts_doc_ids->self_heap, sizeof(doc_id_t), 32));

return(fts_doc_ids);
}
@@ -3921,7 +3921,7 @@ fts_sync_add_deleted_cache(

ut_a(ib_vector_size(doc_ids) > 0);

ib_vector_sort(doc_ids, fts_update_doc_id_cmp);
ib_vector_sort(doc_ids, fts_doc_id_cmp);

info = pars_info_create();

@@ -3939,13 +3939,13 @@
"BEGIN INSERT INTO $table_name VALUES (:doc_id);");

for (i = 0; i < n_elems && error == DB_SUCCESS; ++i) {
fts_update_t* update;
doc_id_t* update;
doc_id_t write_doc_id;

update = static_cast<fts_update_t*>(ib_vector_get(doc_ids, i));
update = static_cast<doc_id_t*>(ib_vector_get(doc_ids, i));

/* Convert to "storage" byte order. */
fts_write_doc_id((byte*) &write_doc_id, update->doc_id);
fts_write_doc_id((byte*) &write_doc_id, *update);
fts_bind_doc_id(info, "doc_id", &write_doc_id);

error = fts_eval_sql(sync->trx, graph);
@@ -5270,12 +5270,12 @@ fts_cache_append_deleted_doc_ids(


for (ulint i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) {
fts_update_t* update;
doc_id_t* update;

update = static_cast<fts_update_t*>(
update = static_cast<doc_id_t*>(
ib_vector_get(cache->deleted_doc_ids, i));

ib_vector_push(vector, &update->doc_id);
ib_vector_push(vector, &update);
}

mutex_exit((ib_mutex_t*) &cache->deleted_lock);
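
Note on the fts0fts.cc hunks above: the deleted-doc-id vectors now hold plain doc_id_t values instead of fts_update_t wrappers, so the code reads the id directly (*update) rather than through update->doc_id and can sort with a plain doc-id comparison. An illustrative sketch of the idea, with std::vector standing in for InnoDB's ib_vector_t (not the actual InnoDB API):

// Illustration only: std::vector replaces ib_vector_t, and the struct mimics
// the old fts_update_t wrapper whose doc_id member was being dereferenced.
#include <algorithm>
#include <cstdint>
#include <vector>

typedef std::uint64_t doc_id_t;

struct fts_update_t {
	doc_id_t doc_id;  // the member the deleted-doc-id code used to reach via ->doc_id
};

int main()
{
	// Before: wrapper elements; each id reached through an extra ->doc_id step.
	std::vector<fts_update_t> before = {{3}, {1}, {2}};
	doc_id_t first_before = before[0].doc_id;

	// After: raw doc_id_t elements; sorting needs only the default comparison.
	std::vector<doc_id_t> after = {3, 1, 2};
	std::sort(after.begin(), after.end());

	return (first_before == 3 && after.front() == 1) ? 0 : 1;
}
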
