diff --git a/.gitmodules b/.gitmodules index 3847bf6bff37d..18bcb465fa251 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,25 +1,19 @@ [submodule "libmariadb"] path = libmariadb url = https://github.com/MariaDB/mariadb-connector-c.git - ignore = all [submodule "storage/rocksdb/rocksdb"] path = storage/rocksdb/rocksdb url = https://github.com/facebook/rocksdb.git - ignore = all [submodule "wsrep-lib"] path = wsrep-lib url = https://github.com/codership/wsrep-lib.git branch = master - ignore = all [submodule "extra/wolfssl/wolfssl"] path = extra/wolfssl/wolfssl url = https://github.com/wolfSSL/wolfssl.git - ignore = all [submodule "storage/maria/libmarias3"] path = storage/maria/libmarias3 url = https://github.com/mariadb-corporation/libmarias3.git - ignore = all [submodule "storage/columnstore/columnstore"] path = storage/columnstore/columnstore url = https://github.com/mariadb-corporation/mariadb-columnstore-engine.git - ignore = all diff --git a/client/mysql.cc b/client/mysql.cc index 478b8ed4ac98e..6b7c445d3c2c3 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -5599,6 +5599,7 @@ static void init_username() full_username=my_strdup(PSI_NOT_INSTRUMENTED, cur[0],MYF(MY_WME)); part_username=my_strdup(PSI_NOT_INSTRUMENTED, strtok(cur[0],"@"),MYF(MY_WME)); (void) mysql_fetch_row(result); // Read eof + mysql_free_result(result); } } diff --git a/client/mysql_plugin.c b/client/mysql_plugin.c index dab98b10043db..a96af01509ef6 100644 --- a/client/mysql_plugin.c +++ b/client/mysql_plugin.c @@ -102,7 +102,7 @@ int main(int argc,char *argv[]) MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ plugin_data.name= 0; /* initialize name */ - + /* The following operations comprise the method for enabling or disabling a plugin. We begin by processing the command options then check the @@ -110,15 +110,15 @@ int main(int argc,char *argv[]) --plugin-ini (if specified). If the directories are Ok, we then look for the mysqld executable and the plugin soname. Finally, we build a bootstrap command file for use in bootstraping the server. - + If any step fails, the method issues an error message and the tool exits. - + 1) Parse, execute, and verify command options. 2) Check access to directories. 3) Look for mysqld executable. 4) Look for the plugin. 5) Build a bootstrap file with commands to enable or disable plugin. - + */ if ((error= process_options(argc, argv, operation)) || (error= check_access()) || @@ -126,11 +126,11 @@ int main(int argc,char *argv[]) (error= find_plugin(tp_path)) || (error= build_bootstrap_file(operation, bootstrap))) goto exit; - + /* Dump the bootstrap file if --verbose specified. */ if (opt_verbose && ((error= dump_bootstrap_file(bootstrap)))) goto exit; - + /* Start the server in bootstrap mode and execute bootstrap commands */ error= bootstrap_server(server_path, bootstrap); @@ -238,7 +238,7 @@ static int run_command(char* cmd, const char *mode) #ifdef _WIN32 /** Check to see if there are spaces in a path. - + @param[in] path The Windows path to examine. @retval int spaces found = 1, no spaces = 0 @@ -253,7 +253,7 @@ static int has_spaces(const char *path) /** Convert a Unix path to a Windows path. - + @param[in] path The Windows path to examine. 
@returns string containing path with / changed to \\ @@ -335,12 +335,12 @@ static int get_default_values() #ifdef _WIN32 { char *format_str= 0; - + if (has_spaces(tool_path) || has_spaces(defaults_file)) format_str = "\"%s --mysqld > %s\""; else format_str = "%s --mysqld > %s"; - + snprintf(defaults_cmd, sizeof(defaults_cmd), format_str, add_quotes(tool_path), add_quotes(defaults_file)); if (opt_verbose) @@ -675,7 +675,7 @@ static int load_plugin_data(char *plugin_name, char *config_file) { reason= "Bad format in plugin configuration file."; fclose(file_ptr); - goto error; + goto error; } break; } @@ -709,7 +709,7 @@ static int load_plugin_data(char *plugin_name, char *config_file) } } } - + fclose(file_ptr); return 0; @@ -740,7 +740,7 @@ static int check_options(int argc, char **argv, char *operation) int num_found= 0; /* number of options found (shortcut loop) */ char config_file[FN_REFLEN]; /* configuration file name */ char plugin_name[FN_REFLEN]; /* plugin name */ - + /* Form prefix strings for the options. */ const char *basedir_prefix = "--basedir="; size_t basedir_len= strlen(basedir_prefix); @@ -815,7 +815,7 @@ static int check_options(int argc, char **argv, char *operation) return 1; } /* If a plugin was specified, read the config file. */ - else if (strlen(plugin_name) > 0) + else if (strlen(plugin_name) > 0) { if (load_plugin_data(plugin_name, config_file)) { @@ -847,22 +847,22 @@ static int check_options(int argc, char **argv, char *operation) /** Parse, execute, and verify command options. - + This method handles all of the option processing including the optional features for displaying data (--print-defaults, --help ,etc.) that do not result in an attempt to ENABLE or DISABLE of a plugin. - + @param[in] arc Count of arguments @param[in] argv Array of arguments @param[out] operation Operation (ENABLE or DISABLE) - + @retval int error = 1, success = 0, exit program = -1 */ static int process_options(int argc, char *argv[], char *operation) { int error= 0; - + /* Parse and execute command-line options */ if ((error= handle_options(&argc, &argv, my_long_options, get_one_option))) return error; @@ -890,7 +890,7 @@ static int process_options(int argc, char *argv[], char *operation) opt_basedir= my_strdup(PSI_NOT_INSTRUMENTED, buff, MYF(MY_FAE)); } } - + /* If the user did not specify the option to skip loading defaults from a config file and the required options are not present or there was an error @@ -925,18 +925,18 @@ static int process_options(int argc, char *argv[], char *operation) /** Check access - + This method checks to ensure all of the directories (opt_basedir, opt_plugin_dir, opt_datadir, and opt_plugin_ini) are accessible by the user. - + @retval int error = 1, success = 0 */ static int check_access() { int error= 0; - + if ((error= my_access(opt_basedir, F_OK))) { fprintf(stderr, "ERROR: Cannot access basedir at '%s'.\n", @@ -1048,13 +1048,13 @@ static int find_plugin(char *tp_path) /** Build the bootstrap file. - + Create a new file and populate it with SQL commands to ENABLE or DISABLE the plugin via REPLACE and DELETE operations on the mysql.plugin table. 
param[in] operation The type of operation (ENABLE or DISABLE) param[out] bootstrap A FILE* pointer - + @retval int error = 1, success = 0 */ @@ -1062,7 +1062,7 @@ static int build_bootstrap_file(char *operation, char *bootstrap) { int error= 0; FILE *file= 0; - + /* Perform plugin operation : ENABLE or DISABLE @@ -1073,10 +1073,10 @@ static int build_bootstrap_file(char *operation, char *bootstrap) .ini configuration file. Once the file is built, a call to mysqld is made in read only, bootstrap modes to read the SQL statements and execute them. - + Note: Replace was used so that if a user loads a newer version of a library with a different library name, the new library name is - used for symbols that match. + used for symbols that match. */ if ((error= make_tempfile(bootstrap, "sql"))) { @@ -1123,7 +1123,7 @@ static int build_bootstrap_file(char *operation, char *bootstrap) printf("# Disabling %s...\n", plugin_data.name); } } - + exit: fclose(file); return error; @@ -1132,11 +1132,11 @@ static int build_bootstrap_file(char *operation, char *bootstrap) /** Dump bootstrap file. - + Read the contents of the bootstrap file and print it out. - + @param[in] bootstrap_file Name of bootstrap file to read - + @retval int error = 1, success = 0 */ @@ -1173,7 +1173,7 @@ static int dump_bootstrap_file(char *bootstrap_file) /** Bootstrap the server - + Create a command line sequence to launch mysqld in bootstrap mode. This will allow mysqld to launch a minimal server instance to read and execute SQL commands from a file piped in (the bootstrap file). We use @@ -1194,47 +1194,39 @@ static int dump_bootstrap_file(char *bootstrap_file) static int bootstrap_server(char *server_path, char *bootstrap_file) { - char bootstrap_cmd[FN_REFLEN]; + char bootstrap_cmd[FN_REFLEN]= {0}; + char lc_messages_dir_str[FN_REFLEN]= {0}; int error= 0; #ifdef _WIN32 char *format_str= 0; const char *verbose_str= NULL; - - +#endif + + if (opt_lc_messages_dir != NULL) + snprintf(lc_messages_dir_str, sizeof(lc_messages_dir_str), "--lc-messages-dir=%s", + opt_lc_messages_dir); + +#ifdef _WIN32 if (opt_verbose) verbose_str= "--console"; else verbose_str= ""; + if (has_spaces(opt_datadir) || has_spaces(opt_basedir) || - has_spaces(bootstrap_file)) - { - if (opt_lc_messages_dir != NULL) - format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s --lc-messages-dir=%s <%s\""; - else - format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s <%s\""; - } + has_spaces(bootstrap_file) || has_spaces(lc_messages_dir_str)) + format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s %s <%s\""; else - { - if (opt_lc_messages_dir != NULL) - format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s --lc-messages-dir=%s <%s\""; - else - format_str= "%s %s --bootstrap --datadir=%s --basedir=%s <%s"; - } + format_str= "%s %s --bootstrap --datadir=%s --basedir=%s %s <%s"; + snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), format_str, add_quotes(convert_path(server_path)), verbose_str, add_quotes(opt_datadir), add_quotes(opt_basedir), - add_quotes(bootstrap_file)); + add_quotes(lc_messages_dir_str), add_quotes(bootstrap_file)); #else - if (opt_lc_messages_dir != NULL) - snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), - "%s --no-defaults --bootstrap --datadir=%s --basedir=%s --lc-messages-dir=%s" - " <%s", server_path, opt_datadir, opt_basedir, opt_lc_messages_dir, bootstrap_file); - else - snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), - "%s --no-defaults --bootstrap --datadir=%s --basedir=%s" - " <%s", server_path, opt_datadir, opt_basedir, 
bootstrap_file);
-
+  snprintf(bootstrap_cmd, sizeof(bootstrap_cmd),
+           "%s --no-defaults --bootstrap --datadir=%s --basedir=%s %s"
+           " <%s", server_path, opt_datadir, opt_basedir, lc_messages_dir_str, bootstrap_file);
 #endif
 
   /* Execute the command */
@@ -1247,6 +1239,6 @@ static int bootstrap_server(char *server_path, char *bootstrap_file)
     fprintf(stderr,
             "ERROR: Unexpected result from bootstrap. Error code: %d.\n",
             error);
-
+
   return error;
 }
diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c
index d026c347c828a..b4902d32e0233 100644
--- a/client/mysql_upgrade.c
+++ b/client/mysql_upgrade.c
@@ -76,6 +76,8 @@ char upgrade_from_version[1024];
 
 static my_bool opt_write_binlog;
 
+static void print_conn_args(const char *tool_name);
+
 #define OPT_SILENT OPT_MAX_CLIENT_OPTION
 
 static struct my_option my_long_options[]=
@@ -155,7 +157,10 @@ static struct my_option my_long_options[]=
    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
   {"user", 'u', "User for login.", &opt_user,
    &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-  {"verbose", 'v', "Display more output about the process; Using it twice will print connection argument; Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase.",
+  {"verbose", 'v', "Display more output about the process; Using it twice will print connection argument; "
+   "Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase; "
+   "Using it 4 times (added in MariaDB 10.0.14) will also write out all mariadb-check commands used; "
+   "Using it 5 times will print all the mariadb commands used and their results while running mysql_fix_privilege_tables script.",
    &opt_not_used, &opt_not_used, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
   {"version", 'V', "Output version information and exit.", 0, 0, 0,
    GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -210,6 +215,7 @@ static void die(const char *fmt, ...)
   DBUG_ENTER("die");
 
   /* Print the error message */
+  print_conn_args("mariadb-check");
   fflush(stdout);
   va_start(args, fmt);
   if (fmt)
@@ -640,6 +646,7 @@ static int run_query(const char *query, DYNAMIC_STRING *ds_res,
                   "--database=mysql",
                   "--batch", /* Turns off pager etc. */
                   force ? "--force": "--skip-force",
+                  opt_verbose >= 5 ? "--verbose" : "",
                   ds_res || opt_silent ? "--silent": "",
                   "<",
                   query_file_path,
@@ -866,8 +873,7 @@ static int upgrade_already_done(int silent)
               "There is no need to run mysql_upgrade again for %s.",
               upgrade_from_version, version);
       if (!opt_check_upgrade)
-        verbose("You can use --force if you still want to run mysql_upgrade",
-                upgrade_from_version, version);
+        verbose("You can use --force if you still want to run mysql_upgrade");
     }
     return 0;
   }
@@ -1329,9 +1335,7 @@ static int run_sql_fix_privilege_tables(void)
     dynstr_append(&ds_script, *query_ptr);
   }
 
-  run_query(ds_script.str,
-            &ds_result, /* Collect result */
-            TRUE);
+  run_query(ds_script.str, (opt_verbose >= 5) ?
NULL : &ds_result, TRUE); { /* @@ -1502,6 +1506,7 @@ int main(int argc, char **argv) DBUG_ASSERT(phase == phases_total); end: + print_conn_args("mariadb-check"); free_used_memory(); my_end(my_end_arg); exit(0); diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 38b3be837bc6c..c855eac753071 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -1600,7 +1600,8 @@ static void print_relative_row_vert(MYSQL_RES *result __attribute__((unused)), llstr((tmp - last_values[row]), buff)); /* Find the minimum row length needed to output the relative value */ - if ((length=(uint) strlen(buff) > ex_val_max_len[row]) && ex_status_printed) + length=(uint) strlen(buff); + if (length > ex_val_max_len[row] && ex_status_printed) ex_val_max_len[row] = length; last_values[row] = tmp; } diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 8942ee6d1dca1..e45385f523b24 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -2059,6 +2059,11 @@ static void cleanup() my_free_open_file_info(); load_processor.destroy(); mysql_server_end(); + if (opt_flashback) + { + delete_dynamic(&binlog_events); + delete_dynamic(&events_in_stmt); + } DBUG_VOID_RETURN; } @@ -2146,11 +2151,11 @@ static void extend_main_gtid_event_filter(Gtid_event_filter *new_filter) } } -static void die() +static void die(int err) { cleanup(); my_end(MY_DONT_FREE_DBUG); - exit(1); + exit(err); } @@ -2187,7 +2192,7 @@ static my_time_t convert_str_to_timestamp(const char* str) l_time.time_type != MYSQL_TIMESTAMP_DATETIME || status.warnings) { error("Incorrect date and time argument: %s", str); - die(); + die(1); } /* Note that Feb 30th, Apr 31st cause no error messages and are mapped to @@ -2358,7 +2363,7 @@ get_one_option(const struct my_option *opt, const char *argument, const char *fi opt->name)) <= 0) { sf_leaking_memory= 1; /* no memory leak reports here */ - die(); + die(1); } /* Specification of protocol via CLI trumps implicit overrides */ @@ -2387,7 +2392,7 @@ get_one_option(const struct my_option *opt, const char *argument, const char *fi opt->name)) <= 0) { sf_leaking_memory= 1; /* no memory leak reports here */ - die(); + die(1); } opt_base64_output_mode= (enum_base64_output_mode)(val - 1); break; @@ -2574,7 +2579,7 @@ static int parse_args(int *argc, char*** argv) if ((ho_error=handle_options(argc, argv, my_options, get_one_option))) { - die(); + die(ho_error); } if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; @@ -2607,7 +2612,7 @@ static int parse_args(int *argc, char*** argv) quit in error. Note that any specific error messages will have already been written. 
*/ - die(); + die(1); } extend_main_gtid_event_filter(position_gtid_filter); @@ -3619,6 +3624,12 @@ int main(int argc, char** argv) my_set_max_open_files(open_files_limit); + if (opt_flashback && opt_raw_mode) + { + error("The --raw mode is not allowed with --flashback mode"); + die(1); + } + if (opt_flashback) { my_init_dynamic_array(PSI_NOT_INSTRUMENTED, &binlog_events, @@ -3634,7 +3645,7 @@ int main(int argc, char** argv) if (!remote_opt) { error("The --raw mode only works with --read-from-remote-server"); - die(); + die(1); } if (one_database) warning("The --database option is ignored in raw mode"); @@ -3656,7 +3667,7 @@ int main(int argc, char** argv) O_WRONLY | O_BINARY, MYF(MY_WME)))) { error("Could not create log file '%s'", result_file_name); - die(); + die(1); } } else @@ -3745,7 +3756,7 @@ int main(int argc, char** argv) /* Set delimiter back to semicolon */ if (retval != ERROR_STOP) { - if (!stop_event_string.is_empty()) + if (!stop_event_string.is_empty() && result_file) fprintf(result_file, "%s", stop_event_string.ptr()); if (!opt_raw_mode && opt_flashback) fprintf(result_file, "DELIMITER ;\n"); diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index 12804b3c6674f..4c6114a2ad8d2 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -1002,6 +1002,7 @@ static int handle_request_for_tables(char *tables, size_t length, DBUG_RETURN(1); if (dont_quote) { + DBUG_ASSERT(op); DBUG_ASSERT(strlen(op)+strlen(tables)+strlen(options)+8+1 <= query_size); /* No backticks here as we added them before */ diff --git a/client/mysqldump.c b/client/mysqldump.c index 5c1a3d5588023..ac3c4b5fa713d 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -2616,7 +2616,10 @@ static uint dump_events_for_db(char *db) /* Get database collation. */ if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name))) + { + mysql_free_result(event_list_res); DBUG_RETURN(1); + } } if (switch_character_set_results(mysql, "binary")) @@ -3439,7 +3442,10 @@ static uint get_table_structure(const char *table, const char *db, char *table_t if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) + { + mysql_free_result(result); DBUG_RETURN(0); + } write_header(sql_file, db); } @@ -3840,7 +3846,7 @@ static int dump_triggers_for_table(char *table_name, char *db_name) char name_buff[NAME_LEN*4+3]; char query_buff[QUERY_LENGTH]; uint old_opt_compatible_mode= opt_compatible_mode; - MYSQL_RES *show_triggers_rs; + MYSQL_RES *show_triggers_rs= NULL; MYSQL_ROW row; FILE *sql_file= md_result_file; @@ -3924,8 +3930,6 @@ static int dump_triggers_for_table(char *table_name, char *db_name) } skip: - mysql_free_result(show_triggers_rs); - if (switch_character_set_results(mysql, default_charset)) goto done; @@ -3940,7 +3944,7 @@ static int dump_triggers_for_table(char *table_name, char *db_name) done: if (path) my_fclose(sql_file, MYF(0)); - + mysql_free_result(show_triggers_rs); DBUG_RETURN(ret); } @@ -4056,7 +4060,7 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, size_t total_length, init_length; my_bool versioned= 0; - MYSQL_RES *res; + MYSQL_RES *res= NULL; MYSQL_FIELD *field; MYSQL_ROW row; DBUG_ENTER("dump_table"); @@ -4254,6 +4258,8 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, fprintf(stderr,"%s: Error in field count for table: %s ! 
Aborting.\n", my_progname_short, result_table); error= EX_CONSCHECK; + if (!quick) + mysql_free_result(res); goto err; } @@ -4562,6 +4568,7 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, err: dynstr_free(&query_string); maybe_exit(error); + mysql_free_result(res); DBUG_VOID_RETURN; } /* dump_table */ @@ -4833,7 +4840,11 @@ static int dump_all_users_roles_and_grants() " '@', QUOTE(DEFAULT_ROLE_HOST))) as r," " CONCAT(QUOTE(mu.USER),'@',QUOTE(mu.HOST)) as u " "FROM mysql.user mu LEFT JOIN mysql.default_roles using (USER, HOST)")) + { + mysql_free_result(tableres); return 1; + } + while ((row= mysql_fetch_row(tableres))) { if (dump_grants(row[1])) @@ -5918,7 +5929,8 @@ static int get_sys_var_lower_case_table_names() lower_case_table_names= atoi(row[1]); mysql_free_result(table_res); } - + if (!row) + mysql_free_result(table_res); return lower_case_table_names; } @@ -6161,7 +6173,11 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos, } if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 1)) + { + mysql_free_result(master); return 1; + } + } /* SHOW MASTER STATUS reports file and position */ @@ -6283,7 +6299,10 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid, { char gtid_pos[MAX_GTID_LENGTH]; if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0)) + { + mysql_free_result(slave); return 1; + } if (opt_comments) fprintf(md_result_file, "\n--\n-- Gtid position to start replication " "from\n--\n\n"); @@ -6479,7 +6498,7 @@ static ulong find_set(TYPELIB *lib, const char *x, size_t length, { const char *end= x + length; ulong found= 0; - uint find; + int find; char buff[255]; *err_pos= 0; /* No error yet */ diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 54df0124063ef..27a80a115fd23 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -1835,6 +1835,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) uint x; struct timeval start_time, end_time; thread_context con; + int error; pthread_t mainthread; /* Thread descriptor */ pthread_attr_t attr; /* Thread attributes */ DBUG_ENTER("run_scheduler"); @@ -1843,8 +1844,11 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) con.limit= limit; pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, - PTHREAD_CREATE_DETACHED); + if ((error= pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))) + { + printf("Got error: %d from pthread_attr_setdetachstate\n", error); + exit(1); + } pthread_mutex_lock(&counter_mutex); thread_counter= 0; diff --git a/client/mysqltest.cc b/client/mysqltest.cc index bf41fa3b0b2b8..8dbad70e7a068 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -3630,9 +3630,11 @@ void do_system(struct st_command *command) /* returns TRUE if path is inside a sandbox */ bool is_sub_path(const char *path, size_t plen, const char *sandbox) { - size_t len= strlen(sandbox); - if (!sandbox || !len || plen <= len || memcmp(path, sandbox, len - 1) - || path[len] != '/') + size_t len; + if (!sandbox) + return false; + len= strlen(sandbox); + if (plen <= len || memcmp(path, sandbox, len-1) || path[len] != '/') return false; return true; } @@ -3878,9 +3880,21 @@ void do_move_file(struct st_command *command) sizeof(move_file_args)/sizeof(struct command_arg), ' '); - if (bad_path(ds_to_file.str)) - DBUG_VOID_RETURN; + size_t from_plen = strlen(ds_from_file.str); + size_t to_plen = strlen(ds_to_file.str); + const char *vardir= getenv("MYSQLTEST_VARDIR"); + const char *tmpdir= 
getenv("MYSQL_TMP_DIR"); + if (!((is_sub_path(ds_from_file.str, from_plen, vardir) && + is_sub_path(ds_to_file.str, to_plen, vardir)) || + (is_sub_path(ds_from_file.str, from_plen, tmpdir) && + is_sub_path(ds_to_file.str, to_plen, tmpdir)))) { + report_or_die("Paths '%s' and '%s' are not both under MYSQLTEST_VARDIR '%s'" + "or both under MYSQL_TMP_DIR '%s'", + ds_from_file, ds_to_file, vardir, tmpdir); + DBUG_VOID_RETURN; + } + DBUG_PRINT("info", ("Move %s to %s", ds_from_file.str, ds_to_file.str)); error= (my_rename(ds_from_file.str, ds_to_file.str, MYF(disable_warnings ? 0 : MY_WME)) != 0); @@ -11871,7 +11885,7 @@ void dynstr_append_sorted(DYNAMIC_STRING* ds, DYNAMIC_STRING *ds_input, /* Sort array */ qsort(lines.buffer, lines.elements, - sizeof(char**), (qsort_cmp)comp_lines); + sizeof(uchar *), (qsort_cmp)comp_lines); /* Create new result */ for (i= 0; i < lines.elements ; i++) diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index d6516430bcc38..08ef820022b5c 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -16,6 +16,7 @@ set -e # Buildbot, running the test suite from installed .debs on a clean VM. export DEB_BUILD_OPTIONS="nocheck $DEB_BUILD_OPTIONS" +# shellcheck source=/dev/null source ./VERSION # General CI optimizations to keep build output smaller @@ -74,7 +75,7 @@ replace_uring_with_aio() disable_pmem() { sed '/libpmem-dev/d' -i debian/control - sed '/-DWITH_PMEM=YES/d' -i debian/rules + sed '/-DWITH_PMEM=ON/d' -i debian/rules } disable_libfmt() @@ -107,13 +108,13 @@ fi # If not known, use 'unknown' in .deb version identifier if [ -z "${LSBID}" ] then - LSBID="unknown" + LSBID="unknown" fi case "${LSBNAME}" in # Debian - buster) + "buster") add_lsb_base_depends disable_libfmt replace_uring_with_aio @@ -122,12 +123,11 @@ in disable_pmem fi ;& - bullseye|bookworm) + "bullseye"|"bookworm") if [[ "${LSBNAME}" == "bullseye" ]] then add_lsb_base_depends fi - # mariadb-plugin-rocksdb in control is 4 arches covered by the distro rocksdb-tools # so no removal is necessary. if [[ ! "$architecture" =~ amd64|arm64|ppc64el ]] @@ -139,22 +139,22 @@ in replace_uring_with_aio fi ;& - sid) + "sid") # The default packaging should always target Debian Sid, so in this case # there is intentionally no customizations whatsoever. ;; # Ubuntu - bionic) + "bionic") add_lsb_base_depends remove_rocksdb_tools [ "$architecture" != amd64 ] && disable_pmem ;& - focal) + "focal") add_lsb_base_depends replace_uring_with_aio disable_libfmt ;& - impish|jammy|kinetic) + "impish"|"jammy"|"kinetic") add_lsb_base_depends # mariadb-plugin-rocksdb s390x not supported by us (yet) # ubuntu doesn't support mips64el yet, so keep this just @@ -196,17 +196,34 @@ dch -b -D "${LSBNAME}" -v "${VERSION}" "Automatic build with ${LOGSTRING}." --co echo "Creating package version ${VERSION} ... 
" +BUILDPACKAGE_DPKGCMD="" + # Use eatmydata is available to build faster with less I/O, skipping fsync() # during the entire build process (safe because a build can always be restarted) if which eatmydata > /dev/null then - BUILDPACKAGE_PREPEND=eatmydata + BUILDPACKAGE_DPKGCMD="eatmydata" +fi + +BUILDPACKAGE_DPKGCMD+="dpkg-buildpackage" + +# Using dpkg-buildpackage args +# -us Allow unsigned sources +# -uc Allow unsigned changes +# -I Tar ignore +BUILDPACKAGE_DPKG_ARGS=(-us -uc -I) + +# There can be also extra flags that are appended to args +if [ -n "$BUILDPACKAGE_FLAGS" ] +then + read -ra BUILDPACKAGE_TMP_ARGS <<< "$BUILDPACKAGE_FLAGS" + BUILDPACKAGE_DPKG_ARGS=("${BUILDPACKAGE_DPKG_ARGS[@]} ${BUILDPACKAGE_TMP_ARGS[@]}") fi # Build the package # Pass -I so that .git and other unnecessary temporary and source control files # will be ignored by dpkg-source when creating the tar.gz source package. -fakeroot $BUILDPACKAGE_PREPEND dpkg-buildpackage -us -uc -I $BUILDPACKAGE_FLAGS +fakeroot -- "${BUILDPACKAGE_DPKGCMD}" "${BUILDPACKAGE_DPKG_ARGS[@]}" # If the step above fails due to missing dependencies, you can manually run # sudo mk-build-deps debian/control -r -i diff --git a/debian/control b/debian/control index 6d5bceffc1700..b4a4d3360cb7c 100644 --- a/debian/control +++ b/debian/control @@ -320,13 +320,17 @@ Breaks: mariadb-server-10.0, mariadb-server-core-10.6, mariadb-server-core-10.7, mariadb-server-core-10.8, + mysql-cluster-community-client-plugins, mysql-server-core-5.5, mysql-server-core-5.6, mysql-server-core-5.7, mysql-server-core-8.0, percona-server-server-5.6, + percona-server-server-5.7, + percona-server-server-8.0, percona-xtradb-cluster-server-5.6, - percona-xtradb-cluster-server-5.7 + percona-xtradb-cluster-server-5.7, + percona-xtradb-cluster-server-8.0 Replaces: mariadb-client (<< ${source:Version}), mariadb-client-10.0, mariadb-client-10.1, @@ -375,13 +379,17 @@ Replaces: mariadb-client (<< ${source:Version}), mysql-client-core-5.6, mysql-client-core-5.7, mysql-client-core-8.0, + mysql-cluster-community-client-plugins, mysql-server-core-5.5, mysql-server-core-5.6, mysql-server-core-5.7, mysql-server-core-8.0, percona-server-server-5.6, + percona-server-server-5.7, + percona-server-server-8.0, percona-xtradb-cluster-server-5.6, percona-xtradb-cluster-server-5.7, + percona-xtradb-cluster-server-8.0, virtual-mysql-client-core Provides: default-mysql-client-core, mariadb-client-core-10.10, @@ -598,6 +606,7 @@ Breaks: mariadb-client (<< ${source:Version}), mariadb-server (<< ${source:Version}), mariadb-server-10.0, mariadb-server-10.1, + mariadb-server-10.2, mariadb-server-10.3, mariadb-server-10.4, mariadb-server-10.5, @@ -625,6 +634,7 @@ Replaces: mariadb-client (<< ${source:Version}), mariadb-server (<< ${source:Version}), mariadb-server-10.0, mariadb-server-10.1, + mariadb-server-10.2, mariadb-server-10.3, mariadb-server-10.4, mariadb-server-10.5, @@ -784,9 +794,11 @@ Package: mariadb-backup Architecture: any Breaks: mariadb-backup-10.1, mariadb-backup-10.2, + mariadb-backup-10.3, mariadb-client-10.1 Replaces: mariadb-backup-10.1, mariadb-backup-10.2, + mariadb-backup-10.3, mariadb-client-10.1 Depends: mariadb-client-core (= ${binary:Version}), ${misc:Depends}, @@ -809,12 +821,16 @@ Breaks: mariadb-connect-engine-10.0, mariadb-connect-engine-10.1, mariadb-connect-engine-10.2, mariadb-connect-engine-10.3, - mariadb-connect-engine-10.4 + mariadb-connect-engine-10.4, + mariadb-server-10.0, + mariadb-server-10.1 Replaces: mariadb-connect-engine-10.0, 
mariadb-connect-engine-10.1, mariadb-connect-engine-10.2, mariadb-connect-engine-10.3, - mariadb-connect-engine-10.4 + mariadb-connect-engine-10.4, + mariadb-server-10.0, + mariadb-server-10.1 Description: Connect storage engine for MariaDB Connect engine supports a number of file formats (dbf, xml, txt, bin, etc), connections to ODBC tables and remote MySQL tables, as well as a number of @@ -835,7 +851,7 @@ Description: Amazon S3 archival storage engine for MariaDB Package: mariadb-plugin-rocksdb Architecture: amd64 arm64 mips64el ppc64el Depends: mariadb-server (= ${server:Version}), - python3, + python3:any, rocksdb-tools, ${misc:Depends}, ${shlibs:Depends} @@ -861,12 +877,16 @@ Breaks: mariadb-oqgraph-engine-10.0, mariadb-oqgraph-engine-10.1, mariadb-oqgraph-engine-10.2, mariadb-oqgraph-engine-10.3, - mariadb-oqgraph-engine-10.4 + mariadb-oqgraph-engine-10.4, + mariadb-server-10.0, + mariadb-server-10.1 Replaces: mariadb-oqgraph-engine-10.0, mariadb-oqgraph-engine-10.1, mariadb-oqgraph-engine-10.2, mariadb-oqgraph-engine-10.3, - mariadb-oqgraph-engine-10.4 + mariadb-oqgraph-engine-10.4, + mariadb-server-10.0, + mariadb-server-10.1 Description: OQGraph storage engine for MariaDB The OQGraph engine is a computation engine plugin for handling hierarchies (trees) and graphs (friend-of-a-friend, etc) cleanly through standard SQL. @@ -922,11 +942,15 @@ Depends: libgssapi-krb5-2, Breaks: mariadb-gssapi-server-10.1, mariadb-gssapi-server-10.2, mariadb-gssapi-server-10.3, - mariadb-gssapi-server-10.4 + mariadb-gssapi-server-10.4, + mariadb-server-10.0, + mariadb-server-10.1 Replaces: mariadb-gssapi-server-10.1, mariadb-gssapi-server-10.2, mariadb-gssapi-server-10.3, - mariadb-gssapi-server-10.4 + mariadb-gssapi-server-10.4, + mariadb-server-10.0, + mariadb-server-10.1 Description: GSSAPI authentication plugin for MariaDB server This plugin includes support for Kerberos on Unix, but can also be used for Windows authentication with or without domain environment. 
@@ -1085,6 +1109,8 @@ Replaces: mariadb-test-10.0,
          mariadb-test-5.5,
          mysql-client-5.5,
          mysql-server-5.5,
+         mysql-server-5.7,
+         mysql-server-core-8.0,
          mysql-testsuite,
          mysql-testsuite-5.5,
          mysql-testsuite-5.6,
diff --git a/debian/libmariadb-dev.install b/debian/libmariadb-dev.install
index 0f82ac68d7d6d..c015d59ae9b01 100644
--- a/debian/libmariadb-dev.install
+++ b/debian/libmariadb-dev.install
@@ -12,10 +12,10 @@ usr/include/mariadb/mariadb_dyncol.h
 usr/include/mariadb/mariadb_rpl.h
 usr/include/mariadb/mariadb_stmt.h
 usr/include/mariadb/mariadb_version.h
+usr/include/mariadb/my_alloca.h
 usr/include/mariadb/my_config.h
 usr/include/mariadb/my_global.h
 usr/include/mariadb/my_sys.h
-usr/include/mariadb/my_alloca.h
 usr/include/mariadb/mysql.h
 usr/include/mariadb/mysql/
 usr/include/mariadb/mysql/client_plugin.h
diff --git a/debian/libmariadb-dev.lintian-overrides b/debian/libmariadb-dev.lintian-overrides
index 345b537a52171..6a162120f2427 100644
--- a/debian/libmariadb-dev.lintian-overrides
+++ b/debian/libmariadb-dev.lintian-overrides
@@ -1 +1,2 @@
+# This is how upstream does it, won't fix
 arch-dependent-file-not-in-arch-specific-directory usr/bin/mariadb_config
diff --git a/debian/mariadb-server-core.install b/debian/mariadb-server-core.install
index 1a07a3fea9494..4eee370672a91 100644
--- a/debian/mariadb-server-core.install
+++ b/debian/mariadb-server-core.install
@@ -15,8 +15,8 @@ usr/share/man/man1/resolveip.1
 usr/share/man/man8/mariadbd.8
 usr/share/man/man8/mysqld.8
 usr/share/mysql/bulgarian
-usr/share/mysql/chinese
 usr/share/mysql/charsets
+usr/share/mysql/chinese
 usr/share/mysql/czech
 usr/share/mysql/danish
 usr/share/mysql/dutch
diff --git a/debian/mariadb-server.preinst b/debian/mariadb-server.preinst
index 755815eaeedfe..d5663236e3041 100644
--- a/debian/mariadb-server.preinst
+++ b/debian/mariadb-server.preinst
@@ -118,6 +118,18 @@ then
 fi
 
+# If there is no debian-*.flag, and no version was detected, but a file that
+# indicates MySQL 8.0 is found (undo_001 is created by default in MySQL 8.0+
+# installs), then that file is enough of an additional indication to trigger
+# the move of the data directory.
+if [ -z "$found_version" ] &&
+   [ -z "$(find $mysql_datadir/debian-*.flag 2> /dev/null)" ] &&
+   [ -f "$mysql_datadir/undo_001" ]
+then
+  echo "$mysql_datadir: no server version flag found, assuming MySQL 8.0 data encountered"
+  downgrade_detected=true
+  found_version="previous" # Just use dummy name as we don't know real version
+fi
 
 # Don't abort dpkg if downgrade is detected (as was done previously).
 # Instead simply move the old datadir and create a new for this_version.
@@ -132,8 +144,8 @@ then
   echo "Please manually export/import your data (e.g. with mysqldump) if needed." 1>&2
   mv -f "$mysql_datadir" "$mysql_datadir-$found_version"
   # Also move away the old debian.cnf file that included credentials that are
-  # no longer valid
-  mv -f /etc/mysql/debian.cnf "/etc/mysql/debian.cnf-$found_version"
+  # no longer valid. If none existed, ignore error and let dpkg continue.
+ mv -f /etc/mysql/debian.cnf "/etc/mysql/debian.cnf-$found_version" || true fi # to be sure diff --git a/debian/mariadb-test.install b/debian/mariadb-test.install index 5dda38d665c62..1cb063440c6ee 100644 --- a/debian/mariadb-test.install +++ b/debian/mariadb-test.install @@ -41,11 +41,7 @@ usr/share/mysql/mysql-test/README.stress usr/share/mysql/mysql-test/dgcov.pl usr/share/mysql/mysql-test/lib usr/share/mysql/mysql-test/mariadb-stress-test.pl -usr/share/mysql/mysql-test/mariadb-test-run usr/share/mysql/mysql-test/mariadb-test-run.pl -usr/share/mysql/mysql-test/mtr -usr/share/mysql/mysql-test/mysql-test-run -usr/share/mysql/mysql-test/mysql-test-run.pl usr/share/mysql/mysql-test/purify.supp usr/share/mysql/mysql-test/suite.pm usr/share/mysql/mysql-test/valgrind.supp diff --git a/debian/rules b/debian/rules index 5d5766a95c7be..81f313a1511c8 100755 --- a/debian/rules +++ b/debian/rules @@ -1,10 +1,7 @@ #!/usr/bin/make -f -export DH_VERBOSE=1 -export DEB_BUILD_HARDENING=1 - -# enable Debian Hardening -# see: https://wiki.debian.org/Hardening +# Enable Debian Hardening +# https://wiki.debian.org/Hardening export DEB_BUILD_MAINT_OPTIONS = hardening=+all # Disable LTO on Ubuntu, see LP: #1970634 and https://jira.mariadb.org/browse/MDEV-25633 @@ -66,8 +63,8 @@ endif # Only attempt to build with PMEM on archs that have package libpmem-dev available # See https://packages.debian.org/search?searchon=names&keywords=libpmem-dev -ifneq (,$(filter $(DEB_HOST_ARCH_CPU),amd64 arm64 ppc64el riscv64)) - CMAKEFLAGS += -DWITH_PMEM=YES +ifneq (,$(filter $(DEB_HOST_ARCH),amd64 arm64 ppc64el riscv64)) + CMAKEFLAGS += -DWITH_PMEM=ON endif # Add support for verbose builds @@ -99,6 +96,9 @@ endif # quality standards in Debian. Also building it requires an extra 4 GB of disk # space which makes native Debian builds fail as the total disk space needed # for MariaDB becomes over 10 GB. Only build CS via autobake-deb.sh. + # + # Note: Don't use '-DWITH_URING=ON' as some Buildbot builders are missing it + # and would fail permanently. PATH=$${MYSQL_BUILD_PATH:-"/usr/lib/ccache:/usr/local/bin:/usr/bin:/bin"} \ NO_UPDATE_BUILD_VERSION=1 \ dh_auto_configure --builddirectory=$(BUILDDIR) -- \ @@ -113,8 +113,7 @@ endif -DCONC_DEFAULT_CHARSET=utf8mb4 \ -DPLUGIN_AWS_KEY_MANAGEMENT=NO \ -DPLUGIN_COLUMNSTORE=NO \ - -DIGNORE_AIO_CHECK=YES \ - -DWITH_URING=YES \ + -DIGNORE_AIO_CHECK=ON \ -DDEB=$(DEB_VENDOR) # This is needed, otherwise 'make test' will run before binaries have been built @@ -129,6 +128,7 @@ override_dh_auto_test: dh_testdir # Ensure at least an empty file exists touch mysql-test/unstable-tests + # Skip unstable tests if such are defined for arch [ ! 
-f debian/unstable-tests.$(DEB_HOST_ARCH) ] || cat debian/unstable-tests.$(DEB_HOST_ARCH) >> mysql-test/unstable-tests # Run testsuite ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/debian/salsa-ci.yml b/debian/salsa-ci.yml index 2392df2be4d3d..e30a96c41e21f 100644 --- a/debian/salsa-ci.yml +++ b/debian/salsa-ci.yml @@ -7,7 +7,7 @@ include: # Override Salsa-CI with MariaDB specific variations variables: DEB_BUILD_OPTIONS: "nocheck noautodbgsym" - RELEASE: sid + RELEASE: bullseye SALSA_CI_DISABLE_REPROTEST: 1 SALSA_CI_DISABLE_MISSING_BREAKS: 0 SALSA_CI_DISABLE_RC_BUGS: 1 @@ -25,8 +25,8 @@ stages: - provisioning - build - test - - upgrade in Sid - - upgrade from Bullseye + - upgrade in Bullseye + - upgrade from Buster - upgrade extras - test extras - publish # Stage referenced by Salsa-CI template aptly stanza, so must exist even though not used @@ -51,26 +51,26 @@ build: - ccache -s # Show ccache stats to validate it worked - mv ${CCACHE_TMP_DIR} ${CCACHE_WORK_DIR} -build bullseye-backports: - extends: .build-package - variables: - RELEASE: bullseye-backports - -# base image missing git build i386: + extends: .build-package-i386 + script: + - *autobake-deb-steps + +build sid: extends: .build-package script: - - apt-get update && apt-get install -y --no-install-recommends git - *autobake-deb-steps - image: $SALSA_CI_IMAGES_BASE_I386 variables: - ARCH: 'i386' + RELEASE: sid # Build native deb without using autobake-deb.sh. This way we will detect # if the debian/control file and other packaging is correct as-is for Debian Sid. -build native deb: +build native deb amd64: extends: .build-package +build native deb i386: + extends: .build-package-i386 + autopkgtest: extends: .test-autopkgtest artifacts: @@ -86,7 +86,7 @@ blhc: stage: test extras # Build log checker needs a .build file and thus only works on native build needs: - - job: build native deb + - job: build native deb amd64 # In addition to Salsa-CI, also run these fully MariaDB specific build jobs @@ -101,13 +101,7 @@ blhc: # Avoid the warnings of "debconf: unable to initialize frontend: Dialog" echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections # Prime the apt cache so later apt commands can run - apt-get update - -# Readline was removed from Debian Sid (and Bullseye) in Feb 2021. To be able to install older -# versions of MariaDB that depend on it, fetch and install it from Buster. 
-.test-install-readline-in-sid-for-backwards-compat: &test-install-readline-in-sid-for-backwards-compat | - curl -O http://ftp.de.debian.org/debian/pool/main/r/readline5/libreadline5_5.2+dfsg-3+b13_amd64.deb - apt install -y ./libreadline5_5.2+dfsg-3+b13_amd64.deb + apt-get update -qq .test-verify-initial: &test-verify-initial | dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed @@ -118,12 +112,23 @@ blhc: mysql --table -e "SELECT * FROM plugin;" mysql mysql --table -e "SHOW PLUGINS;" mysql -.test-enable-sid-repos: &test-enable-sid-repos | +.test-enable-bullseye-repos: &test-enable-bullseye-repos # Replace any old repos with just Sid - echo 'deb http://deb.debian.org/debian sid main' > /etc/apt/sources.list + - echo 'deb http://deb.debian.org/debian bullseye main' > /etc/apt/sources.list # Upgrade minimal stack first - apt-get update - apt-get install -y apt + - apt-get update -qq + - apt-get install -y apt + +.test-enable-bullseye-backports-repos: &test-enable-bullseye-backports-repos | + # Enable bullseye-backports (assumes environment already Debian Bullseye) + echo 'deb http://deb.debian.org/debian bullseye-backports main' > /etc/apt/sources.list.d/bullseye-backports.list + # Increase default backports priority policy from '100' to '500' so it can actually be used + cat << EOF > /etc/apt/preferences.d/enable-backports-to-satisfy-dependencies + Package: * + Pin: release n=bullseye-* + Pin-Priority: 500 + EOF + apt-get update -qq .test-enable-buster-backports-repos: &test-enable-buster-backports-repos | # Enable buster-backports (assumes environment already Debian Buster) @@ -134,14 +139,14 @@ blhc: Pin: release n=buster-* Pin-Priority: 500 EOF - apt-get update + apt-get update -qq -.test-install: &test-install | +.test-install: &test-install # Install MariaDB built in this commit - apt-get install -y ./*.deb + - apt-get install -y ./*.deb # Verify installation of MariaDB built in this commit - dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed - mariadb --version # Client version + - dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed + - mariadb --version # Client version .test-verify-final: &test-verify-final | mkdir -p debug # Ensure dir exists before using it @@ -223,8 +228,8 @@ fresh install: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -mariadb Sid upgrade: - stage: upgrade in Sid +mariadb-10.5 Bullseye upgrade: + stage: upgrade in Bullseye needs: - job: build image: debian:${RELEASE} @@ -235,6 +240,9 @@ mariadb Sid upgrade: - ${WORKING_DIR}/debug script: - *test-prepare-container + # Install everything MariaDB 10.5 currently in Debian Bullseye + - apt-get install -y 'default-mysql*' 'mariadb-*' 'libmariadb*' + - *test-verify-initial - *test-install - service mariadb status # There is no init.d/mysql in MariaDB 10.5+ - *test-verify-final @@ -244,11 +252,40 @@ mariadb Sid upgrade: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -mariadb-10.5 Bullseye to mariadb upgrade: - stage: upgrade from Bullseye +mariadb-10.3 with Buster backports upgrade: + stage: upgrade extras + needs: + - job: build buster-backports + image: debian:buster + artifacts: + when: always + name: "$CI_BUILD_NAME" + paths: + - ${WORKING_DIR}/debug + script: + - *test-prepare-container + # Install everything MariaDB 10.3 currently in Debian Buster + - apt-get install -y 'default-mysql*' 'mariadb-*' 'libmariadb*' + # Verify installation of MariaDB from Buster + - *test-verify-initial 
+ # Buster backports is needed for liburing1 (>= 0.7) and galera-4 (>= 26.4) + - *test-enable-buster-backports-repos + - *test-install + # mariadb-10.3 in Buster ships a /etc/init.d/mysql so it should continue to work + - service mysql status + - service mariadb status + - *test-verify-final + variables: + GIT_STRATEGY: none + except: + variables: + - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ + +mariadb-10.3 Buster upgrade: + stage: upgrade from Buster needs: - job: build - image: debian:bullseye + image: debian:buster artifacts: when: always name: "$CI_BUILD_NAME" @@ -256,12 +293,14 @@ mariadb-10.5 Bullseye to mariadb upgrade: - ${WORKING_DIR}/debug script: - *test-prepare-container - # Install everything MariaDB currently in Debian Bullseye + # Install everything MariaDB 10.3 currently in Debian Buster - apt-get install -y 'default-mysql*' 'mariadb-*' 'libmariadb*' - # Verify installation of MariaDB from Bullseye + # Verify installation of MariaDB from Buster - *test-verify-initial - - *test-enable-sid-repos + - *test-enable-bullseye-repos - *test-install + # mariadb-10.3 in Buster ships a /etc/init.d/mysql so it should continue to work + - service mysql status - service mariadb status - *test-verify-final variables: @@ -374,41 +413,8 @@ build mariadbclient consumer Python-MySQLdb: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -libmysql* to libmariadb* upgrade: - stage: upgrade in Sid - needs: - - job: build - image: debian:${RELEASE} - artifacts: - when: always - name: "$CI_BUILD_NAME" - paths: - - ${WORKING_DIR}/debug - script: - - *test-prepare-container - # Install all libmysql* available in Debian unstable - - apt-get install -y pkg-config libmysqlclient-dev - - pkg-config --list-all - - pkg-config --cflags mysqlclient # mysqlclient.pc from original package - - apt-get install -y ./libmariadb3_*.deb ./mariadb-common_*.deb - - pkg-config --list-all - - apt-get install -y ./libmariadb-dev_*.deb - - pkg-config --list-all - - apt-get install -y ./libmariadb-dev-compat_*.deb - - pkg-config --cflags mysqlclient # mysqlclient.pc from compat package - - pkg-config --list-all - - apt-get install -y ./libmariadbd19_*.deb - - pkg-config --list-all - - apt-get install -y ./libmariadbd-dev_*.deb - - pkg-config --list-all - - apt-get install -y default-libmysqlclient-dev default-libmysqld-dev - - *test-verify-libs - except: - variables: - - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ - -default-libmysqlclient-dev Sid upgrade: - stage: upgrade in Sid +default-libmysqlclient-dev Bullseye upgrade: + stage: upgrade in Bullseye needs: - job: build image: debian:${RELEASE} @@ -427,11 +433,11 @@ default-libmysqlclient-dev Sid upgrade: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -default-libmysqlclient-dev Bullseye upgrade: - stage: upgrade from Bullseye +default-libmysqlclient-dev Buster upgrade: + stage: upgrade from Buster needs: - job: build - image: debian:bullseye + image: debian:buster artifacts: when: always name: "$CI_BUILD_NAME" @@ -441,7 +447,7 @@ default-libmysqlclient-dev Bullseye upgrade: - *test-prepare-container - apt-get install -y pkg-config default-libmysqlclient-dev - pkg-config --list-all - - *test-enable-sid-repos + - *test-enable-bullseye-repos - *test-install-all-libs - *test-verify-libs except: @@ -450,50 +456,14 @@ default-libmysqlclient-dev Bullseye upgrade: # Upgrading from MySQL 8.0 with datadir in place is 
not possible. Users need to do a data dump. # The Debian maintainer scripts detect this situation and simply moves old datadir aside and start fresh. -mysql-8.0 Sid to mariadb upgrade: - stage: upgrade in Sid - needs: - - job: build - image: debian:sid - artifacts: - when: always - name: "$CI_BUILD_NAME" - paths: - - ${WORKING_DIR}/debug - script: - - *test-prepare-container - # The postinst fails often if 'ps' is missing from system, so install procps - - apt-get install -y procps mysql-server 'libmysqlc*' - - *test-verify-initial - - *test-install - # The Debian version of MariaDB 10.6 still maintains compatibility and there - # running 'service mysql status' in Salsa-CI job 'mysql-8.0 Sid to - # mariadb-10.6 upgrade' still works. - # - # However, due to debian/control changes, the upstream MariaDB 10.6 when - # installed on a system with a previous installation of MySQL 8.0 will first - # fully remove MySQL, including the /etc/init.d/mysql file, so previous - # techniques in mariadb-server-10.6.postinst to maintain backwards - # compatibility with 'service mysql status' after installing MariaDB on top - # MySQL no longer works, and thus the step to test it now intentionally has - # a fallback to use the service name 'mariadb' instead, and the fallback is - # always used. - - sleep 5 # Give the mysql_upgrade a bit of time to complete before querying the server - - service mysql status || service mariadb status - - *test-verify-final - variables: - GIT_STRATEGY: none - except: - variables: - - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ - -# Upgrading from MySQL 8.0 with datadir in place is not possible. Users need to do a data dump. -# The Debian maintainer scripts detect this situation and simply moves old datadir aside and start fresh. -mysql-8.0 Jammy to mariadb upgrade: +# +# Testing on Focal binaries on Buster works. Using Jammy binaries on Bullseye +# does not work as libc in Jammy is too new. +mysql-8.0 from Ubuntu 22.04 upgrade: stage: upgrade extras needs: - - job: build bullseye-backports - image: debian:bullseye + - job: build + image: debian:${RELEASE} artifacts: when: always name: "$CI_BUILD_NAME" @@ -505,13 +475,11 @@ mysql-8.0 Jammy to mariadb upgrade: - apt-get install --no-install-recommends --yes gpg gpg-agent dirmngr ca-certificates # Bare minimal (<4MB) for apt-key to work - apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com:443 871920D1991BC93C 3B4FE6ACC0B21F32 - echo "deb http://archive.ubuntu.com/ubuntu/ focal main restricted" > /etc/apt/sources.list.d/ubuntu.list - - apt-get update + - apt-get update -qq # First install often fail due to bug in mysql-8.0 - apt-get install -y mysql-server 'libmysqlc*' || true - sleep 10 && apt-get install -f - *test-verify-initial - # Enable backports to make galera-4 available - - echo "deb http://deb.debian.org/debian bullseye-backports main" > /etc/apt/sources.list.d/backports.list && apt-get update - *test-install - service mysql status - sleep 5 # Give the mysql_upgrade a bit of time to complete before querying the server @@ -588,6 +556,43 @@ mariadb.org-10.8 to mariadb upgrade: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ allow_failure: true +# Upgrading from MySQL 8.0 with datadir in place is not possible. Users need to do a data dump. +# The Debian maintainer scripts detect this situation and simply moves old datadir aside and start fresh. 
+mysql-community-cluster-8.0 from MySQL.com upgrade: + stage: upgrade extras + needs: + - job: build + image: debian:${RELEASE} + artifacts: + when: always + name: "$CI_BUILD_NAME" + paths: + - ${WORKING_DIR}/debug + script: + - *test-prepare-container + - apt-get install --no-install-recommends --yes ca-certificates curl systemctl + - curl -sS "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x859be8d7c586f538430b19c2467b942d3a79bd29" -o /etc/apt/trusted.gpg.d/mysql.asc + - echo "deb https://repo.mysql.com/apt/debian/ bullseye mysql-cluster-8.0" > /etc/apt/sources.list.d/mysql.list + - apt-get update -qq + - apt-get install -y mysql-cluster-community-server + - sed 's/ExecStartPre=+/ExecStartPre=/' -i /lib/systemd/system/mysql.service # Hack to make file compatible with systemctl shim + - systemctl start mysql + - dpkg -l | grep -iE 'maria|mysql|galera' + - systemctl status mysql; mysql -e 'SELECT VERSION()' + - systemctl stop mysql # Stop manually as maintainer scripts don't handle this with systemctl shim + - *test-install + # Ignore systemctl shim result as MariaDB systemd file is incompatible with it and yields: + # ERROR:systemctl:the ExecStartPre control process exited with error code + - systemctl status mysql || true + - mysql -e 'SELECT VERSION()' || true + - sleep 5 # Give the mysql_upgrade a bit of time to complete before querying the server + - *test-verify-final + variables: + GIT_STRATEGY: none + except: + variables: + - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ + mariadb.org-10.7 to mariadb upgrade: stage: upgrade extras needs: @@ -633,10 +638,10 @@ mariadb.org-10.6 to mariadb upgrade: - ${WORKING_DIR}/debug script: - *test-prepare-container - - apt install -y curl + - apt-get -qq install --no-install-recommends --yes ca-certificates curl - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc - echo "deb https://deb.mariadb.org/10.6/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list - - apt-get update + - apt-get update -qq - apt-get install -y mariadb-server-10.6 - *test-verify-initial # Install MariaDB built in this commit @@ -665,16 +670,13 @@ mariadb.org-10.5 to mariadb upgrade: - ${WORKING_DIR}/debug script: - *test-prepare-container - - apt install -y curl + - apt-get -qq install --no-install-recommends --yes ca-certificates curl - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc - echo "deb https://archive.mariadb.org/mariadb-10.5/repo/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list - - apt-get update + - apt-get update -qq - apt-get install -y mariadb-server-10.5 - *test-verify-initial - # Install MariaDB built in this commit - # Verify installation of MariaDB built in this commit - - dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed - - mariadb --version # Client version + - *test-install - service mariadb status # There is no init.d/mysql in MariaDB 10.5+ - *test-verify-final variables: @@ -683,11 +685,11 @@ mariadb.org-10.5 to mariadb upgrade: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -mariadb.org-10.4 to mariadb upgrade: +mariadb.org-10.4 to mariadb with Buster backports upgrade: stage: upgrade extras needs: - - job: build - image: debian:${RELEASE} + - job: build buster-backports + image: debian:buster artifacts: when: always name: "$CI_BUILD_NAME" @@ -695,16 +697,17 @@ mariadb.org-10.4 to mariadb upgrade: - ${WORKING_DIR}/debug 
script: - *test-prepare-container - - apt install -y curl systemctl # systemctl shim needed on platforms that don't have systemd + - apt-get -qq install --no-install-recommends --yes ca-certificates curl - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc - - echo "deb https://archive.mariadb.org/mariadb-10.4/repo/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list - - apt-get update - - *test-install-readline-in-sid-for-backwards-compat + - echo "deb https://archive.mariadb.org/mariadb-10.4/repo/debian buster main" > /etc/apt/sources.list.d/mariadb.list + - apt-get update -qq - apt-get install -y mariadb-server-10.4 # MariaDB.org version of 10.4 and early 10.5 do not install an init file, so # it must be installed here manually - cp /usr/share/mysql/mysql.init /etc/init.d/mysql; chmod +x /etc/init.d/mysql; service mysql start; sleep 5 - *test-verify-initial + # Buster backports is needed for liburing1 (>= 0.7) and galera-4 (>= 26.4) + - *test-enable-buster-backports-repos - *test-install - sleep 5 # Give the mysql_upgrade a bit of time to complete before querying the server - service mysql status @@ -716,11 +719,11 @@ mariadb.org-10.4 to mariadb upgrade: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -mariadb.org-10.3 to mariadb upgrade: +mariadb.org-10.3 to mariadb with Buster backports upgrade: stage: upgrade extras needs: - - job: build - image: debian:${RELEASE} + - job: build bullseye-backports + image: debian:bullseye artifacts: when: always name: "$CI_BUILD_NAME" @@ -728,13 +731,14 @@ mariadb.org-10.3 to mariadb upgrade: - ${WORKING_DIR}/debug script: - *test-prepare-container - - apt install -y curl + - apt-get -qq install --no-install-recommends --yes ca-certificates curl - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc - - echo "deb https://archive.mariadb.org/mariadb-10.3/repo/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list - - apt-get update - - *test-install-readline-in-sid-for-backwards-compat + - echo "deb https://archive.mariadb.org/mariadb-10.3/repo/debian buster main" > /etc/apt/sources.list.d/mariadb.list + - apt-get update -qq - apt-get install -y mariadb-server-10.3 - *test-verify-initial + # Buster backports is needed for liburing1 (>= 0.7) and galera-4 (>= 26.4) + - *test-enable-buster-backports-repos - *test-install - service mysql status # Give the mariadb-upgrade plenty of time to complete, otherwise next commands @@ -747,7 +751,10 @@ mariadb.org-10.3 to mariadb upgrade: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -mariadb.org-10.2 to mariadb upgrade: +# archive.mariadb.org has for 10.2 only Stretch, so we can't test upgrades to +# 10.6 with only Buster and Bullseye builds + +mysql.com-5.7 upgrade: stage: upgrade extras needs: - job: build @@ -759,26 +766,25 @@ mariadb.org-10.2 to mariadb upgrade: - ${WORKING_DIR}/debug script: - *test-prepare-container - - apt install -y curl - - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc - - echo "deb https://archive.mariadb.org/mariadb-10.2/repo/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list - - apt-get update - - *test-install-readline-in-sid-for-backwards-compat - - apt-get install -y mariadb-server-10.2 - # Verify initial state before upgrade - - dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed - - service mysql status 
- # prepending with --defaults-file=/etc/mysql/debian.cnf is needed in upstream 5.5–10.3 - | - mysql --defaults-file=/etc/mysql/debian.cnf --skip-column-names -e "SELECT @@version, @@version_comment" - mysql --defaults-file=/etc/mysql/debian.cnf --table -e "SHOW DATABASES;" - mysql --defaults-file=/etc/mysql/debian.cnf --table -e "SELECT * FROM mysql.user; SHOW CREATE USER root@localhost;" - mysql --defaults-file=/etc/mysql/debian.cnf --table -e "SELECT * FROM mysql.plugin; SHOW PLUGINS;" + apt-get install --no-install-recommends --yes gpg gpg-agent dirmngr ca-certificates # Bare minimal (<4MB) for apt-key to work + apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com:443 467B942D3A79BD29 + echo "deb https://repo.mysql.com/apt/debian/ bullseye mysql-5.7" > /etc/apt/sources.list.d/mysql.list + apt-get update -qq + apt-get install -y 'mysql*' 'libmysqlc*' + - *test-verify-initial - *test-install - - service mysql status - # Give the mariadb-upgrade plenty of time to complete, otherwise next commands - # fail on non-existing mariadb.sys user - - sleep 15 + # Due to some (currently unknown) changes in MySQL 5.7 packaging or apt + # behaviour changes, a system with a previous installation of MySQL will + # on upgrades to MariaDB first fully remove MySQL, including the + # /etc/init.d/mysql file, so previous techniques in + # mariadb-server-10.6.postinst to maintain backwards compatibility with + # 'service mysql status' after installing MariaDB on top MySQL no longer + # works. Thus the step to test it now intentionally has a fallback to use + # the service name 'mariadb' instead, and the fallback is always used. + - sleep 5 # Give the mysql_upgrade a bit of time to complete before querying the server + - service mariadb status # There is no init.d/mysql in MariaDB 10.5+ + - sleep 15 # Give the mysql_upgrade a bit of extra time to complete with MySQL 5.7 before querying the server - *test-verify-final variables: GIT_STRATEGY: none @@ -786,11 +792,11 @@ mariadb.org-10.2 to mariadb upgrade: variables: - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/ -mysql.com-5.7 to mariadb upgrade: +percona-xtradb-5.7 upgrade: stage: upgrade extras needs: - - job: build bullseye-backports - image: debian:bullseye + - job: build + image: debian:${RELEASE} artifacts: when: always name: "$CI_BUILD_NAME" @@ -800,15 +806,14 @@ mysql.com-5.7 to mariadb upgrade: - *test-prepare-container - | apt-get install --no-install-recommends --yes gpg gpg-agent dirmngr ca-certificates # Bare minimal (<4MB) for apt-key to work - apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com:443 467B942D3A79BD29 - echo "deb https://repo.mysql.com/apt/debian/ bullseye mysql-5.7" > /etc/apt/sources.list.d/mysql.list - apt-get update - apt-get install -y 'mysql*' 'libmysqlc*' + apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com:443 9334A25F8507EFA5 + echo "deb https://repo.percona.com/apt/ ${RELEASE} main" > /etc/apt/sources.list.d/mysql.list + apt-get update -qq + apt-get install -y percona-xtradb-cluster-full-57 percona-xtrabackup-24 percona-toolkit pmm2-client + - service mysql status - *test-verify-initial - # Enable backports to make galera-4 available - - echo "deb http://deb.debian.org/debian bullseye-backports main" >> /etc/apt/sources.list.d/backports.list && apt-get update - *test-install - - service mariadb status # There is no init.d/mysql in MariaDB 10.5+ + - service mysql status - sleep 15 # Give the mysql_upgrade a bit of extra time to complete with MySQL 5.7 
before querying the server - *test-verify-final variables: diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index c3b909fc9b981..093cffd6df709 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -1759,6 +1759,20 @@ apply_log_finish() return(true); } +class Copy_back_dst_dir +{ + std::string buf; + +public: + const char *make(const char *path) + { + if (!path || !path[0]) + return mysql_data_home; + if (is_absolute_path(path)) + return path; + return buf.assign(mysql_data_home).append(path).c_str(); + } +}; bool copy_back() @@ -1766,7 +1780,7 @@ copy_back() bool ret = false; datadir_iter_t *it = NULL; datadir_node_t node; - char *dst_dir; + const char *dst_dir; memset(&node, 0, sizeof(node)); @@ -1830,9 +1844,9 @@ copy_back() /* copy undo tablespaces */ + Copy_back_dst_dir dst_dir_buf; - dst_dir = (srv_undo_dir && *srv_undo_dir) - ? srv_undo_dir : mysql_data_home; + dst_dir = dst_dir_buf.make(srv_undo_dir); ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); @@ -1853,8 +1867,7 @@ copy_back() /* copy redo logs */ - dst_dir = (srv_log_group_home_dir && *srv_log_group_home_dir) - ? srv_log_group_home_dir : mysql_data_home; + dst_dir = dst_dir_buf.make(srv_log_group_home_dir); /* --backup generates a single ib_logfile0, which we must copy. */ @@ -1867,8 +1880,7 @@ copy_back() /* copy innodb system tablespace(s) */ - dst_dir = (innobase_data_home_dir && *innobase_data_home_dir) - ? innobase_data_home_dir : mysql_data_home; + dst_dir = dst_dir_buf.make(innobase_data_home_dir); ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); diff --git a/extra/mariabackup/xbcloud.cc b/extra/mariabackup/xbcloud.cc index cee76e5f3d7a7..588a15eb791dd 100644 --- a/extra/mariabackup/xbcloud.cc +++ b/extra/mariabackup/xbcloud.cc @@ -2534,7 +2534,7 @@ swift_keystone_auth_v3(const char *auth_url, swift_auth_info *info) } else if (opt_swift_project != NULL) { snprintf(scope, sizeof(scope), ",\"scope\":{\"project\":{\"name\":\"%s\"%s}}", - opt_swift_project_id, domain); + opt_swift_project, domain); } snprintf(payload, sizeof(payload), "{\"auth\":{\"identity\":" diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 1d936b3d544d8..77bd0a081d49e 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -1106,7 +1106,8 @@ struct my_option xb_client_options[]= { (G_PTR *) &xtrabackup_print_param, (G_PTR *) &xtrabackup_print_param, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"use-memory", OPT_XTRA_USE_MEMORY, - "The value is used instead of buffer_pool_size", + "The value is used in place of innodb_buffer_pool_size. 
" + "This option is only relevant when the --prepare option is specified.", (G_PTR *) &xtrabackup_use_memory, (G_PTR *) &xtrabackup_use_memory, 0, GET_LL, REQUIRED_ARG, 100 * 1024 * 1024L, 1024 * 1024L, LONGLONG_MAX, 0, 1024 * 1024L, 0}, @@ -1843,6 +1844,12 @@ static void print_version(void) my_progname, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); } +static void concatenate_default_groups(std::vector &backup_load_groups, const char **default_groups) +{ + for ( ; *default_groups ; default_groups++) + backup_load_groups.push_back(*default_groups); +} + static void usage(void) { puts("Open source backup tool for InnoDB and XtraDB\n\ @@ -1863,7 +1870,11 @@ GNU General Public License for more details.\n\ You can download full text of the license on http://www.gnu.org/licenses/gpl-2.0.txt\n"); printf("Usage: %s [--defaults-file=#] [--backup | --prepare | --copy-back | --move-back] [OPTIONS]\n",my_progname); - print_defaults("my", load_default_groups); + std::vector backup_load_default_groups; + concatenate_default_groups(backup_load_default_groups, backup_default_groups); + concatenate_default_groups(backup_load_default_groups, load_default_groups); + backup_load_default_groups.push_back(nullptr); + print_defaults("my", &backup_load_default_groups[0]); my_print_help(xb_client_options); my_print_help(xb_server_options); my_print_variables(xb_server_options); @@ -2299,7 +2310,11 @@ static bool innodb_init() ut_ad(recv_no_log_write); buf_flush_sync(); recv_sys.debug_free(); - DBUG_ASSERT(!buf_pool.any_io_pending()); + ut_ad(!os_aio_pending_reads()); + ut_ad(!os_aio_pending_writes()); + ut_d(mysql_mutex_lock(&buf_pool.flush_list_mutex)); + ut_ad(!buf_pool.get_oldest_modification(0)); + ut_d(mysql_mutex_unlock(&buf_pool.flush_list_mutex)); log_sys.close_file(); if (xtrabackup_incremental) @@ -6740,6 +6755,7 @@ int main(int argc, char **argv) */ if (strcmp(argv[1], "--mysqld") == 0) { + srv_operation= SRV_OPERATION_EXPORT_RESTORED; extern int mysqld_main(int argc, char **argv); argc--; argv++; diff --git a/include/m_ctype.h b/include/m_ctype.h index 708107ff120da..2f1767433ad18 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -391,6 +391,28 @@ typedef enum enum_collation_name_mode #define MY_STRXFRM_REVERSE_LEVEL6 0x00200000 /* if reverse order for level6 */ #define MY_STRXFRM_REVERSE_SHIFT 16 +/* Flags to strnncollsp_nchars */ +/* + MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES - + defines if inside strnncollsp_nchars() + short strings should be virtually extended to "nchars" + characters by emulating trimmed trailing spaces. + + This flag is needed when comparing packed strings of the CHAR + data type, when trailing spaces are trimmed on storage (like in InnoDB), + however the actual values (after unpacking) will have those trailing + spaces. + + If this flag is passed, strnncollsp_nchars() performs both + truncating longer strings and extending shorter strings + to exactly "nchars". + + If this flag is not passed, strnncollsp_nchars() only truncates longer + strings to "nchars", but does not extend shorter strings to "nchars". +*/ +#define MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES 1 + + /* Collation IDs for MariaDB that should not conflict with MySQL. 
We reserve 256..511, because MySQL will most likely use this range @@ -526,7 +548,8 @@ struct my_collation_handler_st int (*strnncollsp_nchars)(CHARSET_INFO *, const uchar *str1, size_t len1, const uchar *str2, size_t len2, - size_t nchars); + size_t nchars, + uint flags); size_t (*strnxfrm)(CHARSET_INFO *, uchar *dst, size_t dstlen, uint nweights, const uchar *src, size_t srclen, uint flags); diff --git a/include/m_string.h b/include/m_string.h index 9e35be404ec85..046dc39d13ab5 100644 --- a/include/m_string.h +++ b/include/m_string.h @@ -199,9 +199,22 @@ extern ulonglong strtoull(const char *str, char **ptr, int base); #include -#define STRING_WITH_LEN(X) (X), ((size_t) (sizeof(X) - 1)) -#define USTRING_WITH_LEN(X) ((uchar*) X), ((size_t) (sizeof(X) - 1)) -#define C_STRING_WITH_LEN(X) ((char *) (X)), ((size_t) (sizeof(X) - 1)) +#ifdef __cplusplus +#include +template inline const char *_swl_check(T s) +{ + static_assert(std::is_same::value + || std::is_same::value, + "Wrong argument for STRING_WITH_LEN()"); + return s; +} +#define STRING_WITH_LEN(X) _swl_check(X), ((size_t) (sizeof(X) - 1)) +#else +#define STRING_WITH_LEN(X) (X ""), ((size_t) (sizeof(X) - 1)) +#endif + +#define USTRING_WITH_LEN(X) (uchar*) STRING_WITH_LEN(X) +#define C_STRING_WITH_LEN(X) (char *) STRING_WITH_LEN(X) #define LEX_STRING_WITH_LEN(X) (X).str, (X).length typedef struct st_mysql_const_lex_string LEX_CSTRING; diff --git a/include/mysql/service_encryption.h b/include/mysql/service_encryption.h index 280b9c69e3562..4963940758c53 100644 --- a/include/mysql/service_encryption.h +++ b/include/mysql/service_encryption.h @@ -30,8 +30,6 @@ #ifndef __cplusplus #define inline __inline #endif -#else -#include #endif #endif diff --git a/include/ssl_compat.h b/include/ssl_compat.h index 664f3aac87ca5..3678e5fa08481 100644 --- a/include/ssl_compat.h +++ b/include/ssl_compat.h @@ -19,7 +19,8 @@ /* OpenSSL version specific definitions */ #if defined(OPENSSL_VERSION_NUMBER) -#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x10100000L && \ + !(defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x30500000L) #define HAVE_OPENSSL11 1 #define SSL_LIBRARY OpenSSL_version(OPENSSL_VERSION) #define ERR_remove_state(X) ERR_clear_error() diff --git a/libmysqld/libmysql.c b/libmysqld/libmysql.c index 6c7e0b3b8124b..7f55b0c2726b9 100644 --- a/libmysqld/libmysql.c +++ b/libmysqld/libmysql.c @@ -2953,7 +2953,8 @@ my_bool STDCALL mysql_stmt_bind_param(MYSQL_STMT *stmt, MYSQL_BIND *my_bind) break; default: strmov(stmt->sqlstate, unknown_sqlstate); - sprintf(stmt->last_error, + snprintf(stmt->last_error, + sizeof(stmt->last_error), ER(stmt->last_errno= CR_UNSUPPORTED_PARAM_TYPE), param->buffer_type, count); DBUG_RETURN(1); @@ -3040,7 +3041,9 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, { /* Long data handling should be used only for string/binary types */ strmov(stmt->sqlstate, unknown_sqlstate); - sprintf(stmt->last_error, ER(stmt->last_errno= CR_INVALID_BUFFER_USE), + snprintf(stmt->last_error, + sizeof(stmt->last_error), + ER(stmt->last_errno= CR_INVALID_BUFFER_USE), param->param_number); DBUG_RETURN(1); } @@ -4171,7 +4174,8 @@ my_bool STDCALL mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *my_bind) if (setup_one_fetch_function(param, field)) { strmov(stmt->sqlstate, unknown_sqlstate); - sprintf(stmt->last_error, + snprintf(stmt->last_error, + sizeof(stmt->last_error), ER(stmt->last_errno= CR_UNSUPPORTED_PARAM_TYPE), field->type, param_count); 
DBUG_RETURN(1); diff --git a/man/mysql_upgrade.1 b/man/mysql_upgrade.1 index c4041f281a519..198efcac7473a 100644 --- a/man/mysql_upgrade.1 +++ b/man/mysql_upgrade.1 @@ -656,7 +656,8 @@ The MariaDB user name to use when connecting to the server and not using the cur Display more output about the process\&. Using it twice will print connection arguments; using it 3 times will print out all CHECK, RENAME and ALTER TABLE commands used during the check phase; using it 4 times (added in MariaDB 10.0.14) -will also write out all mysqlcheck commands used\&. +will also write out all mariadb-check commands used; using it 5 times will print all +the mariadb commands used and their results while running mysql_fix_privilege_tables script\&. .RE .sp .RS 4 diff --git a/mysql-test/include/analyze-format.inc b/mysql-test/include/analyze-format.inc index 7d1c48f3e6ffd..4b24f3cc6f4a5 100644 --- a/mysql-test/include/analyze-format.inc +++ b/mysql-test/include/analyze-format.inc @@ -1,3 +1,3 @@ # The time on ANALYSE FORMAT=JSON is rather variable ---replace_regex /("(r_total_time_ms|r_table_time_ms|r_other_time_ms|r_buffer_size|r_filling_time_ms|r_query_time_in_progress_ms)": )[^, \n]*/\1"REPLACED"/ +--replace_regex /("(r_[a-z_]*_time(_in_progress)?_ms|r_buffer_size)": )[^, \n]*/\1"REPLACED"/ diff --git a/mysql-test/include/ctype_nopad_prefix_unique.inc b/mysql-test/include/ctype_nopad_prefix_unique.inc new file mode 100644 index 0000000000000..b25128e9fef17 --- /dev/null +++ b/mysql-test/include/ctype_nopad_prefix_unique.inc @@ -0,0 +1,85 @@ +--echo # +--echo # MDEV-30034 UNIQUE USING HASH accepts duplicate entries for tricky collations +--echo # + +# TEXT + +if (`SELECT UPPER(@@storage_engine) != 'MEMORY'`) +{ +EXECUTE IMMEDIATE REPLACE( + 'CREATE TABLE t1 ( ' + ' a TEXT COLLATE ,' + 'UNIQUE(a(3)))', + '', @@collation_connection); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('ss '); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; + + +EXECUTE IMMEDIATE REPLACE( + 'CREATE TABLE t1 ( ' + ' a TEXT COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', + '', @@collation_connection); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('ss '); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; +} + + +# VARCHAR + +EXECUTE IMMEDIATE REPLACE( + 'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)))', + '', @@collation_connection); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('ss '); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; + + +EXECUTE IMMEDIATE REPLACE( + 'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', + '', @@collation_connection); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('ss '); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; + +# CHAR + +# MyISAM is buggy on CHAR+BTREE+UNIQUE+PREFIX (see MDEV-30048), disable for now +# Other engines work fine + +if (`SELECT UPPER(@@storage_engine) != 'MYISAM'`) +{ +EXECUTE IMMEDIATE REPLACE( + 'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)))', + '', @@collation_connection); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; +} + +EXECUTE IMMEDIATE REPLACE( + 'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', + '', @@collation_connection); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 
0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; diff --git a/mysql-test/include/expect_crash.inc b/mysql-test/include/expect_crash.inc index af8b090810415..b4bd9828a089f 100644 --- a/mysql-test/include/expect_crash.inc +++ b/mysql-test/include/expect_crash.inc @@ -1,5 +1,5 @@ ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect # There should be a debug crash after using this .inc file --exec echo "wait" > $_expect_file_name diff --git a/mysql-test/include/explain_non_select.inc b/mysql-test/include/explain_non_select.inc index d22310c98133c..bd0962d387640 100644 --- a/mysql-test/include/explain_non_select.inc +++ b/mysql-test/include/explain_non_select.inc @@ -788,6 +788,22 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5); DROP TABLE t1; +--echo #75 + +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +--let $query = INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +--source include/explain_utils.inc +DROP TABLE t1; + +--echo #76 + +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +CREATE TABLE t2 (a INT, b INT); +INSERT INTO t2 VALUES (1,10), (3,10), (7,11), (3,11); +--let $query = INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +--source include/explain_utils.inc +DROP TABLE t1,t2; + --echo # --echo # Bug #12949629: CLIENT LOSES CONNECTION AFTER EXECUTING A PROCEDURE WITH --echo # EXPLAIN UPDATE/DEL/INS diff --git a/mysql-test/include/kill_galera.inc b/mysql-test/include/kill_galera.inc index d7f665df6c750..aba672d8a8928 100644 --- a/mysql-test/include/kill_galera.inc +++ b/mysql-test/include/kill_galera.inc @@ -1,8 +1,8 @@ --echo Killing server ... 
# Write file to make mysql-test-run.pl expect the crash, but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Kill the connected server diff --git a/mysql-test/include/kill_mysqld.inc b/mysql-test/include/kill_mysqld.inc index 86ee048a0f18c..01ee7f82bdc13 100644 --- a/mysql-test/include/kill_mysqld.inc +++ b/mysql-test/include/kill_mysqld.inc @@ -1,5 +1,5 @@ ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --echo # Kill the server --exec echo "wait" > $_expect_file_name diff --git a/mysql-test/include/shutdown_mysqld.inc b/mysql-test/include/shutdown_mysqld.inc index db0cfb82c68a9..fc2972560c37a 100644 --- a/mysql-test/include/shutdown_mysqld.inc +++ b/mysql-test/include/shutdown_mysqld.inc @@ -22,8 +22,8 @@ if ($rpl_inited) } # Write file to make mysql-test-run.pl expect the "crash", but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Avoid warnings from connection threads that does not have time to exit diff --git a/mysql-test/main/analyze_format_json.result b/mysql-test/main/analyze_format_json.result index 6c9b8ce298293..619e00afb3edc 100644 --- a/mysql-test/main/analyze_format_json.result +++ b/mysql-test/main/analyze_format_json.result @@ -171,7 +171,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "1Kb", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 20, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 60 } } ] @@ -218,7 +222,11 @@ ANALYZE "buffer_size": "1Kb", "join_type": "BNL", "attached_condition": "tbl1.c > tbl2.c", - "r_filtered": 15.83333333 + "r_loops": 20, + "r_filtered": 15.83333333, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 60 } } ] @@ -724,7 +732,11 @@ ANALYZE "buffer_size": "65", "join_type": "BNL", "attached_condition": "(t2.b,t2.b in (subquery#2))", - "r_filtered": null + "r_loops": 2, + "r_filtered": null, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 0 } } ], @@ -824,7 +836,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "1", "join_type": "BNL", - "r_filtered": null + "r_loops": 2, + "r_filtered": null, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 0 } } ], @@ -862,7 +878,11 @@ ANALYZE "buffer_size": "65", "join_type": "BNL", "attached_condition": "t2.f2 = t3.f3", - "r_filtered": null + "r_loops": 0, + "r_filtered": null, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": null } } ] @@ -947,3 +967,139 @@ ANALYZE } } drop table t0,t1,t2; +# +# MDEV-30806: ANALYZE FORMAT=JSON: better support for BNL and BNL-H joins +# +create table t10 ( +a int, +b int +); +insert into t10 select seq, seq/3 from seq_0_to_999; +create table t11 ( +a int, +b int +); +insert into t11 select seq, seq/5 
from seq_0_to_999; +analyze table t10,t11 persistent for all; +Table Op Msg_type Msg_text +test.t10 analyze status Engine-independent statistics collected +test.t10 analyze status OK +test.t11 analyze status Engine-independent statistics collected +test.t11 analyze status OK +analyze format=json +select * from t10, t11 +where +t10.a < 700 and +t11.a < 100 +and t10.b=t11.b; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t11", + "access_type": "ALL", + "r_loops": 1, + "rows": 1000, + "r_rows": 1000, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 10, + "r_filtered": 10, + "attached_condition": "t11.a < 100" + } + }, + { + "block-nl-join": { + "table": { + "table_name": "t10", + "access_type": "ALL", + "r_loops": 1, + "rows": 1000, + "r_rows": 1000, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 70, + "r_filtered": 70, + "attached_condition": "t10.a < 700" + }, + "buffer_type": "flat", + "buffer_size": "1Kb", + "join_type": "BNL", + "attached_condition": "t10.b = t11.b", + "r_loops": 100, + "r_filtered": 0.424285714, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 700 + } + } + ] + } +} +set @tmp=@@join_cache_level, join_cache_level=6; +analyze format=json +select * from t10, t11 +where +t10.a < 700 and +t11.a < 100 +and t10.b=t11.b; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t11", + "access_type": "ALL", + "r_loops": 1, + "rows": 1000, + "r_rows": 1000, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 10, + "r_filtered": 10, + "attached_condition": "t11.a < 100 and t11.b is not null" + } + }, + { + "block-nl-join": { + "table": { + "table_name": "t10", + "access_type": "hash_ALL", + "key": "#hash#$hj", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["test.t11.b"], + "r_loops": 1, + "rows": 1000, + "r_rows": 1000, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 70, + "r_filtered": 70, + "attached_condition": "t10.a < 700" + }, + "buffer_type": "flat", + "buffer_size": "3Kb", + "join_type": "BNLH", + "attached_condition": "t10.b = t11.b", + "r_loops": 100, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 2.97 + } + } + ] + } +} +set join_cache_level=@tmp; +drop table t10, t11; diff --git a/mysql-test/main/analyze_format_json.test b/mysql-test/main/analyze_format_json.test index 3f3324e9eec52..84f44869afe68 100644 --- a/mysql-test/main/analyze_format_json.test +++ b/mysql-test/main/analyze_format_json.test @@ -226,3 +226,45 @@ create table t2 as select * from t1; --source include/analyze-format.inc analyze format=json select a, (select t2.b from t2 where t2.a 0; +cast(json_extract(@out,'$[0]') as DOUBLE) > 0 +1 +drop table t1,t2; +# +# Now, check the hashed, BNL-H join +# +create table t1 ( +a int, +b int +); +insert into t1 select seq, seq/3 from seq_0_to_499; +create table t2 ( +a int, +b int +); +insert into t2 select seq, seq/5 from seq_0_to_499; +set @tmp=@@join_cache_level, join_cache_level=6; +select '$out' as X; +X +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "r_loops": 1, + "rows": 500, + "r_rows": 500, + 
"r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 100, + "r_filtered": 100, + "attached_condition": "t1.a < 700 and t1.b is not null" + } + }, + { + "block-nl-join": { + "table": { + "table_name": "t2", + "access_type": "hash_ALL", + "key": "#hash#$hj", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["test.t1.b"], + "r_loops": 1, + "rows": 500, + "r_rows": 500, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 100, + "r_filtered": 20, + "attached_condition": "t2.a < 100" + }, + "buffer_type": "flat", + "buffer_size": "18Kb", + "join_type": "BNLH", + "attached_condition": "t2.b = t1.b", + "r_loops": 500, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 0.594 + } + } + ] + } +} +set @out=(select json_extract(@js,'$**.block-nl-join.r_unpack_time_ms')); +select cast(json_extract(@out,'$[0]') as DOUBLE) > 0; +cast(json_extract(@out,'$[0]') as DOUBLE) > 0 +1 +set join_cache_level=@tmp; +drop table t1, t2; +# +# MDEV-30972: ANALYZE FORMAT=JSON: some time is unaccounted-for in BNL-H join +# +create table t1 ( +a int, +col1 varchar(100), +col2 varchar(100), +col3 varchar(100) +); +insert into t1 select +seq/100, +concat('col1-', seq), +concat('col1-', seq), +concat('col1-', seq) +from seq_1_to_1000; +create table t2 ( +a int, +col1 varchar(100), +col2 varchar(100), +col3 varchar(100) +); +insert into t2 select +seq/100, +concat('col1-', seq), +concat('col1-', seq), +concat('col1-', seq) +from seq_1_to_2000; +set @tmp=@@join_cache_level, join_cache_level=6; +select '$out' as X; +X +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "r_loops": 1, + "rows": 1000, + "r_rows": 1000, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 100, + "r_filtered": 100, + "attached_condition": "t1.a is not null" + } + }, + { + "block-nl-join": { + "table": { + "table_name": "t2", + "access_type": "hash_ALL", + "key": "#hash#$hj", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t1.a"], + "r_loops": 1, + "rows": 2000, + "r_rows": 2000, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "filtered": 100, + "r_filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNLH", + "attached_condition": "t2.a = t1.a and concat(t1.col1,t1.col2,t1.col3) = concat(t2.col1,t2.col2,t2.col3)", + "r_loops": 1000, + "r_filtered": 1.025630506, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 97.501 + } + } + ] + } +} +set @out=(select json_extract(@js,'$**.block-nl-join.r_other_time_ms')); +select cast(json_extract(@out,'$[0]') as DOUBLE) > 0; +cast(json_extract(@out,'$[0]') as DOUBLE) > 0 +1 +set join_cache_level=@tmp; +drop table t1, t2; diff --git a/mysql-test/main/analyze_format_json_timings.test b/mysql-test/main/analyze_format_json_timings.test new file mode 100644 index 0000000000000..fe02c31838893 --- /dev/null +++ b/mysql-test/main/analyze_format_json_timings.test @@ -0,0 +1,127 @@ +# +# Tests to check that r_something_time_ms is non-zero in +# ANALYZE FORMAT=JSON +# +--source include/default_optimizer_switch.inc +--source include/have_sequence.inc + +# The tests here are large so that we get non-zero timings +--source include/big_test.inc + +--echo # +--echo # MDEV-30830: ANALYZE FORMAT=JSON: r_unpack_time_ms is empty for the hashed joins +--echo # + 
+--echo # +--echo # First, check a regular BNL-join +--echo # +create table t1 ( + a int, + b int +); +insert into t1 select seq, seq/3 from seq_0_to_99; + +create table t2 ( + a int, + b int +); +insert into t2 select seq, seq/5 from seq_0_to_99; + +let $out=` +analyze format=json +select * from t1, t2 +where + t1.a < 700 and + t2.a < 100 + and t1.b=t2.b +`; + +evalp set @js='$out'; +set @out=(select json_extract(@js,'$**.block-nl-join.r_unpack_time_ms')); +select cast(json_extract(@out,'$[0]') as DOUBLE) > 0; +drop table t1,t2; + +--echo # +--echo # Now, check the hashed, BNL-H join +--echo # +create table t1 ( + a int, + b int +); +insert into t1 select seq, seq/3 from seq_0_to_499; + +create table t2 ( + a int, + b int +); +insert into t2 select seq, seq/5 from seq_0_to_499; +set @tmp=@@join_cache_level, join_cache_level=6; + +let $out=` +analyze format=json +select * from t1, t2 +where + t1.a < 700 and + t2.a < 100 + and t1.b=t2.b +`; + +--source include/analyze-format.inc +evalp select '$out' as X; + +set @out=(select json_extract(@js,'$**.block-nl-join.r_unpack_time_ms')); +select cast(json_extract(@out,'$[0]') as DOUBLE) > 0; + +set join_cache_level=@tmp; +drop table t1, t2; + +--echo # +--echo # MDEV-30972: ANALYZE FORMAT=JSON: some time is unaccounted-for in BNL-H join +--echo # + +create table t1 ( + a int, + col1 varchar(100), + col2 varchar(100), + col3 varchar(100) +); + +insert into t1 select + seq/100, + concat('col1-', seq), + concat('col1-', seq), + concat('col1-', seq) +from seq_1_to_1000; + +create table t2 ( + a int, + col1 varchar(100), + col2 varchar(100), + col3 varchar(100) +); + +insert into t2 select + seq/100, + concat('col1-', seq), + concat('col1-', seq), + concat('col1-', seq) +from seq_1_to_2000; + +set @tmp=@@join_cache_level, join_cache_level=6; + +let $out=` +analyze format=json +select * from t1, t2 +where + t1.a=t2.a + and concat(t1.col1, t1.col2, t1.col3)= concat(t2.col1, t2.col2, t2.col3) +`; +--source include/analyze-format.inc +evalp select '$out' as X; + +set @out=(select json_extract(@js,'$**.block-nl-join.r_other_time_ms')); +select cast(json_extract(@out,'$[0]') as DOUBLE) > 0; + +set join_cache_level=@tmp; +drop table t1, t2; + diff --git a/mysql-test/main/analyze_stmt_orderby.result b/mysql-test/main/analyze_stmt_orderby.result index d4d0fe6b25b77..65be50bd0f6a8 100644 --- a/mysql-test/main/analyze_stmt_orderby.result +++ b/mysql-test/main/analyze_stmt_orderby.result @@ -473,7 +473,11 @@ ANALYZE "buffer_size": "65", "join_type": "BNL", "attached_condition": "t3.a = t0.a", - "r_filtered": 10 + "r_loops": 10, + "r_filtered": 10, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 10 } } ] @@ -554,7 +558,11 @@ ANALYZE "buffer_size": "119", "join_type": "BNL", "attached_condition": "t5.a = t6.a", - "r_filtered": 21.42857143 + "r_loops": 4, + "r_filtered": 21.42857143, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 7 } } ] diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result index ce0d695797bb2..eb1220f9a5612 100644 --- a/mysql-test/main/ctype_ucs.result +++ b/mysql-test/main/ctype_ucs.result @@ -6442,6 +6442,23 @@ IS_IPV4('10.0.0.1') 1 SET NAMES utf8; # +# MDEV-30746 Regression in ucs2_general_mysql500_ci +# +SET NAMES utf8mb3; +CREATE TABLE t1 (a VARCHAR(32) CHARACTER SET ucs2 COLLATE ucs2_general_mysql500_ci); +INSERT INTO t1 VALUES ('s'),('z'),(_latin1 0xDF); +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß 
+SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +DROP TABLE t1; +# # End of 10.4 tests # # diff --git a/mysql-test/main/ctype_ucs.test b/mysql-test/main/ctype_ucs.test index 20e2ea3908d00..b77636507dd6f 100644 --- a/mysql-test/main/ctype_ucs.test +++ b/mysql-test/main/ctype_ucs.test @@ -1140,6 +1140,18 @@ SELECT IS_IPV6('::'); SELECT IS_IPV4('10.0.0.1'); SET NAMES utf8; +--echo # +--echo # MDEV-30746 Regression in ucs2_general_mysql500_ci +--echo # + +SET NAMES utf8mb3; +CREATE TABLE t1 (a VARCHAR(32) CHARACTER SET ucs2 COLLATE ucs2_general_mysql500_ci); +INSERT INTO t1 VALUES ('s'),('z'),(_latin1 0xDF); +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +DROP TABLE t1; + + --echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/ctype_ucs2_general_mysql500_ci_ws.result b/mysql-test/main/ctype_ucs2_general_mysql500_ci_ws.result index 3679169122e10..40ba714560f0c 100644 --- a/mysql-test/main/ctype_ucs2_general_mysql500_ci_ws.result +++ b/mysql-test/main/ctype_ucs2_general_mysql500_ci_ws.result @@ -24,7 +24,7 @@ SUM(codepoint_hex4=HEX(WEIGHT_STRING(c))) AS count_bmp_weight_is_codepoint, SUM(codepoint_hex4<>HEX(WEIGHT_STRING(c))) AS count_bmp_weight_is_not_codepoint FROM v_bmp; count_bmp_weight_is_codepoint count_bmp_weight_is_not_codepoint -64428 1108 +64429 1107 SELECT codepoint_hex4,HEX(WEIGHT_STRING(c)) FROM v_bmp WHERE codepoint_hex4<>HEX(WEIGHT_STRING(c)); @@ -82,7 +82,6 @@ codepoint_hex4 HEX(WEIGHT_STRING(c)) 00DB 0055 00DC 0055 00DD 0059 -00DF 0053 00E0 0041 00E1 0041 00E2 0041 diff --git a/mysql-test/main/ctype_upgrade.result b/mysql-test/main/ctype_upgrade.result index 30684df402330..500b013a5a154 100644 --- a/mysql-test/main/ctype_upgrade.result +++ b/mysql-test/main/ctype_upgrade.result @@ -603,3 +603,56 @@ DROP TABLE maria050313_utf8_croatian_ci; DROP TABLE maria050533_xxx_croatian_ci; DROP TABLE maria100004_xxx_croatian_ci; DROP TABLE mysql050614_xxx_croatian_ci; +# +# Start of 10.4 tests +# +# +# MDEV-30746 Regression in ucs2_general_mysql500_ci +# +SET NAMES utf8mb3; +SHOW CREATE TABLE t1; +ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.t1` FORCE" or dump/reload to fix it! +SELECT * FROM t1; +ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.t1` FORCE" or dump/reload to fix it! +SELECT * FROM t1 IGNORE KEY(a); +ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.t1` FORCE" or dump/reload to fix it! +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! 
+REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair status OK +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 FORCE KEY(a) ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 IGNORE KEY(a) ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +SELECT GROUP_CONCAT(a) FROM t1 IGNORE KEY(a) GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +SELECT GROUP_CONCAT(a) FROM t1 FORCE KEY(a) GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +DROP TABLE t1; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/ctype_upgrade.test b/mysql-test/main/ctype_upgrade.test index fee962e7ceb57..7cb1ec9b69d60 100644 --- a/mysql-test/main/ctype_upgrade.test +++ b/mysql-test/main/ctype_upgrade.test @@ -203,3 +203,38 @@ DROP TABLE maria050313_utf8_croatian_ci; DROP TABLE maria050533_xxx_croatian_ci; DROP TABLE maria100004_xxx_croatian_ci; DROP TABLE mysql050614_xxx_croatian_ci; + + +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-30746 Regression in ucs2_general_mysql500_ci +--echo # + +SET NAMES utf8mb3; + +copy_file std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm $MYSQLD_DATADIR/test/t1.frm; +copy_file std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD $MYSQLD_DATADIR/test/t1.MYD; +copy_file std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI $MYSQLD_DATADIR/test/t1.MYI; + +--error ER_TABLE_NEEDS_REBUILD +SHOW CREATE TABLE t1; +--error ER_TABLE_NEEDS_REBUILD +SELECT * FROM t1; +--error ER_TABLE_NEEDS_REBUILD +SELECT * FROM t1 IGNORE KEY(a); +CHECK TABLE t1; +REPAIR TABLE t1; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 FORCE KEY(a) ORDER BY a; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 IGNORE KEY(a) ORDER BY a; +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +SELECT GROUP_CONCAT(a) FROM t1 IGNORE KEY(a) GROUP BY a ORDER BY a; +SELECT GROUP_CONCAT(a) FROM t1 FORCE KEY(a) GROUP BY a ORDER BY a; +DROP TABLE t1; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/mysql-test/main/ctype_utf8_uca.result b/mysql-test/main/ctype_utf8_uca.result index ecd2daa527215..bacdd81cbd9b1 100644 --- a/mysql-test/main/ctype_utf8_uca.result +++ b/mysql-test/main/ctype_utf8_uca.result @@ -855,6 +855,154 @@ DROP TABLE case_folding; # End of 10.3 tests # # +# Start of 10.4 tests +# +SET DEFAULT_STORAGE_ENGINE=MyISAM; +SET NAMES utf8mb3 COLLATE utf8mb3_unicode_nopad_ci; +# +# MDEV-30034 UNIQUE USING HASH accepts duplicate entries for tricky collations +# +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a TEXT COLLATE ,' + 'UNIQUE(a(3)))', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a TEXT COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text CHARACTER SET 
utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)))', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(2000) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(2000) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; +SET DEFAULT_STORAGE_ENGINE=HEAP; +# +# MDEV-30034 UNIQUE USING HASH accepts duplicate entries for tricky collations +# +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)))', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(2000) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(2000) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)))', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; 
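The duplicate-key results in these hunks rely on the strnncollsp_nchars() behaviour documented earlier in this diff for MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES: when comparing prefix keys of CHAR values whose trailing spaces were trimmed on storage, the shorter side is virtually padded back to nchars. A simplified byte-wise model of that rule follows (the real function compares collation weights, not raw bytes, and its behaviour without the flag is simplified here):

#include <cassert>
#include <cstddef>
#include <string>

// Simplified model of "compare only the first nchars, padding the shorter
// value with virtual trailing spaces when the flag is set".
static int cmp_nchars(const std::string &a, const std::string &b,
                      size_t nchars, bool emulate_trimmed_spaces)
{
  for (size_t i= 0; i < nchars; i++)
  {
    bool a_end= i >= a.size(), b_end= i >= b.size();
    if (a_end && b_end)
      return 0;                       // both exhausted within the prefix
    if ((a_end || b_end) && !emulate_trimmed_spaces)
      return a_end ? -1 : 1;          // no padding without the flag (simplified)
    char ca= a_end ? ' ' : a[i];      // virtual trailing space
    char cb= b_end ? ' ' : b[i];
    if (ca != cb)
      return ca < cb ? -1 : 1;
  }
  return 0;                           // equal on the first nchars
}

int main()
{
  // A CHAR value stored as "ss" (trailing space trimmed) and the literal
  // "ss " compare equal on a 3-character prefix only with padding enabled.
  assert(cmp_nchars("ss", "ss ", 3, true) == 0);
  assert(cmp_nchars("ss", "ss ", 3, false) != 0);
  return 0;
}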
+EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; +SET DEFAULT_STORAGE_ENGINE=DEFAULT; +# +# End of 10.4 tests +# +# # Start of 10.10 tests # # diff --git a/mysql-test/main/ctype_utf8_uca.test b/mysql-test/main/ctype_utf8_uca.test index 5abf919251180..6f89d6462603d 100644 --- a/mysql-test/main/ctype_utf8_uca.test +++ b/mysql-test/main/ctype_utf8_uca.test @@ -54,6 +54,24 @@ SET NAMES utf8mb3 COLLATE utf8mb3_thai_520_w2; --echo # End of 10.3 tests --echo # + +--echo # +--echo # Start of 10.4 tests +--echo # + +SET DEFAULT_STORAGE_ENGINE=MyISAM; +SET NAMES utf8mb3 COLLATE utf8mb3_unicode_nopad_ci; +--source include/ctype_nopad_prefix_unique.inc + +SET DEFAULT_STORAGE_ENGINE=HEAP; +--source include/ctype_nopad_prefix_unique.inc + +SET DEFAULT_STORAGE_ENGINE=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # + --echo # --echo # Start of 10.10 tests --echo # diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result index b86cd1c42cc98..e040aa7c21717 100644 --- a/mysql-test/main/derived_view.result +++ b/mysql-test/main/derived_view.result @@ -3765,3 +3765,485 @@ drop procedure sp2; drop view v, v2; drop table t1,t2; # End of 10.2 tests +# +# MDEV-30706: view defined as select with implicit grouping and +# a set function used in a subquery +# +CREATE TABLE t1 (a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,2); +CREATE TABLE t2 (a INT PRIMARY KEY, b INT); +INSERT INTO t2 VALUES (1,1), (3,3); +CREATE TABLE t3 (a INT PRIMARY KEY, b INT); +INSERT INTO t3 VALUES (2,2), (4,4), (7,7); +CREATE TABLE t4 (a INT PRIMARY KEY, b INT); +INSERT INTO t4 VALUES (2,2), (5,5), (7,7); +CREATE VIEW v AS SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b; +SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b; +m +9 +SELECT * FROM v; +m +9 +WITH cte AS ( SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b ) SELECT * FROM cte; +m +9 +EXPLAIN SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1 Using index +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort +2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using index +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 3 +2 SUBQUERY t4 ALL NULL NULL NULL NULL 3 Using where +2 SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1 Using index +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort +3 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using index +EXPLAIN WITH cte AS ( SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b ) SELECT * FROM cte; +id select_type 
table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 3 +2 DERIVED t4 ALL NULL NULL NULL NULL 3 Using where +2 DERIVED t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1 Using index +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort +3 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using index +PREPARE stmt FROM "SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b"; +execute stmt; +m +9 +execute stmt; +m +9 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +m +9 +execute stmt; +m +9 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b ) SELECT * FROM cte"; +execute stmt; +m +9 +execute stmt; +m +9 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2,t3,t4; +# +# MDEV-29224: view defined as select with implicit grouping and +# a set function used in a subquery +# +CREATE TABLE t1 (f1 INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (f2 int); +INSERT INTO t2 VALUES (3); +CREATE VIEW v AS SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; +SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; +( SELECT MAX(f1) FROM t2 ) +2 +SELECT * FROM v; +( SELECT MAX(f1) FROM t2 ) +2 +WITH cte AS ( SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1 ) SELECT * FROM cte; +( SELECT MAX(f1) FROM t2 ) +2 +EXPLAIN SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 +2 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1 +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 2 +2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 +3 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1 +EXPLAIN WITH cte AS ( SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1 ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 2 +3 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1 +PREPARE stmt FROM "SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1"; +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1 ) SELECT * FROM cte"; +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2; +# +# MDEV-28573: view defined as select with implicit grouping and +# a set function used in a subquery +# +CREATE TABLE t1 (a INTEGER, b INTEGER); +CREATE TABLE t2 (c INTEGER); +INSERT INTO t1 VALUES (1,11), (2,22), (2,22); +INSERT INTO t2 VALUES (1), (2); +CREATE VIEW v1 AS SELECT (SELECT COUNT(b) FROM t2) FROM t1; +CREATE VIEW v2 AS SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1; +SELECT (SELECT COUNT(b) FROM t2) FROM t1; +ERROR 21000: Subquery returns more than 1 row +SELECT * FROM v1; +ERROR 21000: Subquery returns more than 1 row +WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2) FROM t1 ) SELECT * FROM cte; +ERROR 21000: Subquery returns more than 1 row +SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +SELECT 
* FROM v2; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1 ) SELECT * FROM cte; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +EXPLAIN SELECT (SELECT COUNT(b) FROM t2) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 +EXPLAIN SELECT * FROM v1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 3 +2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 +EXPLAIN WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2) FROM t1 ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 3 +2 DERIVED t1 ALL NULL NULL NULL NULL 3 +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 +PREPARE stmt FROM "SELECT (SELECT COUNT(b) FROM t2) FROM t1"; +execute stmt; +ERROR 21000: Subquery returns more than 1 row +execute stmt; +ERROR 21000: Subquery returns more than 1 row +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v1"; +execute stmt; +ERROR 21000: Subquery returns more than 1 row +execute stmt; +ERROR 21000: Subquery returns more than 1 row +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2) FROM t1 ) SELECT * FROM cte"; +execute stmt; +ERROR 21000: Subquery returns more than 1 row +execute stmt; +ERROR 21000: Subquery returns more than 1 row +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1"; +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v2"; +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1 ) SELECT * FROM cte"; +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +DEALLOCATE PREPARE stmt; +DROP VIEW v1,v2; +DROP TABLE t1,t2; +# +# MDEV-28570: VIEW with WHERE containing subquery +# with set function aggregated in query +# +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (c int, d int); +INSERT INTO t1 VALUES +(1,10), (2,10), (1,20), (2,20), (3,20), (2,30), (4,40); +INSERT INTO t2 VALUES +(2,10), (2,20), (4,10), (5,10), (3,20), (2,40); +CREATE VIEW v AS SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); +SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); +a +2 +4 +SELECT * FROM v; +a +2 +4 +WITH cte AS ( SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20) ) SELECT * FROM cte; +a +2 +4 +EXPLAIN SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using temporary; Using filesort +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 6 +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 7 +2 DERIVED t1 ALL NULL NULL NULL NULL 7 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 6 +EXPLAIN WITH cte AS ( SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20) ) SELECT * FROM cte; +id select_type table type 
possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 7 +2 DERIVED t1 ALL NULL NULL NULL NULL 7 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 6 +PREPARE stmt FROM "SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20)"; +execute stmt; +a +2 +4 +execute stmt; +a +2 +4 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +a +2 +4 +execute stmt; +a +2 +4 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20) ) SELECT * FROM cte"; +execute stmt; +a +2 +4 +execute stmt; +a +2 +4 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2; +# +# MDEV-28571: VIEW with select list containing subquery +# with set function aggregated in query +# +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); +CREATE VIEW v AS SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; +SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; +c +2 +3 +1,1 +SELECT * FROM v; +c +2 +3 +1,1 +WITH cte AS ( SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a ) SELECT * FROM cte; +c +2 +3 +1,1 +EXPLAIN SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 6 +2 DERIVED t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +EXPLAIN WITH cte AS ( SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 6 +2 DERIVED t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +PREPARE stmt FROM "SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a"; +execute stmt; +c +2 +3 +1,1 +execute stmt; +c +2 +3 +1,1 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +c +2 +3 +1,1 +execute stmt; +c +2 +3 +1,1 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a ) SELECT * FROM cte"; +execute stmt; +c +2 +3 +1,1 +execute stmt; +c +2 +3 +1,1 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2; +# +# MDEV-30668: VIEW with WHERE containing nested subquery +# with set function aggregated in outer subquery +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +create table t2 (b int); +insert into t2 values (2), (1), (4), (7); +create table t3 (a int, b int); +insert into t3 values (2,10), (7,30), (2,30), (1,10), (7,40); +create view v as select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))); +select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))); +a +7 +select * from 
v; +a +7 +with cte as ( select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))) ) select * from cte; +a +7 +explain select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY eq_ref distinct_key distinct_key 4 test.t1.a 1 +2 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using temporary +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where +explain select * from v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY eq_ref distinct_key distinct_key 4 test.t1.a 1 +3 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using temporary +4 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where +explain with cte as ( select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))) ) select * from cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY eq_ref distinct_key distinct_key 4 test.t1.a 1 +3 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using temporary +4 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where +prepare stmt from "select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b)))"; +execute stmt; +a +7 +execute stmt; +a +7 +deallocate prepare stmt; +prepare stmt from "select * from v"; +execute stmt; +a +7 +execute stmt; +a +7 +deallocate prepare stmt; +prepare stmt from "with cte as ( select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))) ) select * from cte"; +execute stmt; +a +7 +execute stmt; +a +7 +deallocate prepare stmt; +drop view v; +drop table t1,t2,t3; +# End of 10.4 tests diff --git a/mysql-test/main/derived_view.test b/mysql-test/main/derived_view.test index caccc7dafa166..5422fbcfd1d65 100644 --- a/mysql-test/main/derived_view.test +++ b/mysql-test/main/derived_view.test @@ -2455,3 +2455,300 @@ drop view v, v2; drop table t1,t2; --echo # End of 10.2 tests + +--echo # +--echo # MDEV-30706: view defined as select with implicit grouping and +--echo # a set function used in a subquery +--echo # + +CREATE TABLE t1 (a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,2); +CREATE TABLE t2 (a INT PRIMARY KEY, b INT); +INSERT INTO t2 VALUES (1,1), (3,3); +CREATE TABLE t3 (a INT PRIMARY KEY, b INT); +INSERT INTO t3 VALUES (2,2), (4,4), (7,7); +CREATE TABLE t4 (a INT PRIMARY KEY, b INT); +INSERT INTO t4 VALUES (2,2), (5,5), (7,7); + +let $q= +SELECT + (SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 + WHERE t3.a = t4.b; + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2,t3,t4; + +--echo # 
+--echo # MDEV-29224: view defined as select with implicit grouping and +--echo # a set function used in a subquery +--echo # + +CREATE TABLE t1 (f1 INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (f2 int); +INSERT INTO t2 VALUES (3); + +let $q= +SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-28573: view defined as select with implicit grouping and +--echo # a set function used in a subquery +--echo # + +CREATE TABLE t1 (a INTEGER, b INTEGER); +CREATE TABLE t2 (c INTEGER); +INSERT INTO t1 VALUES (1,11), (2,22), (2,22); +INSERT INTO t2 VALUES (1), (2); + +let $q1= +SELECT (SELECT COUNT(b) FROM t2) FROM t1; +let $q2= +SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1; + +eval CREATE VIEW v1 AS $q1; +eval CREATE VIEW v2 AS $q2; + +--error ER_SUBQUERY_NO_1_ROW +eval $q1; +--error ER_SUBQUERY_NO_1_ROW +SELECT * FROM v1; +--error ER_SUBQUERY_NO_1_ROW +eval WITH cte AS ( $q1 ) SELECT * FROM cte; +eval $q2; +SELECT * FROM v2; +eval WITH cte AS ( $q2 ) SELECT * FROM cte; + +eval EXPLAIN $q1; +EXPLAIN SELECT * FROM v1; +eval EXPLAIN WITH cte AS ( $q1 ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q1"; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v1"; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q1 ) SELECT * FROM cte"; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "$q2"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v2"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q2 ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v1,v2; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-28570: VIEW with WHERE containing subquery +--echo # with set function aggregated in query +--echo # + +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (c int, d int); + +INSERT INTO t1 VALUES + (1,10), (2,10), (1,20), (2,20), (3,20), (2,30), (4,40); +INSERT INTO t2 VALUES + (2,10), (2,20), (4,10), (5,10), (3,20), (2,40); + +let $q= +SELECT a FROM t1 GROUP BY a + HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-28571: VIEW with select list 
containing subquery +--echo # with set function aggregated in query +--echo # + +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); + +let $q= +SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-30668: VIEW with WHERE containing nested subquery +--echo # with set function aggregated in outer subquery +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); + +create table t2 (b int); +insert into t2 values (2), (1), (4), (7); + +create table t3 (a int, b int); +insert into t3 values (2,10), (7,30), (2,30), (1,10), (7,40); + +let $q= +select * from t1 + where t1.a in (select t3.a from t3 group by t3.a + having t3.a > any (select t2.b from t2 + where t2.b*10 < sum(t3.b))); +eval create view v as $q; + +eval $q; +eval select * from v; +eval with cte as ( $q ) select * from cte; + +eval explain $q; +eval explain select * from v; +eval explain with cte as ( $q ) select * from cte; + +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "select * from v"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "with cte as ( $q ) select * from cte"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +drop view v; +drop table t1,t2,t3; + +--echo # End of 10.4 tests diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index 888d3143f2c09..0c5eccad754a6 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -1071,6 +1071,43 @@ UNION drop table t1; End of 5.5 tests # +# MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result +# +create table t1 (c int, d int); +insert into t1 values (5, 1), (0, 3); +select distinct sum(distinct 1), sum(t1.d) > 2 from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 2 +1 1 +1 0 +select distinct sum(distinct 1), sum(t1.d) > 2, t1.c from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 2 c +1 1 0 +1 0 5 +insert into t1 values (6,6); +select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 5 +1 1 +1 0 +select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 5 c +1 1 0 +1 0 5 +1 1 6 +set @@sort_buffer_size=1024; +insert into t1 select -seq,-seq from seq_1_to_100; +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ; +sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000 +1 0 0 +1 1 0 +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2; +sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000 c +1 0 0 -1 +1 1 0 0 +1 1 0 5 +1 1 0 6 +drop 
table t1; +# End of 10.4 tests +# # MDEV-27382: OFFSET is ignored when it is combined with the DISTINCT, IN() and JOIN # CREATE TABLE t1 ( diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test index 32e189da98aaf..9039edff20fc5 100644 --- a/mysql-test/main/distinct.test +++ b/mysql-test/main/distinct.test @@ -4,6 +4,7 @@ # --source include/default_optimizer_switch.inc +--source include/have_sequence.inc --disable_warnings drop table if exists t1,t2,t3; --enable_warnings @@ -819,6 +820,28 @@ drop table t1; --echo End of 5.5 tests +--echo # +--echo # MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result +--echo # + +create table t1 (c int, d int); +insert into t1 values (5, 1), (0, 3); +select distinct sum(distinct 1), sum(t1.d) > 2 from (t1 e join t1) group by t1.c; +select distinct sum(distinct 1), sum(t1.d) > 2, t1.c from (t1 e join t1) group by t1.c; + +insert into t1 values (6,6); +select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c; +select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c; + +# Force usage of remove_dup_with_compare() algorithm +set @@sort_buffer_size=1024; +insert into t1 select -seq,-seq from seq_1_to_100; +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ; +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2; +drop table t1; + +--echo # End of 10.4 tests + --echo # --echo # MDEV-27382: OFFSET is ignored when it is combined with the DISTINCT, IN() and JOIN --echo # diff --git a/mysql-test/main/except.result b/mysql-test/main/except.result index 3dc4e1c94706b..698f33d87095f 100644 --- a/mysql-test/main/except.result +++ b/mysql-test/main/except.result @@ -378,7 +378,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 2, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 2 } } ] @@ -420,7 +424,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 2, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 2 } } ] @@ -493,7 +501,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 2, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 2 } } ] @@ -535,7 +547,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 2, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 2 } } ] diff --git a/mysql-test/main/except_all.result b/mysql-test/main/except_all.result index ac032221cc583..a014425431257 100644 --- a/mysql-test/main/except_all.result +++ b/mysql-test/main/except_all.result @@ -505,7 +505,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 3, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 3 } } ] @@ -547,7 +551,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 2, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 3 
} } ] @@ -619,7 +627,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 3, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 3 } } ] @@ -661,7 +673,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "119", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 2, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 3 } } ] diff --git a/mysql-test/main/explain_json.result b/mysql-test/main/explain_json.result index cf1aa172146bc..5821a6abd86f5 100644 --- a/mysql-test/main/explain_json.result +++ b/mysql-test/main/explain_json.result @@ -1812,7 +1812,11 @@ ANALYZE "buffer_size": "400", "join_type": "BKA", "mrr_type": "Rowid-ordered scan", - "r_filtered": 100 + "r_loops": 10, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 1 } } ] diff --git a/mysql-test/main/explain_non_select.result b/mysql-test/main/explain_non_select.result index d1e7af6afdedc..03259fff62bde 100644 --- a/mysql-test/main/explain_non_select.result +++ b/mysql-test/main/explain_non_select.result @@ -157,9 +157,13 @@ id select_type table partitions type possible_keys key key_len ref rows Extra explain extended update t2 set b=3 where a in (3,4); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`b` = 3 where `test`.`t2`.`a` in (3,4) explain extended delete from t2 where a in (3,4); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`a` in (3,4) drop table t1,t2; # # Check the special case where partition pruning removed all partitions diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result index 8f9c27eeb868e..27845af71646d 100644 --- a/mysql-test/main/func_group.result +++ b/mysql-test/main/func_group.result @@ -1443,16 +1443,11 @@ FROM derived1 AS X WHERE X.int_nokey < 61 GROUP BY pk -LIMIT 1) +LIMIT 1) AS m FROM D AS X WHERE X.int_key < 13 GROUP BY int_nokey LIMIT 1; -(SELECT COUNT( int_nokey ) -FROM derived1 AS X -WHERE -X.int_nokey < 61 -GROUP BY pk -LIMIT 1) +m 1 DROP TABLE derived1; DROP TABLE D; diff --git a/mysql-test/main/func_group.test b/mysql-test/main/func_group.test index 842ca4388ef23..05946908fcfc5 100644 --- a/mysql-test/main/func_group.test +++ b/mysql-test/main/func_group.test @@ -583,8 +583,6 @@ DROP TABLE t1; # # Bug #16792 query with subselect, join, and group not returning proper values # -#enable after fix MDEV-28573 ---disable_view_protocol CREATE TABLE t1 (a INT, b INT); INSERT INTO t1 VALUES (1,1),(1,2),(2,3); @@ -595,7 +593,6 @@ SELECT AVG(2), BIT_AND(2), BIT_OR(2), BIT_XOR(2), COUNT(*), COUNT(12), COUNT(DISTINCT 12), MIN(2),MAX(2),STD(2), VARIANCE(2),SUM(2), GROUP_CONCAT(2),GROUP_CONCAT(DISTINCT 2); DROP TABLE t1; ---enable_view_protocol # End of 4.1 tests @@ -627,13 +624,10 @@ drop table t1, t2, t3; # # BUG#3190, WL#1639: Standard Deviation STDDEV - 2 different calculations # -#enable after fix MDEV-28573 ---disable_view_protocol CREATE TABLE t1 (id int(11),value1 float(10,2)); INSERT INTO t1 VALUES (1,0.00),(1,1.00), (1,2.00), (2,10.00), (2,11.00), (2,12.00), (2,13.00); select id, stddev_pop(value1), var_pop(value1), stddev_samp(value1), 
var_samp(value1) from t1 group by id; DROP TABLE t1; ---enable_view_protocol # # BUG#8464 decimal AVG returns incorrect result @@ -970,22 +964,19 @@ INSERT INTO D VALUES (83,45,4,repeat(' X', 42)), (105,53,12,NULL); -#enable after fix MDEV-27871 ---disable_view_protocol SELECT (SELECT COUNT( int_nokey ) FROM derived1 AS X WHERE X.int_nokey < 61 GROUP BY pk - LIMIT 1) + LIMIT 1) AS m FROM D AS X WHERE X.int_key < 13 GROUP BY int_nokey LIMIT 1; DROP TABLE derived1; DROP TABLE D; ---enable_view_protocol # # Bug #39656: Behaviour different for agg functions with & without where - diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index 213cc798a6df9..eb4b4e414d0d0 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1449,6 +1449,13 @@ JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) [{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, "analyzing_index_merge_union": [], "test_one_line_array": ["123"]}] drop table t200; # +# MDEV-24538: JSON_LENGTH does not return error upon wrong number of parameters +# +SELECT JSON_LENGTH('{"a":"b"}','$','$', 'foo'); +ERROR 42000: Incorrect parameter count in the call to native function 'json_length' +SELECT JSON_LENGTH(); +ERROR 42000: Incorrect parameter count in the call to native function 'JSON_LENGTH' +# # End of 10.4 tests # # diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 93d8c2a7d6af6..7bf7244ac3804 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -665,6 +665,7 @@ SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest'); SELECT NULL; SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a'); + --echo # --echo # End of 10.3 tests --echo # @@ -917,6 +918,14 @@ select JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t20 select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; drop table t200; +--echo # +--echo # MDEV-24538: JSON_LENGTH does not return error upon wrong number of parameters +--echo # +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT JSON_LENGTH('{"a":"b"}','$','$', 'foo'); +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT JSON_LENGTH(); + --echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result index b912cfa7cd30e..d43287de53b3c 100644 --- a/mysql-test/main/func_str.result +++ b/mysql-test/main/func_str.result @@ -5241,6 +5241,15 @@ DROP TABLE t1; # Start of 10.4 tests # # +# MDEV-30351 crash in Item_func_left::val_str +# +SELECT WEIGHT_STRING('aa') IN (LEFT(WEIGHT_STRING('aaa'),4),'bbb') as expect_1; +expect_1 +1 +SELECT UNHEX('0032') in (LEFT(UNHEX('003200'), 2),'dog') as expect_1; +expect_1 +1 +# # MDEV-21841 CONV() function truncates the result type to 21 symbol. 
# CREATE TABLE t1(i BIGINT); diff --git a/mysql-test/main/func_str.test b/mysql-test/main/func_str.test index 1c8839f24a2b9..377470ca60e60 100644 --- a/mysql-test/main/func_str.test +++ b/mysql-test/main/func_str.test @@ -2280,16 +2280,22 @@ CREATE TABLE crash_test_2 ( --echo # Cleanup DROP TABLE t1; - --echo # --echo # End of 10.3 tests --echo # - --echo # --echo # Start of 10.4 tests --echo # +--echo # +--echo # MDEV-30351 crash in Item_func_left::val_str +--echo # + +SELECT WEIGHT_STRING('aa') IN (LEFT(WEIGHT_STRING('aaa'),4),'bbb') as expect_1; + +SELECT UNHEX('0032') in (LEFT(UNHEX('003200'), 2),'dog') as expect_1; + --echo # --echo # MDEV-21841 CONV() function truncates the result type to 21 symbol. --echo # diff --git a/mysql-test/main/grant5.result b/mysql-test/main/grant5.result index 6f50f530a3ed8..76a5f5375e905 100644 --- a/mysql-test/main/grant5.result +++ b/mysql-test/main/grant5.result @@ -453,4 +453,13 @@ insert mysql.host values (1); flush privileges; ERROR HY000: Fatal error: mysql.host table is damaged or in unsupported 3.20 format drop table mysql.host; +# +# MDEV-30826 Invalid data on mysql.host segfaults the server after an upgrade to 10.4 +# +create table mysql.host (host char(60) binary default '' not null, db char(64) binary default '' not null, select_priv enum('n','y') collate utf8_general_ci default 'n' not null, insert_priv enum('n','y') collate utf8_general_ci default 'n' not null, update_priv enum('n','y') collate utf8_general_ci default 'n' not null, delete_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_priv enum('n','y') collate utf8_general_ci default 'n' not null, drop_priv enum('n','y') collate utf8_general_ci default 'n' not null, grant_priv enum('n','y') collate utf8_general_ci default 'n' not null, references_priv enum('n','y') collate utf8_general_ci default 'n' not null, index_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_tmp_table_priv enum('n','y') collate utf8_general_ci default 'n' not null, lock_tables_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, show_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, execute_priv enum('n','y') collate utf8_general_ci default 'n' not null, trigger_priv enum('n','y') collate utf8_general_ci default 'n' not null, primary key /*host*/ (host,db)) engine=myisam character set utf8 collate utf8_bin comment='host privileges; merged with database privileges'; +insert mysql.host values('10.5.0.0/255.255.0.0','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','N'); +flush privileges; +drop table mysql.host; +# # End of 10.4 tests +# diff --git a/mysql-test/main/grant5.test b/mysql-test/main/grant5.test index c4a302fca865f..49e0ab1abf16c 100644 --- a/mysql-test/main/grant5.test +++ b/mysql-test/main/grant5.test @@ -408,4 +408,16 @@ insert mysql.host values (1); flush privileges; drop table mysql.host; +--echo # +--echo # MDEV-30826 Invalid data on mysql.host segfaults the server after an upgrade to 10.4 +--echo # + +# from mysql_system_tables.sql in 10.3: +create table mysql.host (host char(60) binary default '' not null, db char(64) binary default '' not null, select_priv enum('n','y') collate utf8_general_ci 
default 'n' not null, insert_priv enum('n','y') collate utf8_general_ci default 'n' not null, update_priv enum('n','y') collate utf8_general_ci default 'n' not null, delete_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_priv enum('n','y') collate utf8_general_ci default 'n' not null, drop_priv enum('n','y') collate utf8_general_ci default 'n' not null, grant_priv enum('n','y') collate utf8_general_ci default 'n' not null, references_priv enum('n','y') collate utf8_general_ci default 'n' not null, index_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_tmp_table_priv enum('n','y') collate utf8_general_ci default 'n' not null, lock_tables_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, show_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, execute_priv enum('n','y') collate utf8_general_ci default 'n' not null, trigger_priv enum('n','y') collate utf8_general_ci default 'n' not null, primary key /*host*/ (host,db)) engine=myisam character set utf8 collate utf8_bin comment='host privileges; merged with database privileges'; +insert mysql.host values('10.5.0.0/255.255.0.0','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','N'); +flush privileges; +drop table mysql.host; + +--echo # --echo # End of 10.4 tests +--echo # diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result index ba403fa8b73c4..b7bbab6dd47af 100644 --- a/mysql-test/main/group_by.result +++ b/mysql-test/main/group_by.result @@ -1036,8 +1036,9 @@ FROM t1 AS t1_outer GROUP BY t1_outer.b; 21 21 SELECT (SELECT SUM(t1_inner.a) FROM t1 AS t1_inner GROUP BY t1_inner.b LIMIT 1) +AS m FROM t1 AS t1_outer; -(SELECT SUM(t1_inner.a) FROM t1 AS t1_inner GROUP BY t1_inner.b LIMIT 1) +m 3 3 3 @@ -1277,12 +1278,9 @@ a select avg ( (select (select sum(outr.a + innr.a) from t1 as innr limit 1) as tt -from t1 as outr order by outr.a limit 1)) +from t1 as outr order by outr.a limit 1)) as m from t1 as most_outer; -avg ( -(select -(select sum(outr.a + innr.a) from t1 as innr limit 1) as tt -from t1 as outr order by outr.a limit 1)) +m 29.0000 select avg ( (select ( diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test index 357959d40714c..7f81c787c36c9 100644 --- a/mysql-test/main/group_by.test +++ b/mysql-test/main/group_by.test @@ -783,11 +783,9 @@ SELECT 1 FROM t1 as t1_outer GROUP BY a SELECT (SELECT SUM(t1_inner.a) FROM t1 AS t1_inner LIMIT 1) FROM t1 AS t1_outer GROUP BY t1_outer.b; -#enable after fix MDEV-27871 ---disable_view_protocol SELECT (SELECT SUM(t1_inner.a) FROM t1 AS t1_inner GROUP BY t1_inner.b LIMIT 1) +AS m FROM t1 AS t1_outer; ---enable_view_protocol --error ER_WRONG_FIELD_WITH_GROUP SELECT (SELECT SUM(t1_outer.a) FROM t1 AS t1_inner LIMIT 1) @@ -863,6 +861,7 @@ DROP TABLE t1; --echo # --echo # Bug#27219: Aggregate functions in ORDER BY. 
--echo # + SET @save_sql_mode=@@sql_mode; SET @@sql_mode='ONLY_FULL_GROUP_BY'; @@ -884,6 +883,8 @@ SELECT 1 FROM t1 ORDER BY SUM(a) + 1; --error 1140 SELECT 1 FROM t1 ORDER BY SUM(a), b; +--disable_service_connection + --error 1140 SELECT a FROM t1 ORDER BY COUNT(b); @@ -896,9 +897,6 @@ SELECT t1.a FROM t1 ORDER BY (SELECT SUM(t2.a) FROM t2 ORDER BY t2.a); --error 1140 SELECT t1.a FROM t1 ORDER BY (SELECT t2.a FROM t2 ORDER BY SUM(t2.b) LIMIT 1); -#enable after fix MDEV-28570 ---disable_view_protocol - --error 1140 SELECT t1.a FROM t1 WHERE t1.a = (SELECT t2.a FROM t2 ORDER BY SUM(t2.b) LIMIT 1); @@ -936,7 +934,7 @@ SELECT 1 FROM t1 GROUP BY t1.a SELECT 1 FROM t1 GROUP BY t1.a HAVING (SELECT AVG(t1.b + t2.b) FROM t2 ORDER BY t2.a LIMIT 1); ---enable_view_protocol +--enable_service_connection # Both SUMs are aggregated in the subquery, no mixture: SELECT t1.a FROM t1 @@ -961,18 +959,17 @@ SELECT t1.a, SUM(t1.b) FROM t1 ORDER BY SUM(t2.b + t1.a) LIMIT 1) GROUP BY t1.a; -#enable after fix MDEV-28570, MDEV-28571 ---disable_view_protocol - SELECT t1.a FROM t1 GROUP BY t1.a HAVING (1, 1) = (SELECT SUM(t1.a), t1.a FROM t2 LIMIT 1); select avg ( (select (select sum(outr.a + innr.a) from t1 as innr limit 1) as tt - from t1 as outr order by outr.a limit 1)) + from t1 as outr order by outr.a limit 1)) as m from t1 as most_outer; +--disable_service_connection + --error 1140 select avg ( (select ( @@ -980,7 +977,7 @@ select avg ( from t1 as outr order by count(outr.a) limit 1)) as tt from t1 as most_outer; ---enable_view_protocol +--enable_service_connection select (select sum(outr.a + t1.a) from t1 limit 1) as tt from t1 as outr order by outr.a; @@ -1378,7 +1375,7 @@ DROP TABLE t1; --echo # Bug#11765254 (58200): Assertion failed: param.sort_length when grouping --echo # by functions --echo # -#createing view adds one new warning +#creating view adds one new warning --disable_view_protocol set tmp_memory_table_size=0; diff --git a/mysql-test/main/insert_returning.result b/mysql-test/main/insert_returning.result index 1976c1ca02e89..b2ed9c90e51ee 100644 --- a/mysql-test/main/insert_returning.result +++ b/mysql-test/main/insert_returning.result @@ -63,6 +63,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1(id1,val1) VALUES(9,'j') RETURNING id1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(id1,val1) values (9,'j') EXPLAIN FORMAT="json" INSERT INTO t1(id1,val1) VALUES (10,'k') RETURNING id1; EXPLAIN { @@ -151,6 +153,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1 VALUES (17,'s'),(18,'t') RETURNING *; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1` values (17,'s'),(18,'t') EXPLAIN FORMAT="json" INSERT INTO t1 VALUES(19,'u'),(20,'v') RETURNING id1; EXPLAIN { @@ -236,6 +240,8 @@ EXPLAIN EXTENDED INSERT INTO ins_duplicate(id,val) VALUES (2,'b') ON DUPLICATE KEY UPDATE val='i' RETURNING val; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT ins_duplicate ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`ins_duplicate`(id,val) values (2,'b') on duplicate key update `test`.`ins_duplicate`.`val` = 'i' EXPLAIN FORMAT="json" INSERT INTO ins_duplicate(id,val) VALUES (2,'b') ON DUPLICATE KEY 
UPDATE val='j' RETURNING val; EXPLAIN @@ -320,6 +326,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1 SET id1=10, val1='j' RETURNING val1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(id1,val1) values (10,'j') EXPLAIN FORMAT="json" INSERT INTO t1 SET id1=11, val1='k' RETURNING val1; EXPLAIN { @@ -404,6 +412,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1 SELECT * FROM t1 WHERE id1=9 RETURNING val1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 insert into `test`.`t1` select sql_buffer_result NULL AS `id1`,NULL AS `val1` from `test`.`t1` where 0 EXPLAIN FORMAT="json" INSERT INTO t1 SELECT * FROM t1 WHERE id1=10 RETURNING val1; EXPLAIN { diff --git a/mysql-test/main/intersect.result b/mysql-test/main/intersect.result index c7858b49cd445..4e14ec33c608c 100644 --- a/mysql-test/main/intersect.result +++ b/mysql-test/main/intersect.result @@ -453,7 +453,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "256Kb", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 3, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 3 } } ] @@ -548,7 +552,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "256Kb", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 3, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 3 } } ] diff --git a/mysql-test/main/intersect_all.result b/mysql-test/main/intersect_all.result index 6c7a9c1cdd26a..11ee34b7390fe 100644 --- a/mysql-test/main/intersect_all.result +++ b/mysql-test/main/intersect_all.result @@ -484,7 +484,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "65", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 5, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 7 } } ] @@ -579,7 +583,11 @@ ANALYZE "buffer_type": "flat", "buffer_size": "65", "join_type": "BNL", - "r_filtered": 100 + "r_loops": 5, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 7 } } ] diff --git a/mysql-test/main/kill-2.result b/mysql-test/main/kill-2.result index daaba2c092a9b..d735d412eeb98 100644 --- a/mysql-test/main/kill-2.result +++ b/mysql-test/main/kill-2.result @@ -10,3 +10,34 @@ foo root kill user foo@'127.0.0.1'; drop user foo@'127.0.0.1'; +# +# KILL USER and missing privileges +# +create user a@'127.0.0.1'; +create user b@'127.0.0.1'; +grant process on *.* to a@'127.0.0.1'; +grant select on *.* to b@'127.0.0.1'; +connect a,127.0.0.1,a; +show grants; +Grants for a@127.0.0.1 +GRANT PROCESS ON *.* TO `a`@`127.0.0.1` +connect b,127.0.0.1,b; +show processlist; +Id User Host db Command Time State Info Progress +# b # test # # starting show processlist # +kill user a; +kill user x; +connection a; +show processlist; +Id User Host db Command Time State Info Progress +# root # test # # # # # +# a # test # # # # # +# b # test # # # # # +kill user b; +ERROR HY000: Operation KILL USER failed for b@% +connection default; +drop user a@'127.0.0.1'; +drop user b@'127.0.0.1'; +# +# End of 10.3 tests +# diff --git a/mysql-test/main/kill-2.test b/mysql-test/main/kill-2.test 
index 6d40e14a700ba..9bc4fe03346c0 100644 --- a/mysql-test/main/kill-2.test +++ b/mysql-test/main/kill-2.test @@ -27,3 +27,30 @@ let $wait_condition= --source include/wait_condition.inc drop user foo@'127.0.0.1'; --enable_service_connection + +--echo # +--echo # KILL USER and missing privileges +--echo # +create user a@'127.0.0.1'; +create user b@'127.0.0.1'; +grant process on *.* to a@'127.0.0.1'; +grant select on *.* to b@'127.0.0.1'; +--connect a,127.0.0.1,a +show grants; +--connect b,127.0.0.1,b +--replace_column 1 # 3 # 5 # 6 # 9 # +show processlist; +kill user a; # existing connection, but not visible to current_user +kill user x; # not existing connection +--connection a +--replace_column 1 # 3 # 5 # 6 # 7 # 8 # 9 # +show processlist; +--error ER_KILL_DENIED_ERROR +kill user b; +--connection default +drop user a@'127.0.0.1'; +drop user b@'127.0.0.1'; + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/main/locale.result b/mysql-test/main/locale.result index d29482ceffa9c..3e632aaead5a7 100644 --- a/mysql-test/main/locale.result +++ b/mysql-test/main/locale.result @@ -306,3 +306,13 @@ date_format('2001-01-06', '%w %a %W', 'de_CH') select date_format('2001-09-01', '%c %b %M', 'de_CH'); date_format('2001-09-01', '%c %b %M', 'de_CH') 9 Sep September +# +# MDEV-30630 locale: Chinese error message for ZH_CN +# +SET lc_messages=ZH_CN; +SELECT x; +ERROR 42S22: 未知列'x'在'field list' +SET lc_messages=DEFAULT; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/locale.test b/mysql-test/main/locale.test index a9a507bc38798..7d9a07178eed3 100644 --- a/mysql-test/main/locale.test +++ b/mysql-test/main/locale.test @@ -181,3 +181,17 @@ select date_format('2001-10-01', '%c %b %M', 'rm_CH'); select date_format('2001-12-01', '%c %b %M', 'rm_CH'); select date_format('2001-01-06', '%w %a %W', 'de_CH'); select date_format('2001-09-01', '%c %b %M', 'de_CH'); + +--echo # +--echo # MDEV-30630 locale: Chinese error message for ZH_CN +--echo # + +SET lc_messages=ZH_CN; +--error ER_BAD_FIELD_ERROR +SELECT x; + +SET lc_messages=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result index 2112c6d5480f3..5d62e153ac0c7 100644 --- a/mysql-test/main/multi_update.result +++ b/mysql-test/main/multi_update.result @@ -1271,7 +1271,7 @@ EXPLAIN DROP TABLES t1, t2; # End of 10.3 tests # -# MDEV-28538: multi-table UPDATE/DELETE with possible exists-to-in +# MDEV-30538: multi-table UPDATE/DELETE with possible exists-to-in # create table t1 (c1 int, c2 int, c3 int, index idx(c2)); insert into t1 values diff --git a/mysql-test/main/multi_update.test b/mysql-test/main/multi_update.test index 48e6250393bcc..329394e8468d9 100644 --- a/mysql-test/main/multi_update.test +++ b/mysql-test/main/multi_update.test @@ -1134,7 +1134,7 @@ DROP TABLES t1, t2; --echo # End of 10.3 tests --echo # ---echo # MDEV-28538: multi-table UPDATE/DELETE with possible exists-to-in +--echo # MDEV-30538: multi-table UPDATE/DELETE with possible exists-to-in --echo # create table t1 (c1 int, c2 int, c3 int, index idx(c2)); diff --git a/mysql-test/main/myisam_explain_non_select_all.result b/mysql-test/main/myisam_explain_non_select_all.result index 2ff966fdfd359..210679ba29ee8 100644 --- a/mysql-test/main/myisam_explain_non_select_all.result +++ b/mysql-test/main/myisam_explain_non_select_all.result @@ -17,6 +17,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a < 10; id select_type table type possible_keys key key_len ref rows 
filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = 10 where `test`.`t1`.`a` < 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -60,6 +62,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a < 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` < 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -103,6 +107,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 USING t1 WHERE a = 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t1` where `test`.`t1`.`a` = 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -150,6 +156,8 @@ EXPLAIN EXTENDED UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where 1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 update `test`.`t1` join `test`.`t2` set `test`.`t1`.`a` = 10 where `test`.`t1`.`a` = 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -200,6 +208,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 Using where 1 PRIMARY ALL NULL NULL NULL NULL 3 100.00 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = 10 where `test`.`t11`.`a` = 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -248,6 +258,8 @@ EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 2 SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where (1,(/* select#2 */ select 1 from `test`.`t2` where `test`.`t2`.`b` < 3 and 1)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -300,6 +312,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where (`test`.`t1`.`a`,(/* select#2 */ select `test`.`t2`.`b` from `test`.`t2` where `test`.`t1`.`a` < 3 and (`test`.`t1`.`a`) = `test`.`t2`.`b`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -353,6 +366,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 100.00 1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 update `test`.`t1` semi join (`test`.`t2`) join `test`.`t2` set `test`.`t1`.`a` = 10 where `test`.`t2`.`b` < 3 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -405,6 +420,8 @@ id select_type table type possible_keys key key_len ref rows filtered 
Extra 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 1 PRIMARY ALL NULL NULL NULL NULL 3 100.00 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = `test`.`t11`.`a` + 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -457,6 +474,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY system NULL NULL NULL NULL 1 100.00 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` set `test`.`t11`.`a` = `test`.`t11`.`a` + 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -511,6 +530,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 Using where 1 PRIMARY ALL NULL NULL NULL NULL 3 100.00 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = 10 where `test`.`t11`.`a` > 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -555,6 +576,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a > 1 LIMIT 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 1 limit 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -598,6 +621,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE 0; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1003 delete from `test`.`t1` where 0 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -638,6 +663,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 USING t1 WHERE 0; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t1` where 0 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -678,6 +705,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a = 3; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` = 3 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 5 @@ -719,6 +748,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a < 3; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` < 3 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 5 @@ -758,6 +789,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -797,6 
+830,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -840,6 +875,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where @a:=`test`.`t1`.`a` order by `test`.`t1`.`a` limit 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -884,6 +921,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using filesort +Warnings: +Note 1003 delete from `test`.`t1` order by `test`.`t1`.`a`,`test`.`t1`.`b` limit 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -941,6 +980,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 100.00 1 SIMPLE t3 eq_ref PRIMARY PRIMARY 8 test.t2.b2,test.t1.b1 1 100.00 +Warnings: +Note 1003 delete from `test`.`t1`,`test`.`t2`,`test`.`t3` using `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a2` = `test`.`t1`.`a1` and `test`.`t3`.`a3` = `test`.`t2`.`b2` and `test`.`t3`.`b3` = `test`.`t1`.`b1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 13 @@ -993,6 +1034,8 @@ EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where (`test`.`t1`.`a`,(/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` where (`test`.`t1`.`a`) = `test`.`t2`.`a`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1043,6 +1086,8 @@ EXPLAIN EXTENDED DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where +Warnings: +Note 1003 /* select#1 */ delete from `test`.`t1` where (`test`.`t1`.`a1`,(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and (`test`.`t1`.`a1`) = `test`.`t2`.`a2`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1088,6 +1133,8 @@ EXPLAIN EXTENDED DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where +Warnings: +Note 1003 /* select#1 */ delete from `test`.`t1` where (`test`.`t1`.`a1`,(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and (`test`.`t1`.`a1`) = `test`.`t2`.`a2`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ 
-1132,6 +1179,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET i = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00 +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`i` = 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -1175,6 +1224,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL 5 NULL Deleting all rows +Warnings: +Note 1003 delete from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -1221,6 +1272,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1267,6 +1320,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO t2 SELECT * FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 insert into `test`.`t2` select `test`.`t1`.`i` AS `i` from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1311,6 +1366,8 @@ FLUSH TABLES; EXPLAIN EXTENDED REPLACE INTO t2 SELECT * FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 replace into `test`.`t2` select `test`.`t1`.`i` AS `i` from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1351,6 +1408,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO t1 SET i = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(i) values (10) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -1374,6 +1433,8 @@ FLUSH TABLES; EXPLAIN EXTENDED REPLACE INTO t1 SET i = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 replace into `test`.`t1`(i) values (10) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -1402,6 +1463,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1447,6 +1510,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1500,6 +1565,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c 
LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1554,6 +1621,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1603,6 +1672,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1657,6 +1728,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1712,6 +1785,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -1765,6 +1840,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1812,6 +1889,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 ORDER BY a, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort +Warnings: +Note 1003 delete from `test`.`t2` order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -1866,6 +1945,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 6 NULL 5 100.00 +Warnings: +Note 1003 delete from `test`.`t2` order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -1915,6 +1996,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 
AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1963,6 +2046,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -2017,6 +2102,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2072,6 +2159,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2122,6 +2211,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2176,6 +2267,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2231,6 +2324,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`i` = 123 where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2284,6 +2379,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 
range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -2332,6 +2429,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`c` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2387,6 +2486,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 6 NULL 5 100.00 Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`c` = 10 order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2439,6 +2540,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET c2 = 0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`c2` = 0 where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2485,6 +2588,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2534,6 +2639,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a=a+10 WHERE a > 34; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 10 where `test`.`t1`.`a` > 34 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -2581,6 +2688,8 @@ EXPLAIN EXTENDED UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 system NULL NULL NULL NULL 0 0.00 Const row not found 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +Warnings: +Note 1003 update `test`.`t1` set NULL = 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -2624,6 +2733,8 @@ EXPLAIN EXTENDED UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 W id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 system NULL NULL NULL NULL 0 0.00 Const row not found 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 update `test`.`t1` set NULL = 10 where `test`.`t1`.`c3` = 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -2676,6 +2787,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 2 DEPENDENT SUBQUERY t2 ALL IDX NULL NULL NULL 2 100.00 Using 
where Warnings: Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1 +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`f2` = (/* select#2 */ select max(`test`.`t2`.`f4`) from `test`.`t2` where `test`.`t2`.`f3` = `test`.`t1`.`f1`) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -2747,6 +2859,8 @@ EXPLAIN EXTENDED UPDATE v1 SET a = 1 WHERE a > 0; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t11 ALL NULL NULL NULL NULL 2 100.00 Using where 1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00 +Warnings: +Note 1003 update `test`.`t1` `t11` join `test`.`t1` `t12` set `test`.`t11`.`a` = 1 where `test`.`t11`.`a` > 0 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -2792,6 +2906,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 1 SIMPLE t11 ALL NULL NULL NULL NULL 2 100.00 Using where 1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00 +Warnings: +Note 1003 update `test`.`t1` join `test`.`t1` `t11` join `test`.`t1` `t12` set `test`.`t11`.`a` = 1 where `test`.`t11`.`a` = `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -2841,6 +2957,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM v1 WHERE a < 4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ delete from `test`.`t1` where `test`.`t1`.`a` < 4 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -2892,6 +3010,8 @@ EXPLAIN EXTENDED DELETE v1 FROM t2, v1 WHERE t2.x = v1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00 +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2943,6 +3063,8 @@ EXPLAIN EXTENDED DELETE v1 FROM t2, v1 WHERE t2.x = v1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00 +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2987,6 +3109,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO v1 VALUES (10); id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(x) values (10) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -3027,6 +3151,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO v1 SELECT * FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 system NULL NULL NULL NULL 0 0.00 Const row not found +Warnings: +Note 1003 insert into `test`.`t2`(x) /* select#1 */ select NULL AS `a` from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3084,6 +3210,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where 2 DEPENDENT SUBQUERY index_subquery key0 key0 5 func 2 100.00 3 DERIVED t2 
ALL NULL NULL NULL NULL 3 100.00 Using filesort +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where (`test`.`t1`.`a`,(((`test`.`t1`.`a`) in (temporary) on key0))) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3137,6 +3265,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1) 1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` set `test`.`t1`.`a` = 10 where `x`.`b` = `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3192,6 +3322,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY ALL NULL NULL NULL NULL 3 100.00 4 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` semi join ((/* select#4 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `y` set `test`.`t1`.`a` = 10 where `x`.`b` = `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3241,6 +3373,8 @@ JOIN t1 AS a12 ON a12.c1 = a11.c1 id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0) DROP TABLE t1, t2, t3; #73 CREATE TABLE t1 (id INT); @@ -3269,6 +3403,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a=a+1 WHERE a>10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 1 where `test`.`t1`.`a` > 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -3306,6 +3442,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 1 where `test`.`t1`.`a` > 10 order by `test`.`t1`.`a` + 20 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -3331,6 +3469,61 @@ Handler_read_key 4 Sort_range 1 DROP TABLE t1; +#75 +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +# +# query: INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +# select: +# +EXPLAIN INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows Extra +1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL +FLUSH STATUS; +FLUSH TABLES; +EXPLAIN EXTENDED INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len 
ref rows filtered Extra +1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1` values (3,10),(7,11),(3,11) on duplicate key update `test`.`t1`.`id` = last_insert_id(`test`.`t1`.`id`) +# Status of EXPLAIN EXTENDED query +Variable_name Value +Handler_read_key 4 +# Status of testing query execution: +Variable_name Value +Handler_read_key 4 +Handler_read_rnd 1 +Handler_write 3 + +DROP TABLE t1; +#76 +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +CREATE TABLE t2 (a INT, b INT); +INSERT INTO t2 VALUES (1,10), (3,10), (7,11), (3,11); +# +# query: INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +# select: +# +EXPLAIN INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 4 +FLUSH STATUS; +FLUSH TABLES; +EXPLAIN EXTENDED INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 +Warnings: +Note 1003 insert into `test`.`t1` select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` on duplicate key update `test`.`t1`.`id` = last_insert_id(`test`.`t1`.`id`) +# Status of EXPLAIN EXTENDED query +Variable_name Value +Handler_read_key 7 +# Status of testing query execution: +Variable_name Value +Handler_read_key 7 +Handler_read_rnd 1 +Handler_read_rnd_next 5 +Handler_write 4 + +DROP TABLE t1,t2; # # Bug #12949629: CLIENT LOSES CONNECTION AFTER EXECUTING A PROCEDURE WITH # EXPLAIN UPDATE/DEL/INS diff --git a/mysql-test/main/mysqlbinlog.test b/mysql-test/main/mysqlbinlog.test index b12709583e484..22a85393a356d 100644 --- a/mysql-test/main/mysqlbinlog.test +++ b/mysql-test/main/mysqlbinlog.test @@ -534,7 +534,7 @@ remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog.warn; --echo # MYSQL_BINLOG std_data/master-bin.000001 --base64-output 2>&1 # The error produces the absolute path of the mysqlbinlog executable, remove it. 
--replace_regex /.*mariadb-binlog.*:/mariadb-binlog:/i ---error 1 +--error 5 --exec $MYSQL_BINLOG std_data/master-bin.000001 --base64-output 2>&1 --echo # diff --git a/mysql-test/main/name_resolution_cache_debug.result b/mysql-test/main/name_resolution_cache_debug.result new file mode 100644 index 0000000000000..7030176c5fe8e --- /dev/null +++ b/mysql-test/main/name_resolution_cache_debug.result @@ -0,0 +1,25 @@ +connect con1,localhost,root; +create table t1 (a int, b int); +create table t2 (c int, d int); +create view v1 as select c+1 as e, d+1 as f from t2; +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select a from t1"; +execute stmt1; +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; +connection con1; +a +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select e from v1"; +execute stmt1; +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; +connection con1; +e +connection default; +disconnect con1; +SET DEBUG_SYNC = 'RESET'; +drop view v1; +drop table t1,t2; diff --git a/mysql-test/main/name_resolution_cache_debug.test b/mysql-test/main/name_resolution_cache_debug.test new file mode 100644 index 0000000000000..362d883cbd1d7 --- /dev/null +++ b/mysql-test/main/name_resolution_cache_debug.test @@ -0,0 +1,36 @@ + +source include/have_debug_sync.inc; + +connect con1,localhost,root; +create table t1 (a int, b int); +create table t2 (c int, d int); +create view v1 as select c+1 as e, d+1 as f from t2; + +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select a from t1"; +--send execute stmt1 + +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; + +connection con1; +--reap + +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select e from v1"; +--send execute stmt1 + +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; + +connection con1; +--reap + +connection default; +disconnect con1; + +SET DEBUG_SYNC = 'RESET'; +drop view v1; +drop table t1,t2; diff --git a/mysql-test/main/old-mode.result b/mysql-test/main/old-mode.result index 44e8f5bfeceb9..daa2a4dc915ab 100644 --- a/mysql-test/main/old-mode.result +++ b/mysql-test/main/old-mode.result @@ -221,3 +221,39 @@ a UNIX_TIMESTAMP(t1.a) a UNIX_TIMESTAMP(t2.a) DROP TABLE t1; SET time_zone=DEFAULT; SET global mysql56_temporal_format=true; +# +# MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? 
+# +SET old_mode=zero_date_time_cast; +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +CURRENT_TIMESTAMP +1970-01-15 06:56:07 +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +UNIX_TIMESTAMP(CURRENT_TIME()) +NULL +Warnings: +Warning 1292 Truncated incorrect datetime value: '06:56:07' +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +UNIX_TIMESTAMP(TIME'06:56:07') +NULL +Warnings: +Warning 1292 Truncated incorrect datetime value: '06:56:07' +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +UNIX_TIMESTAMP(TIME'10:20:30') +NULL +Warnings: +Warning 1292 Truncated incorrect datetime value: '10:20:30' +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +UNIX_TIMESTAMP(a) +NULL +NULL +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +Warning 1264 Out of range value for column 'a' at row 2 +DROP TABLE t1; +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; diff --git a/mysql-test/main/old-mode.test b/mysql-test/main/old-mode.test index 0419be4bf6af7..e4928329b47e9 100644 --- a/mysql-test/main/old-mode.test +++ b/mysql-test/main/old-mode.test @@ -149,3 +149,23 @@ SELECT t1.a, UNIX_TIMESTAMP(t1.a), t2.a, UNIX_TIMESTAMP(t2.a) FROM t1 t1, t1 t2 DROP TABLE t1; SET time_zone=DEFAULT; SET global mysql56_temporal_format=true; + + +--echo # +--echo # MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? +--echo # + +SET old_mode=zero_date_time_cast; +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +DROP TABLE t1; + +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index 2d52392dedb0a..83bc03fe0f900 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -1107,7 +1107,6 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b { "index": "a", "used_range_estimates": false, "reason": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 200.0585794, "chosen": true @@ -1155,7 +1154,6 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b { "index": "a", "used_range_estimates": false, "reason": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 200.0585794, "chosen": true @@ -4025,7 +4023,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 { "select_id": 1, "steps": [ { - "expanded_query": "select NULL AS `NULL` from t0 join t1 where t0.a = t1.a and t1.a < 3" + "expanded_query": "delete from t0,t1 using t0 join t1 where t0.a = t1.a and t1.a < 3" } ] } @@ -4285,7 +4283,6 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 { "index": "a", "used_range_estimates": false, "reason": "not better than ref estimates", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 3.001757383, "chosen": true @@ -4426,7 +4423,7 @@ explain select * from (select rand() from t1)q { "derived": { "table": "q", "select_id": 2, - "algorithm": "merged" + "algorithm": "materialized" } }, { @@ -4440,7 +4437,7 @@ explain select * from (select rand() from t1)q { } }, { - "expanded_query": "/* select#1 */ select rand() AS `rand()` from (/* select#2 */ select rand() AS `rand()` from t1) q" + "expanded_query": "/* select#1 */ select 
q.`rand()` AS `rand()` from (/* select#2 */ select rand() AS `rand()` from t1) q" } ] } @@ -4449,14 +4446,6 @@ explain select * from (select rand() from t1)q { "join_optimization": { "select_id": 1, "steps": [ - { - "derived": { - "table": "q", - "select_id": 2, - "algorithm": "materialized", - "cause": "Random function in the select" - } - }, { "join_optimization": { "select_id": 2, @@ -9486,7 +9475,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "index": "b", "used_range_estimates": false, "reason": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 20.00585794, "chosen": true @@ -9715,7 +9703,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "index": "a", "used_range_estimates": false, "reason": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 20.00585794, "chosen": true @@ -9772,7 +9759,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "index": "a", "used_range_estimates": false, "reason": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 200.0585794, "chosen": true diff --git a/mysql-test/main/partition.result b/mysql-test/main/partition.result index 0d37d4cb168fd..57aaa227030d4 100644 --- a/mysql-test/main/partition.result +++ b/mysql-test/main/partition.result @@ -2820,3 +2820,41 @@ DROP TABLE t1,t2; # # End of 10.1 tests # +# +# MDEV-30596: Assertion 'pushed_rowid_filter != __null ...' failed +# +create table t1 (a int); +insert into t1 values (NULL),(1),(2); +create table t2 (a int); +insert into t2 select seq from seq_1_to_1000; +create table t3 ( +a1 int, +a2 int, +b int, +c int, +filler1 char(200), +filler2 char(200), +key(a1,a2), +key(b) +) partition by hash(a1) partitions 2; +insert into t3 select seq/100, seq/100, seq, seq, seq, seq from seq_1_to_10000; +analyze table t3 persistent for all; +Table Op Msg_type Msg_text +test.t3 analyze status Engine-independent statistics collected +test.t3 analyze status OK +set @tmp_os= @@optimizer_switch; +set optimizer_switch='materialization=off'; +# Must not show "Using rowid filter": +explain +select * +from t1 +where +t1.a not in (select straight_join t3.a1 +from t2, t3 +where t3.b < 3000 and t3.a2=t2.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 1000 Using where +2 DEPENDENT SUBQUERY t3 ref_or_null a1,b a1 10 func,test.t2.a 198 Using where; Full scan on NULL key +set optimizer_switch=@tmp_os; +drop table t1,t2,t3; diff --git a/mysql-test/main/partition.test b/mysql-test/main/partition.test index 5f4c5f659cae1..d96031a6b2b3e 100644 --- a/mysql-test/main/partition.test +++ b/mysql-test/main/partition.test @@ -3021,7 +3021,44 @@ INSERT INTO t2 VALUES (1),(2); UPDATE t1 SET a = 7 WHERE a = ( SELECT b FROM t2 ) ORDER BY a LIMIT 6; DROP TABLE t1,t2; - --echo # --echo # End of 10.1 tests --echo # + +--echo # +--echo # MDEV-30596: Assertion 'pushed_rowid_filter != __null ...' 
failed +--echo # +--source include/have_sequence.inc +create table t1 (a int); +insert into t1 values (NULL),(1),(2); +create table t2 (a int); +insert into t2 select seq from seq_1_to_1000; + +create table t3 ( + a1 int, + a2 int, + b int, + c int, + filler1 char(200), + filler2 char(200), + key(a1,a2), + key(b) +) partition by hash(a1) partitions 2; +insert into t3 select seq/100, seq/100, seq, seq, seq, seq from seq_1_to_10000; +analyze table t3 persistent for all; + +set @tmp_os= @@optimizer_switch; +set optimizer_switch='materialization=off'; + +--echo # Must not show "Using rowid filter": +explain +select * +from t1 +where + t1.a not in (select straight_join t3.a1 + from t2, t3 + where t3.b < 3000 and t3.a2=t2.a); +set optimizer_switch=@tmp_os; + +drop table t1,t2,t3; + diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index d5e501b06ef1b..21e67ff320a07 100644 --- a/mysql-test/main/ps.result +++ b/mysql-test/main/ps.result @@ -5556,11 +5556,15 @@ EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 F id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0) PREPARE stmt FROM "EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 )"; EXECUTE stmt; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0) DEALLOCATE PREPARE stmt; DROP TABLE t1, t2, t3; # diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result index 5f1a4a1db76db..2ab2f9797581f 100644 --- a/mysql-test/main/rowid_filter.result +++ b/mysql-test/main/rowid_filter.result @@ -353,8 +353,8 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition -1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) Using where; Using rowid filter +1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (2%) Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND @@ -366,42 +366,42 @@ EXPLAIN "nested_loop": [ { "table": { - "table_name": "lineitem", + "table_name": "orders", "access_type": 
"range", + "possible_keys": ["PRIMARY", "i_o_totalprice"], + "key": "i_o_totalprice", + "key_length": "9", + "used_key_parts": ["o_totalprice"], + "rows": 69, + "filtered": 100, + "index_condition": "orders.o_totalprice between 200000 and 230000" + } + }, + { + "table": { + "table_name": "lineitem", + "access_type": "ref", "possible_keys": [ "PRIMARY", "i_l_shipdate", "i_l_orderkey", "i_l_orderkey_quantity" ], - "key": "i_l_shipdate", - "key_length": "4", - "used_key_parts": ["l_shipDATE"], - "rows": 98, - "filtered": 100, - "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" - } - }, - { - "table": { - "table_name": "orders", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "i_o_totalprice"], - "key": "PRIMARY", + "key": "i_l_orderkey", "key_length": "4", - "used_key_parts": ["o_orderkey"], - "ref": ["dbt3_s001.lineitem.l_orderkey"], + "used_key_parts": ["l_orderkey"], + "ref": ["dbt3_s001.orders.o_orderkey"], "rowid_filter": { "range": { - "key": "i_o_totalprice", - "used_key_parts": ["o_totalprice"] + "key": "i_l_shipdate", + "used_key_parts": ["l_shipDATE"] }, - "rows": 69, - "selectivity_pct": 4.6 + "rows": 98, + "selectivity_pct": 1.631973356 }, - "rows": 1, - "filtered": 4.599999905, - "attached_condition": "orders.o_totalprice between 200000 and 230000" + "rows": 4, + "filtered": 1.631973386, + "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" } } ] @@ -412,8 +412,8 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra -1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition -1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 4.60 100.00 Using where; Using rowid filter +1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (2%) 0.15 (2%) 1.63 100.00 Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND @@ -427,57 +427,57 @@ ANALYZE "nested_loop": [ { "table": { - "table_name": "lineitem", + "table_name": "orders", "access_type": "range", - "possible_keys": [ - "PRIMARY", - "i_l_shipdate", - "i_l_orderkey", - "i_l_orderkey_quantity" - ], - "key": "i_l_shipdate", - "key_length": "4", - "used_key_parts": ["l_shipDATE"], + "possible_keys": ["PRIMARY", "i_o_totalprice"], + "key": "i_o_totalprice", + "key_length": "9", + "used_key_parts": ["o_totalprice"], "r_loops": 1, - "rows": 98, - "r_rows": 98, + "rows": 69, + "r_rows": 71, "r_table_time_ms": "REPLACED", "r_other_time_ms": "REPLACED", "filtered": 100, "r_filtered": 100, - "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" + "index_condition": "orders.o_totalprice between 200000 and 230000" } }, { "table": { - "table_name": "orders", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "i_o_totalprice"], - "key": "PRIMARY", + "table_name": "lineitem", + "access_type": 
"ref", + "possible_keys": [ + "PRIMARY", + "i_l_shipdate", + "i_l_orderkey", + "i_l_orderkey_quantity" + ], + "key": "i_l_orderkey", "key_length": "4", - "used_key_parts": ["o_orderkey"], - "ref": ["dbt3_s001.lineitem.l_orderkey"], + "used_key_parts": ["l_orderkey"], + "ref": ["dbt3_s001.orders.o_orderkey"], "rowid_filter": { "range": { - "key": "i_o_totalprice", - "used_key_parts": ["o_totalprice"] + "key": "i_l_shipdate", + "used_key_parts": ["l_shipDATE"] }, - "rows": 69, - "selectivity_pct": 4.6, - "r_rows": 71, - "r_lookups": 96, - "r_selectivity_pct": 10.41666667, + "rows": 98, + "selectivity_pct": 1.631973356, + "r_rows": 98, + "r_lookups": 476, + "r_selectivity_pct": 2.31092437, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" }, - "r_loops": 98, - "rows": 1, - "r_rows": 0.112244898, + "r_loops": 71, + "rows": 4, + "r_rows": 0.154929577, "r_table_time_ms": "REPLACED", "r_other_time_ms": "REPLACED", - "filtered": 4.599999905, + "filtered": 1.631973386, "r_filtered": 100, - "attached_condition": "orders.o_totalprice between 200000 and 230000" + "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" } } ] diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result index 990c6b4e894ed..107470fab4505 100644 --- a/mysql-test/main/rowid_filter_innodb.result +++ b/mysql-test/main/rowid_filter_innodb.result @@ -3891,7 +3891,11 @@ ANALYZE "join_type": "BKA", "mrr_type": "Rowid-ordered scan", "attached_condition": "a.atp = 1", - "r_filtered": 100 + "r_loops": 3, + "r_filtered": 100, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 1 } }, { @@ -3917,7 +3921,11 @@ ANALYZE "join_type": "BKA", "mrr_type": "Rowid-ordered scan", "attached_condition": "fi.fh in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)", - "r_filtered": 40 + "r_loops": 3, + "r_filtered": 40, + "r_unpack_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_effective_rows": 26.66666667 } } ] diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index 99844d05f2a9a..abffe58739df0 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where +1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index a19e98d49a3f5..1a7eb796e969d 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ -3755,7 +3755,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where +1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES 
(30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index 99844d05f2a9a..abffe58739df0 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where +1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result index 7e8a4aeb083a2..94dad70ebd40a 100644 --- a/mysql-test/main/selectivity.result +++ b/mysql-test/main/selectivity.result @@ -1856,7 +1856,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 @@ -1868,7 +1868,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter set optimizer_switch= @save_optimizer_switch; set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result index 76093f16cd78b..7d2dfc15810f2 100644 --- a/mysql-test/main/selectivity_innodb.result +++ b/mysql-test/main/selectivity_innodb.result @@ -1868,7 +1868,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 @@ -1880,7 +1880,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter set optimizer_switch= @save_optimizer_switch; set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; diff --git 
a/mysql-test/main/shutdown.test b/mysql-test/main/shutdown.test index 13b7ac3f67227..71f2156a17f64 100644 --- a/mysql-test/main/shutdown.test +++ b/mysql-test/main/shutdown.test @@ -18,8 +18,8 @@ disconnect c1; create procedure try_shutdown() shutdown; drop procedure try_shutdown; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name --send shutdown diff --git a/mysql-test/main/status.test b/mysql-test/main/status.test index 78626615d457b..a1baa36baf8f9 100644 --- a/mysql-test/main/status.test +++ b/mysql-test/main/status.test @@ -280,7 +280,6 @@ show status like 'Com%function'; # connect (root, localhost, root,,test); connection root; -let $root_connection_id= `select connection_id()`; --disable_warnings create database db37908; --enable_warnings @@ -296,7 +295,6 @@ delimiter ;| connect (user1,localhost,mysqltest_1,,test); connection user1; -let $user1_connection_id= `select connection_id()`; --error ER_TABLEACCESS_DENIED_ERROR select * from db37908.t1; @@ -315,11 +313,8 @@ drop procedure proc37908; drop function func37908; REVOKE ALL PRIVILEGES, GRANT OPTION FROM mysqltest_1@localhost; DROP USER mysqltest_1@localhost; -# Wait till the sessions user1 and root are disconnected -let $wait_condition = - SELECT COUNT(*) = 0 - FROM information_schema.processlist - WHERE id in ('$root_connection_id','$user1_connection_id'); +# Wait until all non-default sessions are disconnected +let $wait_condition = SELECT COUNT(*) = 1 FROM information_schema.processlist; --source include/wait_condition.inc # diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result index 4209e2bc5293e..0c877b03b97f2 100644 --- a/mysql-test/main/subselect.result +++ b/mysql-test/main/subselect.result @@ -118,27 +118,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -228,19 +228,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 
7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -307,21 +314,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -330,7 +350,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS 
`a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -486,8 +506,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -505,8 +528,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -879,6 +905,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -889,6 +934,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1530,8 +1589,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and 
t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1726,8 +1785,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3782,9 +3841,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3794,9 +3854,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3820,16 +3880,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6423,11 +6477,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6560,66 +6613,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT 
+(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b 
FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6665,8 +6745,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6883,7 +6965,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6893,9 +6977,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6921,7 +7007,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6931,9 +7019,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test index be22169a1d693..0f6032fb14871 100644 --- a/mysql-test/main/subselect.test +++ b/mysql-test/main/subselect.test @@ -78,13 +78,13 @@ SELECT ROW(1,2,3) > (SELECT 1,2,1); #enable after fix MDEV-28585 --disable_view_protocol SELECT ROW(1,2,3) = (SELECT 1,2,NULL); -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -SELECT (SELECT 
'b',2,'a') = ROW(1.5,2,'a'); -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); --enable_view_protocol +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; -- error ER_OPERAND_COLUMNS SELECT (SELECT * FROM (SELECT 'test' a,'test' b) a); @@ -122,11 +122,15 @@ set optimizer_switch=@tmp_optimizer_switch; select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3) order by 1 desc limit 1); select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a > t1.a) order by 1 desc limit 1); select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); -#enable afte fix MDEV-27871 ---disable_view_protocol -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; ---enable_view_protocol +select + b, + (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +explain extended +select + b, + (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; select * from t3 where exists (select * from t2 where t2.b=t3.a); select * from t3 where not exists (select * from t2 where t2.b=t3.a); select * from t3 where a in (select b from t2); @@ -159,16 +163,25 @@ delete from t2 where a=2 and b=10; select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 where t2.b=t4.b); create table t5 (a int); -#enable afte fix MDEV-27871 ---disable_view_protocol - -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; ---enable_view_protocol +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; +explain extended +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; -- error ER_SUBQUERY_NO_1_ROW select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; @@ -273,19 +286,19 @@ SELECT * from t2 where topic = any (SELECT topic FROM t2 GROUP BY topic HAVING t SELECT * from t2 where topic = any (SELECT SUM(topic) FROM t1); SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic); SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100); -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; ---enable_view_protocol +SELECT + *, + topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; SELECT * from t2 where topic = 
all (SELECT SUM(topic) FROM t2); SELECT * from t2 where topic <> any (SELECT SUM(topic) FROM t2); SELECT * from t2 where topic IN (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); SELECT * from t2 where topic = any (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; ---enable_view_protocol +SELECT + *, + topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; drop table t1,t2; #forumconthardwarefr7 @@ -529,13 +542,22 @@ select 10.5 IN (SELECT * from t1); select 1.5 > ALL (SELECT * from t1); select 10.5 > ALL (SELECT * from t1); select 1.5 > ANY (SELECT * from t1); +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +select 3.5 IN (SELECT * from t1); +select 10.5 IN (SELECT * from t1); +select 1.5 > ALL (SELECT * from t1); +select 10.5 > ALL (SELECT * from t1); +select 1.5 > ANY (SELECT * from t1); select 10.5 > ANY (SELECT * from t1); --enable_prepare_warnings +--enable_view_protocol +explain extended select (select a+1) from t1; +explain extended select (select a+1) from t1; explain extended select (select a+1) from t1; --disable_prepare_warnings select (select a+1) from t1; drop table t1; ---enable_view_protocol # # Null with keys @@ -955,10 +977,7 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -#enable after fix MDEV-27871 ---disable_view_protocol -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; ---enable_view_protocol +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; drop table t1,t2,t3; # @@ -1076,10 +1095,7 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -#enable after fix MDEV-27871 ---disable_view_protocol -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; ---enable_view_protocol +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; SELECT dbid, name FROM t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND ((date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01')) 
AND t4.task_id = taskid; drop table t1,t2,t3,t4; @@ -2398,9 +2414,6 @@ SELECT a, MAX(b), MIN(b) FROM t1 GROUP BY a; SELECT * FROM t2; SELECT * FROM t3; -#enable after fix MDEV-28570 ---disable_view_protocol - SELECT a FROM t1 GROUP BY a HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); SELECT a FROM t1 GROUP BY a @@ -2465,8 +2478,6 @@ SELECT t1.a, SUM(b) AS sum FROM t1 GROUP BY t1.a HAVING t1.a IN (SELECT t2.c FROM t2 GROUP BY t2.c HAVING t2.c+sum > 20); ---enable_view_protocol - DROP TABLE t1,t2,t3; @@ -2697,19 +2708,17 @@ DROP TABLE t1; # Bug#21540 Subqueries with no from and aggregate functions return # wrong results -#enable after fix MDEV-27871, MDEV-28573 ---disable_view_protocol - CREATE TABLE t1 (a INT, b INT); CREATE TABLE t2 (a INT); INSERT INTO t2 values (1); INSERT INTO t1 VALUES (1,1),(1,2),(2,3),(3,4); SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT + (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; SELECT COUNT(DISTINCT t1.b), (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; SELECT COUNT(DISTINCT t1.b), - (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) + (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; SELECT ( SELECT ( @@ -2723,11 +2732,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) - FROM t1 GROUP BY t1.a LIMIT 1) + FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; DROP TABLE t1,t2; ---enable_view_protocol # # Bug#21727 Correlated subquery that requires filesort: @@ -2962,8 +2970,6 @@ DROP TABLE t1,t2; # Bug#27229 GROUP_CONCAT in subselect with COUNT() as an argument # -#enable after fix MDEV-28571 ---disable_view_protocol CREATE TABLE t1 (a int, b int); CREATE TABLE t2 (m int, n int); INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); @@ -2978,7 +2984,6 @@ SELECT COUNT(*) c, a, FROM t1 GROUP BY a; DROP table t1,t2; ---enable_view_protocol # # Bug#27321 Wrong subquery result in a grouping select @@ -3009,14 +3014,11 @@ SELECT tt.a, FROM t1 WHERE t1.a=tt.a GROUP BY a LIMIT 1) as test FROM t1 as tt GROUP BY tt.a; -#enable after fix MDEV-28571 ---disable_view_protocol SELECT tt.a, MAX( (SELECT (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.d=MAX(t1.b + tt.a) LIMIT 1) FROM t1 WHERE t1.a=tt.a GROUP BY a LIMIT 1)) as test FROM t1 as tt GROUP BY tt.a; ---enable_view_protocol DROP TABLE t1; @@ -3171,8 +3173,6 @@ CREATE TABLE t2 (x INTEGER); INSERT INTO t1 VALUES (1,11), (2,22), (2,22); INSERT INTO t2 VALUES (1), (2); -#enable after fix MDEV-28573 ---disable_view_protocol # wasn't failing, but should --error ER_SUBQUERY_NO_1_ROW SELECT a, COUNT(b), (SELECT COUNT(b) FROM t2) FROM t1 GROUP BY a; @@ -3182,7 +3182,6 @@ SELECT a, COUNT(b), (SELECT COUNT(b) FROM t2) FROM t1 GROUP BY a; SELECT a, COUNT(b), (SELECT COUNT(b)+0 FROM t2) FROM t1 GROUP BY a; SELECT (SELECT SUM(t1.a)/AVG(t2.x) FROM t2) FROM t1; ---enable_view_protocol DROP TABLE t1,t2; @@ -3197,8 +3196,6 @@ GROUP BY a1.a; DROP TABLE t1; #test cases from 29297 -#enable after fix MDEV-28573 ---disable_view_protocol CREATE TABLE t1 (a INT); CREATE TABLE t2 (a INT); INSERT INTO t1 VALUES (1),(2); @@ -3208,7 +3205,6 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a!=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; DROP TABLE t1,t2; ---enable_view_protocol # # Bug#31884 Assertion + crash in subquery in the SELECT clause. 
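For context on the pattern these hunks apply throughout subselect.test: the view-protocol opt-outs that referenced MDEV-27871, MDEV-28570, MDEV-28571 and MDEV-28573 are removed, and each bare subquery expression in a select list gains an explicit AS m alias, so the result-set column name no longer depends on the printed form of the expression. A minimal sketch of why the alias helps, using a hypothetical scratch table demo that is not part of the patch:

# Without an alias the column is named after the whole expression text,
# which a view-based re-run of the statement may not reproduce verbatim;
# with AS m the column metadata is stable either way.
CREATE TABLE demo (a INT);
INSERT INTO demo VALUES (1),(2);
# column name is the literal text "(SELECT MAX(a) FROM demo)"
SELECT (SELECT MAX(a) FROM demo) FROM demo;
# column name is simply "m"
SELECT (SELECT MAX(a) FROM demo) AS m FROM demo;
DROP TABLE demo;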
@@ -5410,14 +5406,11 @@ INSERT INTO t2 VALUES (10,7,0), (0,7,0); CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); -#enable after fix MDEV-27871 ---disable_view_protocol SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 - WHERE t.a != 0 AND t2.a != 0) + WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; ---enable_view_protocol SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -5575,29 +5568,53 @@ INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); +SELECT + (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m + FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +SELECT + (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; set @@session.optimizer_switch=@old_optimizer_switch; # check different IN with default switches -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); ---enable_view_protocol +SELECT + (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +SELECT + (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN 
(SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; # other row operation with NULL single row subquery also should work select (null, null) = (null, null); @@ -5637,10 +5654,9 @@ INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; ---enable_view_protocol +SELECT + ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; # This query just for example, it should return the same as above (1 and NULL) SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -5829,15 +5845,17 @@ INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; -#enable after fix MDEV-27871 ---disable_view_protocol EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; show status like "subquery_cache%"; @@ -5846,17 +5864,20 @@ show status like '%Handler_read%'; set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; show status like "subquery_cache%"; show status like '%Handler_read%'; ---enable_view_protocol drop table t1, t2, t3; @@ -6110,22 +6131,16 @@ INSERT INTO t1 VALUES (1),(2); CREATE TABLE t2 (f2 int); INSERT INTO t2 VALUES (3); -#enable after fix MDEV-29224 ---disable_view_protocol SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; SELECT ( SELECT MAX(f1) FROM t2 ) FROM v1; ---enable_view_protocol INSERT INTO t2 VALUES (4); -#enable after fix MDEV-28573 ---disable_view_protocol --error ER_SUBQUERY_NO_1_ROW SELECT ( SELECT MAX(f1) FROM t2 ) FROM v1; --error ER_SUBQUERY_NO_1_ROW SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; ---enable_view_protocol drop view v1; drop table t1,t2; diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result index d00f7ccd69024..b0e08a360062b 100644 --- a/mysql-test/main/subselect4.result +++ b/mysql-test/main/subselect4.result @@ -20,8 +20,8 @@ WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3)) ORDER BY count(*); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 2 Using index -2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where -3 DEPENDENT SUBQUERY t3 system NULL NULL NULL NULL 0 Const row not found +2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where +3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table # should not crash the next statement SELECT 1 FROM t1 WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3)) diff --git 
a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result index e32e600732852..c7fc78ce28ec1 100644 --- a/mysql-test/main/subselect_no_exists_to_in.result +++ b/mysql-test/main/subselect_no_exists_to_in.result @@ -122,27 +122,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -232,19 +232,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -311,21 +318,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values 
(5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -334,7 +354,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -490,8 +510,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -509,8 +532,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -883,6 +909,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from 
t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -893,6 +938,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1534,8 +1593,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1730,8 +1789,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3785,9 +3844,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT 
COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3797,9 +3857,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3823,16 +3883,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6423,11 +6477,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6560,66 +6613,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM 
t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6665,8 +6745,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6883,7 +6965,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6893,9 +6977,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a 
AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6921,7 +7007,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6931,9 +7019,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result index 07755a5144a47..5998be625350a 100644 --- a/mysql-test/main/subselect_no_mat.result +++ b/mysql-test/main/subselect_no_mat.result @@ -125,27 +125,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -235,19 +235,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY 
t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -314,21 +321,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -337,7 +357,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -493,8 +513,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 
2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -512,8 +535,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -886,6 +912,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -896,6 +941,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1537,8 +1596,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1733,8 +1792,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, 
`thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3785,9 +3844,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3797,9 +3857,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3823,16 +3883,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6418,11 +6472,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6555,66 +6608,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where 
f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL 
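Roughly why the expected values in the block above are all NULL: the left-hand subquery (SELECT f3a, f3b FROM t3 where f3a > 3) matches no rows, so it evaluates to a row of NULLs, and both IN and NOT IN against such an operand stay NULL under three-valued logic. A minimal illustration with hypothetical throwaway tables, not part of the patch:

CREATE TABLE lhs (x INT);
CREATE TABLE rhs (x INT);
INSERT INTO rhs VALUES (1),(2);
# the empty scalar subquery evaluates to NULL, so both probes return NULL
SELECT (SELECT x FROM lhs) IN (SELECT x FROM rhs) AS m;
SELECT (SELECT x FROM lhs) NOT IN (SELECT x FROM rhs) AS m;
DROP TABLE lhs, rhs;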
select (null, null) = (null, null); (null, null) = (null, null) @@ -6660,8 +6740,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6877,7 +6959,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6887,9 +6971,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6915,7 +7001,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6925,9 +7013,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result index 15688fc17179f..4d3ecfd70e154 100644 --- a/mysql-test/main/subselect_no_opts.result +++ b/mysql-test/main/subselect_no_opts.result @@ -121,27 +121,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 
'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -231,19 +231,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -310,21 +317,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 
100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -333,7 +353,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -489,8 +509,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -508,8 +531,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -882,6 +908,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -892,6 +937,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; 
+id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1533,8 +1592,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1729,8 +1788,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3781,9 +3840,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3793,9 +3853,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3819,16 +3879,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6414,11 +6468,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES 
(10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6551,66 +6604,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL 
NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6656,8 +6736,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6874,7 +6956,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6884,9 +6968,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6912,7 +6998,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6922,9 +7010,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; 
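Most of the .result changes in these hunks follow one pattern: a subquery used as a select-list item is given an explicit alias (AS m), so both the result-set column header and the rewritten statement in the EXPLAIN EXTENDED Note 1003 warning print `m` instead of the full expression text. A minimal sketch of that effect, using a hypothetical table named demo that is not part of this patch:

CREATE TABLE demo (a INT);
INSERT INTO demo VALUES (1),(2);
-- Unaliased: the result column is named after the whole expression text.
SELECT (SELECT MAX(a) FROM demo) FROM demo;
-- Aliased: the column header (and the Note 1003 rewrite) is simply m.
SELECT (SELECT MAX(a) FROM demo) AS m FROM demo;
DROP TABLE demo;

The shorter, stable column name is what keeps these result files readable when the long expressions are later reformatted across several lines.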
-SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result index e3bdddbf84b7f..7991829cd49cc 100644 --- a/mysql-test/main/subselect_no_scache.result +++ b/mysql-test/main/subselect_no_scache.result @@ -124,27 +124,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -234,19 +234,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -313,21 +320,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a 
from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -336,7 +356,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -492,8 +512,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -511,8 +534,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -885,6 +911,25 @@ NULL select 1.5 > 
ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -895,6 +940,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1536,8 +1595,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1732,8 +1791,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 
a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3788,9 +3847,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3800,9 +3860,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3826,16 +3886,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6429,11 +6483,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6566,66 +6619,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; 
-(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6671,8 +6751,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6889,7 +6971,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6899,9 +6983,11 @@ id select_type 
table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6927,7 +7013,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 58 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6937,9 +7025,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result index 88f8f78019e1b..a9408248aa8d1 100644 --- a/mysql-test/main/subselect_no_semijoin.result +++ b/mysql-test/main/subselect_no_semijoin.result @@ -121,27 +121,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -231,19 +231,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) 
from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -310,21 +317,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -333,7 +353,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 
21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -489,8 +509,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -508,8 +531,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -882,6 +908,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -892,6 +937,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1533,8 +1592,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a 
and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1729,8 +1788,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3781,9 +3840,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3793,9 +3853,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3819,16 +3879,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6414,11 +6468,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6551,66 +6604,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where 
f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) 
AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6656,8 +6736,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6874,7 +6956,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6884,9 +6968,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6912,7 +6998,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6922,9 +7010,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/system_mysql_db_fix50030.result b/mysql-test/main/system_mysql_db_fix50030.result index 8055211e5b2c3..d870a645a6bb9 100644 --- a/mysql-test/main/system_mysql_db_fix50030.result +++ b/mysql-test/main/system_mysql_db_fix50030.result @@ -166,14 +166,14 @@ show create table servers; Table Create Table servers CREATE TABLE `servers` ( `Server_name` char(64) NOT NULL DEFAULT '', - `Host` char(64) NOT NULL DEFAULT '', + `Host` varchar(2048) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `Username` char(128) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', `Port` int(4) NOT NULL 
DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', - `Owner` char(64) NOT NULL DEFAULT '', + `Owner` varchar(512) NOT NULL DEFAULT '', PRIMARY KEY (`Server_name`) ) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table' show create table proc; diff --git a/mysql-test/main/system_mysql_db_fix50117.result b/mysql-test/main/system_mysql_db_fix50117.result index 214b7ee572dbf..5e9e3df582963 100644 --- a/mysql-test/main/system_mysql_db_fix50117.result +++ b/mysql-test/main/system_mysql_db_fix50117.result @@ -146,14 +146,14 @@ show create table servers; Table Create Table servers CREATE TABLE `servers` ( `Server_name` char(64) NOT NULL DEFAULT '', - `Host` char(64) NOT NULL DEFAULT '', + `Host` varchar(2048) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `Username` char(128) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', - `Owner` char(64) NOT NULL DEFAULT '', + `Owner` varchar(512) NOT NULL DEFAULT '', PRIMARY KEY (`Server_name`) ) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table' show create table proc; diff --git a/mysql-test/main/system_mysql_db_fix50568.result b/mysql-test/main/system_mysql_db_fix50568.result new file mode 100644 index 0000000000000..09c51b80409ea --- /dev/null +++ b/mysql-test/main/system_mysql_db_fix50568.result @@ -0,0 +1,297 @@ +use test; +CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. 
+CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, 
Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions'; +CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl varchar(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_general_ci comment='MySQL plugins'; +CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table'; +CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges'; +CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges'; +CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key 
(help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics'; +CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url text not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories'; +CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation'; +CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords'; +CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names'; +Warnings: +Warning 1280 Name 'Name' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones'; +Warnings: +Warning 1280 Name 'TzId' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions'; +Warnings: +Warning 1280 Name 'TzIdTranTime' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, `Offset` int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types'; +Warnings: +Warning 1280 Name 'TzIdTrTId' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones'; +Warnings: +Warning 1280 Name 'TranTime' ignored for PRIMARY key. 
+CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures'; +CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges'; +CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode 
set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; +CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; +CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(16) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. 
+show tables; +Tables_in_db +column_stats +columns_priv +db +event +func +general_log +global_priv +gtid_slave_pos +help_category +help_keyword +help_relation +help_topic +host +index_stats +innodb_index_stats +innodb_table_stats +ndb_binlog_index +plugin +proc +procs_priv +proxies_priv +roles_mapping +servers +slow_log +table_stats +tables_priv +time_zone +time_zone_leap_second +time_zone_name +time_zone_transition +time_zone_transition_type +transaction_registry +user +show create table db; +Table Create Table +db CREATE TABLE `db` ( + `Host` char(255) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(128) NOT NULL DEFAULT '', + `Select_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Insert_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Update_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Delete_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Create_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Drop_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Grant_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `References_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Index_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Alter_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Create_view_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Show_view_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Create_routine_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Execute_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Event_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Trigger_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + `Delete_history_priv` enum('N','Y') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT 'N', + PRIMARY KEY (`Host`,`Db`,`User`), + KEY `User` (`User`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges' +show create table user; +View Create View character_set_client collation_connection +user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 
4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as 
decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci +show create table func; +Table Create Table +func CREATE TABLE `func` ( + `name` char(64) NOT NULL DEFAULT '', + `ret` tinyint(1) NOT NULL DEFAULT 0, + `dl` char(128) NOT NULL DEFAULT '', + `type` enum('function','aggregate') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL, + PRIMARY KEY (`name`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User defined functions' +show create table global_priv; +Table Create Table +global_priv CREATE TABLE `global_priv` ( + `Host` char(255) NOT NULL DEFAULT '', + `User` char(128) NOT NULL DEFAULT '', + `Priv` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '{}' CHECK (json_valid(`Priv`)), + PRIMARY KEY (`Host`,`User`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Users and global privileges' +show create table tables_priv; +Table Create Table +tables_priv CREATE TABLE `tables_priv` ( + `Host` char(255) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(128) NOT NULL DEFAULT '', + `Table_name` char(64) NOT NULL DEFAULT '', + `Grantor` varchar(384) NOT NULL DEFAULT '', + `Timestamp` timestamp NULL DEFAULT NULL, + `Table_priv` set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger','Delete versioning rows') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', + `Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', + PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`), + KEY `Grantor` (`Grantor`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Table privileges' +show create table columns_priv; +Table Create Table +columns_priv CREATE TABLE `columns_priv` ( + `Host` char(255) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(128) NOT NULL DEFAULT '', + `Table_name` char(64) NOT NULL DEFAULT '', + `Column_name` char(64) NOT NULL DEFAULT '', + `Timestamp` timestamp NULL DEFAULT NULL, + `Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', + PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Column privileges' +show create table procs_priv; +Table Create Table +procs_priv CREATE TABLE `procs_priv` ( + `Host` char(60) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(128) NOT NULL DEFAULT '', + `Routine_name` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', + `Routine_type` enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, + `Grantor` varchar(384) NOT NULL DEFAULT '', + `Proc_priv` set('Execute','Alter Routine','Grant') CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', + `Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + PRIMARY KEY (`Host`,`Db`,`User`,`Routine_name`,`Routine_type`), + KEY `Grantor` (`Grantor`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Procedure privileges' +show create table servers; +Table Create Table +servers CREATE TABLE `servers` ( + `Server_name` char(64) NOT NULL DEFAULT '', + `Host` varchar(2048) NOT NULL 
DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `Username` char(128) NOT NULL DEFAULT '', + `Password` char(64) NOT NULL DEFAULT '', + `Port` int(4) NOT NULL DEFAULT 0, + `Socket` char(64) NOT NULL DEFAULT '', + `Wrapper` char(64) NOT NULL DEFAULT '', + `Owner` varchar(512) NOT NULL DEFAULT '', + PRIMARY KEY (`Server_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table' +show create table proc; +Table Create Table +proc CREATE TABLE `proc` ( + `db` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin NOT NULL DEFAULT '', + `name` char(64) NOT NULL DEFAULT '', + `type` enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, + `specific_name` char(64) NOT NULL DEFAULT '', + `language` enum('SQL') NOT NULL DEFAULT 'SQL', + `sql_data_access` enum('CONTAINS_SQL','NO_SQL','READS_SQL_DATA','MODIFIES_SQL_DATA') NOT NULL DEFAULT 'CONTAINS_SQL', + `is_deterministic` enum('YES','NO') NOT NULL DEFAULT 'NO', + `security_type` enum('INVOKER','DEFINER') NOT NULL DEFAULT 'DEFINER', + `param_list` blob NOT NULL, + `returns` longblob NOT NULL, + `body` longblob NOT NULL, + `definer` varchar(384) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin NOT NULL DEFAULT '', + `created` timestamp NULL DEFAULT NULL, + `modified` timestamp NULL DEFAULT NULL, + `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '', + `comment` text CHARACTER SET utf8mb3 COLLATE utf8mb3_bin NOT NULL, + `character_set_client` char(32) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin DEFAULT NULL, + `collation_connection` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin DEFAULT NULL, + `db_collation` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin DEFAULT NULL, + `body_utf8` longblob DEFAULT NULL, + `aggregate` enum('NONE','GROUP') NOT NULL DEFAULT 'NONE', + PRIMARY KEY (`db`,`name`,`type`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Stored Procedures' +show create table event; +Table Create Table +event CREATE TABLE `event` ( + `db` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin NOT NULL DEFAULT '', + `name` char(64) NOT NULL DEFAULT '', + `body` longblob NOT NULL, + `definer` varchar(384) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin NOT NULL DEFAULT '', + `execute_at` datetime DEFAULT NULL, + `interval_value` int(11) DEFAULT NULL, + `interval_field` enum('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') DEFAULT NULL, + `created` timestamp NOT NULL, + `modified` timestamp NOT NULL, + `last_executed` datetime DEFAULT NULL, + `starts` datetime DEFAULT NULL, + `ends` datetime DEFAULT NULL, + `status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED', + `on_completion` 
enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP', + `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '', + `comment` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin NOT NULL DEFAULT '', + `originator` int(10) unsigned NOT NULL, + `time_zone` char(64) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL DEFAULT 'SYSTEM', + `character_set_client` char(32) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin DEFAULT NULL, + `collation_connection` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin DEFAULT NULL, + `db_collation` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_bin DEFAULT NULL, + `body_utf8` longblob DEFAULT NULL, + PRIMARY KEY (`db`,`name`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Events' +show create table general_log; +Table Create Table +general_log CREATE TABLE `general_log` ( + `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `command_type` varchar(64) NOT NULL, + `argument` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci COMMENT='General log' +show create table slow_log; +Table Create Table +slow_log CREATE TABLE `slow_log` ( + `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `query_time` time(6) NOT NULL, + `lock_time` time(6) NOT NULL, + `rows_sent` int(11) NOT NULL, + `rows_examined` int(11) NOT NULL, + `db` varchar(512) NOT NULL, + `last_insert_id` int(11) NOT NULL, + `insert_id` int(11) NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `sql_text` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `rows_affected` int(11) NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci COMMENT='Slow log' +show create table table_stats; +Table Create Table +table_stats CREATE TABLE `table_stats` ( + `db_name` varchar(64) NOT NULL, + `table_name` varchar(64) NOT NULL, + `cardinality` bigint(21) unsigned DEFAULT NULL, + PRIMARY KEY (`db_name`,`table_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Tables' +show create table column_stats; +Table Create Table +column_stats CREATE TABLE `column_stats` ( + `db_name` varchar(64) NOT NULL, + `table_name` varchar(64) NOT NULL, + `column_name` varchar(64) NOT NULL, + `min_value` varbinary(255) DEFAULT NULL, + `max_value` varbinary(255) DEFAULT NULL, + `nulls_ratio` decimal(12,4) DEFAULT NULL, + `avg_length` decimal(12,4) DEFAULT NULL, + `avg_frequency` decimal(12,4) DEFAULT NULL, + `hist_size` tinyint(3) unsigned DEFAULT NULL, + `hist_type` enum('SINGLE_PREC_HB','DOUBLE_PREC_HB','JSON_HB') DEFAULT NULL, + `histogram` longblob DEFAULT NULL, + PRIMARY KEY 
(`db_name`,`table_name`,`column_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Columns' +show create table index_stats; +Table Create Table +index_stats CREATE TABLE `index_stats` ( + `db_name` varchar(64) NOT NULL, + `table_name` varchar(64) NOT NULL, + `index_name` varchar(64) NOT NULL, + `prefix_arity` int(11) unsigned NOT NULL, + `avg_frequency` decimal(12,4) DEFAULT NULL, + PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`) +) ENGINE=Aria DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Indexes' +DROP VIEW user; +DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, event, proxies_priv, general_log, slow_log, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv, ndb_binlog_index; +show tables; +Tables_in_test diff --git a/mysql-test/main/system_mysql_db_fix50568.test b/mysql-test/main/system_mysql_db_fix50568.test new file mode 100644 index 0000000000000..3429f8c54783c --- /dev/null +++ b/mysql-test/main/system_mysql_db_fix50568.test @@ -0,0 +1,99 @@ +# Embedded server doesn't support external clients +--source include/not_embedded.inc +--source include/have_innodb.inc + +# Don't run this test if $MYSQL_FIX_PRIVILEGE_TABLES isn't set +# to the location of mysql_fix_privilege_tables.sql +if (!$MYSQL_FIX_PRIVILEGE_TABLES) +{ + skip Test needs MYSQL_FIX_PRIVILEGE_TABLES; +} + +# +# This is the test for mysql_fix_privilege_tables +# It checks that system tables from mysql 5.5.68 +# can be upgraded to the current system table format +# +# Note: If this test fails, don't be confused about the errors reported +# by mysql-test-run. This shows warnings generated by +# mysql_fix_system_tables which should be ignored.
+# Instead, concentrate on the errors in r/system_mysql_db.reject + +use test; + +# create system tables as in mysql-5.5.68 +CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges'; + +CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges'; + +CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) 
character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'; + +CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions'; + + +CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl varchar(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_general_ci comment='MySQL plugins'; + + +CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) 
NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table'; + + +CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges'; + +CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges'; + + +CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics'; + + +CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url text not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories'; + + +CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation'; + + +CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords'; + + +CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names'; + + +CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones'; + + +CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions'; + + +CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, `Offset` int signed DEFAULT 0 NOT NULL, Is_DST tinyint 
unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types'; + + +CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones'; + +CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures'; + +CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges'; + + +CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 
'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; + + +CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; + +CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(16) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges'; + + +-- disable_result_log +# Run the mysql_fix_privilege_tables.sql using "mysql --force" +--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES +-- enable_result_log + +# Dump the tables that should be compared +-- source include/system_db_struct.inc + +# Drop all tables created by this test +DROP VIEW user; +DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, event, proxies_priv, general_log, slow_log, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv, ndb_binlog_index; + +# check that we dropped all system tables +show tables; + +# End of 4.1 tests diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result index b90b5c86c15e8..49d23264a63c1 100644 --- a/mysql-test/main/table_value_constr.result +++ b/mysql-test/main/table_value_constr.result @@ -3131,5 +3131,124 @@ INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE)); ERROR HY000: 'ignore' is not allowed in this context DROP TABLE t1; # +# MDEV-28603: VIEW with table value constructor used as single-value +# subquery contains subquery as its first element +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +create table t2 (b int); +insert into t2 values (1), (2); +create view v as select (values ((select * from t1 where a > 5))) as m from t2; +select (values ((select * from t1 where a > 5))) as m from t2; +m +7 +7 +select * from v; +m +7 +7 +with cte as ( select (values ((select * from t1 where a > 5))) as m 
from t2 ) select * from cte; +m +7 +7 +explain select (values ((select * from t1 where a > 5))) as m from t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 +4 SUBQUERY ALL NULL NULL NULL NULL 2 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used +3 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where +explain select * from v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 +5 SUBQUERY ALL NULL NULL NULL NULL 2 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used +4 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where +explain with cte as ( select (values ((select * from t1 where a > 5))) as m from t2 ) select * from cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 +5 SUBQUERY ALL NULL NULL NULL NULL 2 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used +4 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where +prepare stmt from "select (values ((select * from t1 where a > 5))) as m from t2"; +execute stmt; +m +7 +7 +execute stmt; +m +7 +7 +deallocate prepare stmt; +prepare stmt from "select * from v"; +execute stmt; +m +7 +7 +execute stmt; +m +7 +7 +deallocate prepare stmt; +prepare stmt from "with cte as ( select (values ((select * from t1 where a > 5))) as m from t2 ) select * from cte"; +execute stmt; +m +7 +7 +execute stmt; +m +7 +7 +deallocate prepare stmt; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +drop view v; +prepare stmt from "create view v as select (values ((select * from t1 where a > 5))) as m from t2"; +execute stmt; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +select * from v; +m +7 +7 +drop view v; +execute stmt; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +select * from v; +m +7 +7 +deallocate prepare stmt; +prepare stmt from "show create view v"; +execute stmt; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +execute stmt; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +deallocate prepare stmt; +drop view v; +create view v as select (values ((select * from t1 where a > 5 +union +select * from t1 where a > 7))) as m from t2; +select (values ((select * from t1 where a > 5 +union +select * from t1 where a > 7))) as m from t2; +m +7 +7 +select * from v; +m +7 +7 +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED 
DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5 union select `t1`.`a` from `t1` where `t1`.`a` > 7))) AS `m` from `t2` latin1 latin1_swedish_ci +drop view v; +drop table t1,t2; +# # End of 10.4 tests # diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test index 07197329e1064..177aa96e6373c 100644 --- a/mysql-test/main/table_value_constr.test +++ b/mysql-test/main/table_value_constr.test @@ -1738,6 +1738,80 @@ INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT)); INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE)); DROP TABLE t1; +--echo # +--echo # MDEV-28603: VIEW with table value constructor used as single-value +--echo # subquery contains subquery as its first element +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); +create table t2 (b int); +insert into t2 values (1), (2); + +let $q= +select (values ((select * from t1 where a > 5))) as m from t2; + +eval create view v as $q; + +eval $q; +eval select * from v; +eval with cte as ( $q ) select * from cte; + +eval explain $q; +eval explain select * from v; +eval explain with cte as ( $q ) select * from cte; + +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "select * from v"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "with cte as ( $q ) select * from cte"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +show create view v; + +drop view v; + +eval prepare stmt from "create view v as $q"; +execute stmt; +show create view v; +select * from v; +drop view v; +execute stmt; +show create view v; +select * from v; +deallocate prepare stmt; + +prepare stmt from "show create view v"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +drop view v; + +let $q= +select (values ((select * from t1 where a > 5 + union + select * from t1 where a > 7))) as m from t2; + +eval create view v as $q; + +eval $q; +eval select * from v; + +show create view v; + +drop view v; +drop table t1,t2; + --echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/type_time.result b/mysql-test/main/type_time.result index 40746345bdf06..a863eaea6862f 100644 --- a/mysql-test/main/type_time.result +++ b/mysql-test/main/type_time.result @@ -2420,5 +2420,31 @@ SET @@global.mysql56_temporal_format=default; DROP PROCEDURE p1; SET timestamp=DEFAULT; # +# MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? +# +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +CURRENT_TIMESTAMP +1970-01-15 06:56:07 +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +UNIX_TIMESTAMP(CURRENT_TIME()) +1234567 +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +UNIX_TIMESTAMP(TIME'06:56:07') +1234567 +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +UNIX_TIMESTAMP(TIME'10:20:30') +1246830 +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +UNIX_TIMESTAMP(a) +1234567 +1246830 +DROP TABLE t1; +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; +# # End of 10.4 tests # diff --git a/mysql-test/main/type_time.test b/mysql-test/main/type_time.test index 0f67223238c45..9ed5c3a73dce1 100644 --- a/mysql-test/main/type_time.test +++ b/mysql-test/main/type_time.test @@ -1566,6 +1566,25 @@ SET @@global.mysql56_temporal_format=default; DROP PROCEDURE p1; SET timestamp=DEFAULT; +--echo # +--echo # MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? 
+--echo # + +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +DROP TABLE t1; + +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; + + --echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD new file mode 100644 index 0000000000000..77a281667b55c Binary files /dev/null and b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD differ diff --git a/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI new file mode 100644 index 0000000000000..20fc1b97c00fd Binary files /dev/null and b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI differ diff --git a/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm new file mode 100644 index 0000000000000..1eb8ac151338b Binary files /dev/null and b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm differ diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result index 9148f0e8c2b98..294e96e59971a 100644 --- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result +++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result @@ -1,3 +1,7 @@ +# +# MDEV-30698 Cover missing test cases for mariadb-binlog options +# --raw [and] --flashback +# CREATE TABLE t1 (a int); FLUSH LOGS; INSERT INTO t1 VALUES (1); diff --git a/mysql-test/suite/binlog/r/flashback.result b/mysql-test/suite/binlog/r/flashback.result index da08f81478312..68f15a7cc2545 100644 --- a/mysql-test/suite/binlog/r/flashback.result +++ b/mysql-test/suite/binlog/r/flashback.result @@ -702,6 +702,10 @@ include/assert.inc [Table t1 should have 0 rows.] # 6- Rows must be present upon restoring from flashback include/assert.inc [Table t1 should have six rows.] DROP TABLE t1; +# +# MDEV-30698 Cover missing test cases for mariadb-binlog options +# --raw [and] --flashback +# SET binlog_format=statement; Warnings: Warning 1105 MariaDB Galera and flashback do not support binlog format: STATEMENT diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test index f95fc0137a298..252a8577b6c4e 100644 --- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test +++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test @@ -5,6 +5,7 @@ # respective log file specified by --result-file, and shown on-disk. This test # ensures that the log files on disk, created by mariadb-binlog, have the most # up-to-date events from the master. +# Option --raw works only with --read-from-remote-server, otherwise returns error. 
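+# For illustration only (hypothetical invocation, not executed by this test;
+# the host, port and binlog file name below are placeholders):
+#   mariadb-binlog --raw --read-from-remote-server --host=127.0.0.1 --port=3306 master-bin.000001   # writes a raw copy of the binlog file
+#   mariadb-binlog --raw master-bin.000001   # rejected with a non-zero exit code: --raw requires --read-from-remote-server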
# # Methodology: # On the master, rotate to a newly active binlog file and write an event to @@ -20,6 +21,14 @@ --source include/linux.inc --source include/have_log_bin.inc +--echo # +--echo # MDEV-30698 Cover missing test cases for mariadb-binlog options +--echo # --raw [and] --flashback +--echo # +# Test --raw format without -R (--read-from-remote-server) +--error 1 # Error 1 operation not permitted +--exec $MYSQL_BINLOG --raw --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --stop-never --result-file=$MYSQLTEST_VARDIR/tmp/ master-bin.000001 + # Create newly active log CREATE TABLE t1 (a int); FLUSH LOGS; diff --git a/mysql-test/suite/binlog/t/flashback.test b/mysql-test/suite/binlog/t/flashback.test index 76379f35dae9a..7c58b56c93502 100644 --- a/mysql-test/suite/binlog/t/flashback.test +++ b/mysql-test/suite/binlog/t/flashback.test @@ -364,6 +364,14 @@ FLUSH LOGS; DROP TABLE t1; +--echo # +--echo # MDEV-30698 Cover missing test cases for mariadb-binlog options +--echo # --raw [and] --flashback +--echo # + +--error 1 # --raw mode and --flashback mode are not allowed +--exec $MYSQL_BINLOG -vv -B --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000003> $MYSQLTEST_VARDIR/tmp/mysqlbinlog_row_flashback_8.sql + ## Clear SET binlog_format=statement; --error ER_FLASHBACK_NOT_SUPPORTED diff --git a/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test b/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test index c2a7ec9d27aa8..25849c111a72d 100644 --- a/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test +++ b/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test @@ -1,2 +1,2 @@ --let $binlog_limit= 5,1 ---source suite/rpl/include/rpl_binlog_errors.inc +--source suite/rpl/t/rpl_binlog_errors.test diff --git a/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test b/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test index acbe0d59a5ed1..406af58c03b10 100644 --- a/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test +++ b/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_cant_read_event_incident.inc +--source suite/rpl/t/rpl_cant_read_event_incident.test diff --git a/mysql-test/suite/binlog_encryption/rpl_checksum.test b/mysql-test/suite/binlog_encryption/rpl_checksum.test index ca8cdc06726d5..a2abd019e24f5 100644 --- a/mysql-test/suite/binlog_encryption/rpl_checksum.test +++ b/mysql-test/suite/binlog_encryption/rpl_checksum.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_checksum.inc +--source suite/rpl/t/rpl_checksum.test diff --git a/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test b/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test index 8fa44136fc2f2..56fb2be0ce395 100644 --- a/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test +++ b/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_checksum_cache.inc +--source suite/rpl/t/rpl_checksum_cache.test diff --git a/mysql-test/suite/binlog_encryption/rpl_corruption.test b/mysql-test/suite/binlog_encryption/rpl_corruption.test index 1abf2c882ec25..f6ba294439878 100644 --- a/mysql-test/suite/binlog_encryption/rpl_corruption.test +++ b/mysql-test/suite/binlog_encryption/rpl_corruption.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_corruption.inc +--source suite/rpl/t/rpl_corruption.test diff --git a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result 
b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result index 99eda3b61a09b..07ef909d8a6c7 100644 --- a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result +++ b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result @@ -564,3 +564,27 @@ a connection server_1; DROP TABLE t1; include/rpl_end.inc +# +# Start of 10.2 tests +# +# +# MDEV-10134 Add full support for DEFAULT +# +CREATE TABLE t1 (a VARCHAR(100) DEFAULT BINLOG_GTID_POS("master-bin.000001", 600)); +ERROR HY000: Function or expression 'binlog_gtid_pos()' cannot be used in the DEFAULT clause of `a` +# +# End of 10.2 tests +# +# +# Start of 10.3 tests +# +# +# MDEV-13967 Parameter data type control for Item_long_func +# +SELECT MASTER_GTID_WAIT(ROW(1,1),'str'); +ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait' +SELECT MASTER_GTID_WAIT('str',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait' +# +# End of 10.3 tests +# diff --git a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test index b9f5a18a58893..b183c1d4b4ec7 100644 --- a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test +++ b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_gtid_basic.inc +--source suite/rpl/t/rpl_gtid_basic.test diff --git a/mysql-test/suite/binlog_encryption/rpl_incident.test b/mysql-test/suite/binlog_encryption/rpl_incident.test index b6d2a24a71e10..5a707774f3c86 100644 --- a/mysql-test/suite/binlog_encryption/rpl_incident.test +++ b/mysql-test/suite/binlog_encryption/rpl_incident.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_incident.inc +--source suite/rpl/t/rpl_incident.test diff --git a/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test b/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test index 872b8cd35981e..532db963e63c1 100644 --- a/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test +++ b/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_init_slave_errors.inc +--source suite/rpl/t/rpl_init_slave_errors.test diff --git a/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test b/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test index 9e0bb9598bf13..35ad09647a6e2 100644 --- a/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test +++ b/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_loaddata_local.inc +--source suite/rpl/t/rpl_loaddata_local.test diff --git a/mysql-test/suite/binlog_encryption/rpl_loadfile.test b/mysql-test/suite/binlog_encryption/rpl_loadfile.test index 84e6ecd7a0db9..235c4a3d29e86 100644 --- a/mysql-test/suite/binlog_encryption/rpl_loadfile.test +++ b/mysql-test/suite/binlog_encryption/rpl_loadfile.test @@ -1,4 +1,4 @@ ---source suite/rpl/include/rpl_loadfile.inc +--source suite/rpl/t/rpl_loadfile.test --let $datadir= `SELECT @@datadir` diff --git a/mysql-test/suite/binlog_encryption/rpl_packet.test b/mysql-test/suite/binlog_encryption/rpl_packet.test index 43637314236ea..28beae1a91c97 100644 --- a/mysql-test/suite/binlog_encryption/rpl_packet.test +++ b/mysql-test/suite/binlog_encryption/rpl_packet.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_packet.inc +--source suite/rpl/t/rpl_packet.test diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test b/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test index 8a26778c8f2df..0c3e538693084 100644 
--- a/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_parallel_ignored_errors.inc +--source suite/rpl/t/rpl_parallel_ignored_errors.test diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test b/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test index 7bdfaaf9adb7b..e342e4d61ffae 100644 --- a/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc +--source suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test diff --git a/mysql-test/suite/binlog_encryption/rpl_relayrotate.test b/mysql-test/suite/binlog_encryption/rpl_relayrotate.test index e52f5159fcc81..a8dabdc3e8dc2 100644 --- a/mysql-test/suite/binlog_encryption/rpl_relayrotate.test +++ b/mysql-test/suite/binlog_encryption/rpl_relayrotate.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_relayrotate.inc +--source suite/rpl/t/rpl_relayrotate.test diff --git a/mysql-test/suite/binlog_encryption/rpl_semi_sync.test b/mysql-test/suite/binlog_encryption/rpl_semi_sync.test index dfc68b699c00c..2e0907cce3039 100644 --- a/mysql-test/suite/binlog_encryption/rpl_semi_sync.test +++ b/mysql-test/suite/binlog_encryption/rpl_semi_sync.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_semi_sync.inc +--source suite/rpl/t/rpl_semi_sync.test diff --git a/mysql-test/suite/binlog_encryption/rpl_skip_replication.test b/mysql-test/suite/binlog_encryption/rpl_skip_replication.test index 40992586c7c64..11c5b33b36290 100644 --- a/mysql-test/suite/binlog_encryption/rpl_skip_replication.test +++ b/mysql-test/suite/binlog_encryption/rpl_skip_replication.test @@ -1,2 +1,2 @@ --let $use_remote_mysqlbinlog= 1 ---source suite/rpl/include/rpl_skip_replication.inc +--source suite/rpl/t/rpl_skip_replication.test diff --git a/mysql-test/suite/binlog_encryption/rpl_special_charset.test b/mysql-test/suite/binlog_encryption/rpl_special_charset.test index eb6972048601d..c74f8915798e1 100644 --- a/mysql-test/suite/binlog_encryption/rpl_special_charset.test +++ b/mysql-test/suite/binlog_encryption/rpl_special_charset.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_special_charset.inc +--source suite/rpl/t/rpl_special_charset.test diff --git a/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test b/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test index 0dab68a4b08b0..1e3992dc5d9e8 100644 --- a/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test +++ b/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_sporadic_master.inc +--source suite/rpl/t/rpl_sporadic_master.test diff --git a/mysql-test/suite/binlog_encryption/rpl_ssl.test b/mysql-test/suite/binlog_encryption/rpl_ssl.test index 9a4788c1d2d03..fb30d83ab5e24 100644 --- a/mysql-test/suite/binlog_encryption/rpl_ssl.test +++ b/mysql-test/suite/binlog_encryption/rpl_ssl.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_ssl.inc +--source suite/rpl/t/rpl_ssl.test diff --git a/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test b/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test index 45d18a2541055..d5a08bde96904 100644 --- a/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test +++ 
b/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_stm_relay_ign_space.inc +--source suite/rpl/t/rpl_stm_relay_ign_space.test diff --git a/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test b/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test index 2a16d90f9aded..c65cc202ba24c 100644 --- a/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_switch_stm_row_mixed.inc +--source suite/rpl/t/rpl_switch_stm_row_mixed.test diff --git a/mysql-test/suite/binlog_encryption/rpl_sync.test b/mysql-test/suite/binlog_encryption/rpl_sync.test index 8dbd6ff254b87..1ff7222856560 100644 --- a/mysql-test/suite/binlog_encryption/rpl_sync.test +++ b/mysql-test/suite/binlog_encryption/rpl_sync.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_sync_test.inc +--source suite/rpl/t/rpl_sync.test diff --git a/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test b/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test index 30f5f247c23f5..5a9a79bad423e 100644 --- a/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test +++ b/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_temporal_format_default_to_default.inc +--source suite/rpl/t/rpl_temporal_format_default_to_default.test diff --git a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test index 68afb4148ef46..b9576b30f924a 100644 --- a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test +++ b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test @@ -3,4 +3,4 @@ --let $force_master_mysql56_temporal_format=false; --let $force_slave_mysql56_temporal_format=true; ---source suite/rpl/include/rpl_temporal_format_default_to_default.inc +--source suite/rpl/t/rpl_temporal_format_default_to_default.test diff --git a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test index 96d928fcf086b..7d09942814e30 100644 --- a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test +++ b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test @@ -1,4 +1,4 @@ --let $force_master_mysql56_temporal_format=true; --let $force_slave_mysql56_temporal_format=false; ---source suite/rpl/include/rpl_temporal_format_default_to_default.inc +--source suite/rpl/t/rpl_temporal_format_default_to_default.test diff --git a/mysql-test/suite/binlog_encryption/rpl_typeconv.test b/mysql-test/suite/binlog_encryption/rpl_typeconv.test index fe56a14825641..6761cddfb87f8 100644 --- a/mysql-test/suite/binlog_encryption/rpl_typeconv.test +++ b/mysql-test/suite/binlog_encryption/rpl_typeconv.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_typeconv.inc +--source suite/rpl/t/rpl_typeconv.test diff --git a/mysql-test/suite/encryption/r/import_4k.result b/mysql-test/suite/encryption/r/import_4k.result new file mode 100644 index 0000000000000..959e2498e00ff --- /dev/null +++ b/mysql-test/suite/encryption/r/import_4k.result @@ -0,0 +1,10 @@ +set @save_limit = @@GLOBAL.innodb_limit_optimistic_insert_debug; +set global 
innodb_limit_optimistic_insert_debug=3; +create table t1 (a INT PRIMARY KEY) engine=InnoDB ENCRYPTED=YES; +insert into t1 select * from seq_1_to_6000; +flush table t1 for export; +unlock tables; +alter table t1 discard tablespace; +alter table t1 import tablespace; +set global innodb_limit_optimistic_insert_debug=@save_limit; +drop table t1; diff --git a/mysql-test/suite/encryption/t/import_4k.opt b/mysql-test/suite/encryption/t/import_4k.opt new file mode 100644 index 0000000000000..e5b58602036b7 --- /dev/null +++ b/mysql-test/suite/encryption/t/import_4k.opt @@ -0,0 +1 @@ +--innodb-page-size=4k diff --git a/mysql-test/suite/encryption/t/import_4k.test b/mysql-test/suite/encryption/t/import_4k.test new file mode 100644 index 0000000000000..aef7c702d1289 --- /dev/null +++ b/mysql-test/suite/encryption/t/import_4k.test @@ -0,0 +1,20 @@ +--source include/have_innodb.inc +--source include/have_sequence.inc +--source include/have_example_key_management_plugin.inc +--source include/have_debug.inc + +set @save_limit = @@GLOBAL.innodb_limit_optimistic_insert_debug; +set global innodb_limit_optimistic_insert_debug=3; +create table t1 (a INT PRIMARY KEY) engine=InnoDB ENCRYPTED=YES; +insert into t1 select * from seq_1_to_6000; +flush table t1 for export; +--let $datadir= `select @@datadir` +--copy_file $datadir/test/t1.ibd $datadir/t1.ibd +--copy_file $datadir/test/t1.cfg $datadir/t1.cfg +unlock tables; +alter table t1 discard tablespace; +--move_file $datadir/t1.ibd $datadir/test/t1.ibd +--move_file $datadir/t1.cfg $datadir/test/t1.cfg +alter table t1 import tablespace; +set global innodb_limit_optimistic_insert_debug=@save_limit; +drop table t1; diff --git a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test index 2a16d90f9aded..c65cc202ba24c 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_switch_stm_row_mixed.inc +--source suite/rpl/t/rpl_switch_stm_row_mixed.test diff --git a/mysql-test/suite/galera/galera_2x2nodes.cnf b/mysql-test/suite/galera/galera_2x2nodes.cnf new file mode 100644 index 0000000000000..922906eac6f39 --- /dev/null +++ b/mysql-test/suite/galera/galera_2x2nodes.cnf @@ -0,0 +1,77 @@ +# Use default setting for mysqld processes +!include include/default_mysqld.cnf + +[mysqld] +loose-innodb +log-bin=mysqld-bin +log-slave-updates +binlog-format=row +innodb-autoinc-lock-mode=2 +default-storage-engine=innodb +wsrep_gtid_mode=1 +gtid_ignore_duplicates +auto_increment_increment=3 + +wsrep-provider=@ENV.WSREP_PROVIDER +# enforce read-committed characteristics across the cluster +# wsrep-causal-reads=ON +wsrep-sync-wait=15 + +[mysqld.1] +wsrep-on=1 +#galera_port=@OPT.port +#ist_port=@OPT.port +#sst_port=@OPT.port +wsrep_cluster_address=gcomm:// +wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M' +wsrep_node_address='127.0.0.1:@mysqld.1.#galera_port' +wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port +wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' + +[mysqld.2] +wsrep-on=1 +#galera_port=@OPT.port +#ist_port=@OPT.port +#sst_port=@OPT.port +wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port' 
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M' +wsrep_node_address='127.0.0.1:@mysqld.2.#galera_port' +wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port +wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port' + +[mysqld.3] +wsrep-on=1 +#galera_port=@OPT.port +#ist_port=@OPT.port +#sst_port=@OPT.port +wsrep_cluster_address=gcomm:// +wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.3.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M' +wsrep_node_address='127.0.0.1:@mysqld.3.#galera_port' +wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port +wsrep_sst_receive_address='127.0.0.1:@mysqld.3.#sst_port' + +[mysqld.4] +wsrep-on=1 +#galera_port=@OPT.port +#ist_port=@OPT.port +#sst_port=@OPT.port +wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.3.#galera_port' +wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.4.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M' +wsrep_node_address='127.0.0.1:@mysqld.4.#galera_port' +wsrep_node_incoming_address=127.0.0.1:@mysqld.4.port +wsrep_sst_receive_address='127.0.0.1:@mysqld.4.#sst_port' + +[ENV] +NODE_MYPORT_1= @mysqld.1.port +NODE_MYSOCK_1= @mysqld.1.socket + +NODE_MYPORT_2= @mysqld.2.port +NODE_MYSOCK_2= @mysqld.2.socket + +NODE_MYPORT_3= @mysqld.3.port +NODE_MYSOCK_3= @mysqld.3.socket + +NODE_MYPORT_4= @mysqld.4.port +NODE_MYSOCK_4= @mysqld.4.socket + + diff --git a/mysql-test/suite/galera/include/kill_galera.inc b/mysql-test/suite/galera/include/kill_galera.inc index 56118df84f976..28a1b0f368c3c 100644 --- a/mysql-test/suite/galera/include/kill_galera.inc +++ b/mysql-test/suite/galera/include/kill_galera.inc @@ -6,8 +6,8 @@ if (!$kill_signal) } # Write file to make mysql-test-run.pl expect the crash, but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Kill the connected server diff --git a/mysql-test/suite/galera/include/shutdown_mysqld.inc b/mysql-test/suite/galera/include/shutdown_mysqld.inc index 54bba1318e717..793be8d76acb2 100644 --- a/mysql-test/suite/galera/include/shutdown_mysqld.inc +++ b/mysql-test/suite/galera/include/shutdown_mysqld.inc @@ -8,8 +8,8 @@ if ($rpl_inited) } # Write file to make mysql-test-run.pl expect the "crash", but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Send shutdown to the connected server diff --git a/mysql-test/suite/galera/r/galera_query_cache_invalidate.result b/mysql-test/suite/galera/r/galera_query_cache_invalidate.result new file mode 100644 index 0000000000000..98438b3b52782 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_query_cache_invalidate.result @@ -0,0 +1,119 @@ +connection node_2; +connection node_1; +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; +connect node_4, 
127.0.0.1, root, , test, $NODE_MYPORT_4; +connection node_2; +call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node."); +connection node_4; +call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node."); +connection node_3; +CHANGE MASTER TO master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_1, master_use_gtid=current_pos;; +START SLAVE; +include/wait_for_slave_to_start.inc +connection node_1; +CREATE TABLE t1 (id bigint primary key, msg varchar(100)) engine=innodb; +SET AUTOCOMMIT=1; +INSERT INTO t1 VALUES (4000000, 'foobar'); +SELECT COUNT(*) FROM t1; +COUNT(*) +50001 +connection node_3; +connection node_1; +# node_1 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +connection node_2; +# node_2 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +connection node_3; +# node_3 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +connection node_4; +# node_4 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +connection node_1; +# node_1 insert new +INSERT INTO t1 values (5000000, 'foobar'); +connection node_3; +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +connection node_2; +# node_2 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +connection node_3; +# node_3 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +connection node_4; +# node_4 +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +SELECT * FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +connection node_2; +# node_3 different query same table +SELECT id, msg FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +connection node_4; +# node_6 different query same table +SELECT id, msg FROM t1 WHERE msg='foobar'; +id msg +4000000 foobar +5000000 foobar +connection node_1; +drop table t1; +connection node_3; +connection node_3; +STOP SLAVE; +RESET SLAVE ALL; +connection node_1; +SET SESSION WSREP_ON=OFF; +RESET MASTER; +SET SESSION WSREP_ON=ON; +disconnect node_2; +disconnect node_1; +# End of test diff --git a/mysql-test/suite/galera/r/galera_sequences.result b/mysql-test/suite/galera/r/galera_sequences.result index 7276cb8dbdeb5..da669e6774e42 100644 --- a/mysql-test/suite/galera/r/galera_sequences.result +++ b/mysql-test/suite/galera/r/galera_sequences.result @@ -1,6 +1,11 @@ connection node_2; connection node_1; connection node_1; +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +CALL mtr.add_suppression("WSREP: CREATE TABLE isolation failure"); +connection node_2; +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +connection node_1; CREATE SEQUENCE `seq` start with 1 minvalue 1 maxvalue 1000000 increment by 0 cache 1000 nocycle ENGINE=InnoDB; SHOW CREATE SEQUENCE seq; Table Create Table @@ -47,6 +52,58 @@ NEXT VALUE FOR Seq1_1 3001 connection node_1; DROP SEQUENCE Seq1_1; -CALL 
mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +connection node_1; +CREATE TABLE t2 (d CHAR(1)KEY); +SET SESSION autocommit=0; +INSERT INTO t2 VALUES(1); +CREATE TEMPORARY SEQUENCE seq1 NOCACHE ENGINE=INNODB; +CREATE SEQUENCE seq2 NOCACHE ENGINE=INNODB; +COMMIT; +SET SESSION AUTOCOMMIT=1; +SHOW CREATE TABLE seq1; +Table Create Table +seq1 CREATE TEMPORARY TABLE `seq1` ( + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', + `increment` bigint(21) NOT NULL COMMENT 'increment value', + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' +) ENGINE=InnoDB SEQUENCE=1 connection node_2; -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +SHOW CREATE SEQUENCE seq1; +ERROR 42S02: Table 'test.seq1' doesn't exist +SHOW CREATE SEQUENCE seq2; +Table Create Table +seq2 CREATE SEQUENCE `seq2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=InnoDB +connection node_1; +SET SESSION autocommit=1; +DROP SEQUENCE seq1; +DROP SEQUENCE seq2; +DROP TABLE t2; +SET SESSION AUTOCOMMIT=0; +SET SESSION wsrep_OSU_method='RSU'; +CREATE TABLE t1(c1 VARCHAR(10)); +INSERT INTO t1 (c1) VALUES(''); +create temporary sequence sq1 NOCACHE engine=innodb; +create sequence sq2 NOCACHE engine=innodb; +COMMIT; +SHOW CREATE SEQUENCE sq1; +Table Create Table +sq1 CREATE SEQUENCE `sq1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=InnoDB +SHOW CREATE SEQUENCE sq2; +Table Create Table +sq2 CREATE SEQUENCE `sq2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=InnoDB +connection node_2; +SHOW CREATE SEQUENCE sq1; +ERROR 42S02: Table 'test.sq1' doesn't exist +SHOW CREATE SEQUENCE sq2; +ERROR 42S02: Table 'test.sq2' doesn't exist +connection node_1; +SET SESSION AUTOCOMMIT=1; +DROP TABLE t1; +DROP SEQUENCE sq1; +DROP SEQUENCE sq2; +SET SESSION wsrep_OSU_method='TOI'; diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test index c535ac455b9a8..b36a0de57b6ee 100644 --- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test +++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test @@ -35,8 +35,8 @@ UPDATE t1 SET f2 = 'c' WHERE f1 > 2; --connection node_2 # Write file to make mysql-test-run.pl expect the crash, but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name --let KILL_NODE_PIDFILE = `SELECT @@pid_file` diff --git a/mysql-test/suite/galera/t/galera_query_cache_invalidate.cnf b/mysql-test/suite/galera/t/galera_query_cache_invalidate.cnf new file mode 100644 index 0000000000000..49269422a35fd --- /dev/null +++ b/mysql-test/suite/galera/t/galera_query_cache_invalidate.cnf @@ -0,0 +1,25 @@ +!include ../galera_2x2nodes.cnf + +[mysqld.1] 
+wsrep_gtid_domain_id=1 +server-id=11 +query_cache_type=1 +query_cache_size=15M + +[mysqld.2] +wsrep_gtid_domain_id=1 +server-id=12 +query_cache_type=1 +query_cache_size=15M + +[mysqld.3] +wsrep_gtid_domain_id=2 +server-id=13 +query_cache_type=1 +query_cache_size=15M + +[mysqld.4] +wsrep_gtid_domain_id=2 +server-id=21 +query_cache_type=1 +query_cache_size=15M diff --git a/mysql-test/suite/galera/t/galera_query_cache_invalidate.test b/mysql-test/suite/galera/t/galera_query_cache_invalidate.test new file mode 100644 index 0000000000000..d72d8a9ba40b1 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_query_cache_invalidate.test @@ -0,0 +1,120 @@ +--source include/big_test.inc +--source include/force_restart.inc +--source include/galera_cluster.inc +--source include/have_sequence.inc + +# +# MDEV-28641 : Query cache entries not invalidated on slave of a Galera cluster +# +# We use two 2-node galera clusters as follows +# +# A(1) <-> B(2) {Galera cluster 1} +# | {Async replication} +# D(3) <-> E(4) {Galera cluster 2} +# +# Normal asynchronous replication is used between nodes 1 and 3, +# so that node_1 is the master and node_3 is a slave. +# +# In this test we cannot check whether a given query is fast or slow, but we can +# check that every node sees all rows (which was not the case before this fix) +# +--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 +--connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4 + +--connection node_2 +call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node."); +--connection node_4 +call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node."); + +--connection node_3 + +--replace_result $NODE_MYPORT_1 NODE_MYPORT_1 +--eval CHANGE MASTER TO master_host='127.0.0.1', master_user='root', master_port=$NODE_MYPORT_1, master_use_gtid=current_pos; +START SLAVE; +--source include/wait_for_slave_to_start.inc + +--connection node_1 + +CREATE TABLE t1 (id bigint primary key, msg varchar(100)) engine=innodb; +--disable_query_log +INSERT INTO t1 SELECT seq, md5(rand()) from seq_1_to_50000; +COMMIT; +--enable_query_log +SET AUTOCOMMIT=1; +INSERT INTO t1 VALUES (4000000, 'foobar'); +SELECT COUNT(*) FROM t1; +--sync_slave_with_master node_3 + +# +# All nodes should see one row; the first query is slow and the second is fast +# +--connection node_1 +--echo # node_1 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; +--connection node_2 +--echo # node_2 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; +--connection node_3 +--echo # node_3 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; +--connection node_4 +--echo # node_4 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; +# +# Insert a new row on the master; this should cause query cache +# invalidation +# +--connection node_1 +--echo # node_1 insert new +INSERT INTO t1 values (5000000, 'foobar'); +--sync_slave_with_master node_3 + +# +# All nodes should see 2 rows +# +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; + +--connection node_2 +--echo # node_2 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; +--connection node_3 +--echo # node_3 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; +--connection node_4 +--echo # node_4 +SELECT * FROM t1 WHERE msg='foobar'; +SELECT * FROM t1 WHERE msg='foobar'; + +--connection node_2 +--echo # node_3 different query same table +SELECT id, msg FROM t1 WHERE msg='foobar'; + +--connection node_4 +--echo # node_6
different query same table +SELECT id, msg FROM t1 WHERE msg='foobar'; + +# +# Cleanup +# +--connection node_1 +drop table t1; +--sync_slave_with_master node_3 + +--connection node_3 +STOP SLAVE; +RESET SLAVE ALL; + +--connection node_1 +SET SESSION WSREP_ON=OFF; +RESET MASTER; +SET SESSION WSREP_ON=ON; + +--source include/galera_end.inc +--echo # End of test diff --git a/mysql-test/suite/galera/t/galera_sequences.cnf b/mysql-test/suite/galera/t/galera_sequences.cnf new file mode 100644 index 0000000000000..98e724fb2d0f4 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_sequences.cnf @@ -0,0 +1,9 @@ +!include ../galera_2nodes.cnf + +[mysqld.1] +log-bin +log-slave-updates + +[mysqld.2] +log-bin +log-slave-updates diff --git a/mysql-test/suite/galera/t/galera_sequences.test b/mysql-test/suite/galera/t/galera_sequences.test index d469cc73516ea..faa3b46d2a797 100644 --- a/mysql-test/suite/galera/t/galera_sequences.test +++ b/mysql-test/suite/galera/t/galera_sequences.test @@ -4,6 +4,13 @@ # MDEV-19353 : Alter Sequence do not replicate to another nodes with in Galera Cluster # +--connection node_1 +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +CALL mtr.add_suppression("WSREP: CREATE TABLE isolation failure"); +--connection node_2 + +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); + --connection node_1 CREATE SEQUENCE `seq` start with 1 minvalue 1 maxvalue 1000000 increment by 0 cache 1000 nocycle ENGINE=InnoDB; SHOW CREATE SEQUENCE seq; @@ -45,8 +52,48 @@ select NEXT VALUE FOR Seq1_1; --connection node_1 DROP SEQUENCE Seq1_1; -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +# +# MDEV-24045 : Assertion client_state_.mode() != wsrep::client_state::m_toi failed in int wsrep::transaction::before_commit() +# +--connection node_1 +CREATE TABLE t2 (d CHAR(1)KEY); +SET SESSION autocommit=0; +INSERT INTO t2 VALUES(1); +CREATE TEMPORARY SEQUENCE seq1 NOCACHE ENGINE=INNODB; +CREATE SEQUENCE seq2 NOCACHE ENGINE=INNODB; +COMMIT; +SET SESSION AUTOCOMMIT=1; +SHOW CREATE TABLE seq1; --connection node_2 - -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +--error ER_NO_SUCH_TABLE +SHOW CREATE SEQUENCE seq1; +SHOW CREATE SEQUENCE seq2; +--connection node_1 +SET SESSION autocommit=1; +DROP SEQUENCE seq1; +DROP SEQUENCE seq2; +DROP TABLE t2; +# +# Case2 +# +SET SESSION AUTOCOMMIT=0; +SET SESSION wsrep_OSU_method='RSU'; +CREATE TABLE t1(c1 VARCHAR(10)); +INSERT INTO t1 (c1) VALUES(''); +create temporary sequence sq1 NOCACHE engine=innodb; +create sequence sq2 NOCACHE engine=innodb; +COMMIT; +SHOW CREATE SEQUENCE sq1; +SHOW CREATE SEQUENCE sq2; +--connection node_2 +--error ER_NO_SUCH_TABLE +SHOW CREATE SEQUENCE sq1; +--error ER_NO_SUCH_TABLE +SHOW CREATE SEQUENCE sq2; +--connection node_1 +SET SESSION AUTOCOMMIT=1; +DROP TABLE t1; +DROP SEQUENCE sq1; +DROP SEQUENCE sq2; +SET SESSION wsrep_OSU_method='TOI'; diff --git a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf index 9f99adbd711d6..cd7a892f4c911 100644 --- a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf +++ b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf @@ -55,7 +55,7 @@ wsrep-on=1 #galera_port=@OPT.port #ist_port=@OPT.port #sst_port=@OPT.port -wsrep_cluster_address='gcomm:// +wsrep_cluster_address=gcomm:// 
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.4.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M' wsrep_node_address='127.0.0.1:@mysqld.4.#galera_port' wsrep_node_incoming_address=127.0.0.1:@mysqld.4.port diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test index eb7f5603452ee..ab8b62b969a91 100644 --- a/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test +++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test @@ -15,8 +15,8 @@ --connection node_2 SET GLOBAL debug_dbug="d,crash_last_fragment_commit_after_fragment_removal"; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name CREATE TABLE t1 (f1 VARCHAR(30)) ENGINE=InnoDB; diff --git a/mysql-test/suite/gcol/r/gcol_rollback.result b/mysql-test/suite/gcol/r/gcol_rollback.result index 5ee94d3ef446b..0bbf034122bd8 100644 --- a/mysql-test/suite/gcol/r/gcol_rollback.result +++ b/mysql-test/suite/gcol/r/gcol_rollback.result @@ -79,10 +79,29 @@ a b c d ROLLBACK; SET DEBUG_SYNC = 'now SIGNAL dml_done'; connection con1; -disconnect con1; connection default; SELECT * FROM t; a b d 9 10 29 DROP TABLE t; SET DEBUG_SYNC = 'RESET'; +# +# MDEV-30597 Assertion `flag == 1' failed in +# row_build_index_entry_low +# +CREATE TABLE t1 ( +col1 INT PRIMARY KEY, col_text TEXT, +col_text_g TEXT GENERATED ALWAYS AS (SUBSTR(col_text,1,499)) +) ENGINE = InnoDB ROW_FORMAT = Compact; +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; +INSERT INTO t1 (col1) VALUES (1) ; +DELETE FROM t1 WHERE col1 = 1; +ALTER TABLE t1 ADD UNIQUE INDEX (col_text_g(9)); +BEGIN; +INSERT INTO t1 (col1) VALUES (1); +ROLLBACK; +disconnect con1; +DROP TABLE t1; +# End of 10.4 tests diff --git a/mysql-test/suite/gcol/t/gcol_rollback.test b/mysql-test/suite/gcol/t/gcol_rollback.test index ba88dda45d7f2..888e6be861e5f 100644 --- a/mysql-test/suite/gcol/t/gcol_rollback.test +++ b/mysql-test/suite/gcol/t/gcol_rollback.test @@ -103,7 +103,6 @@ SET DEBUG_SYNC = 'now SIGNAL dml_done'; connection con1; reap; -disconnect con1; connection default; SELECT * FROM t; @@ -111,5 +110,27 @@ SELECT * FROM t; DROP TABLE t; SET DEBUG_SYNC = 'RESET'; +--echo # +--echo # MDEV-30597 Assertion `flag == 1' failed in +--echo # row_build_index_entry_low +--echo # +CREATE TABLE t1 ( +col1 INT PRIMARY KEY, col_text TEXT, +col_text_g TEXT GENERATED ALWAYS AS (SUBSTR(col_text,1,499)) +) ENGINE = InnoDB ROW_FORMAT = Compact; +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; +INSERT INTO t1 (col1) VALUES (1) ; +DELETE FROM t1 WHERE col1 = 1; +ALTER TABLE t1 ADD UNIQUE INDEX (col_text_g(9)); +BEGIN; +INSERT INTO t1 (col1) VALUES (1); +ROLLBACK; +disconnect con1; +DROP TABLE t1; + # Wait till all disconnects are completed --source include/wait_until_count_sessions.inc + +--echo # End of 10.4 tests diff --git a/mysql-test/suite/innodb/r/full_crc32_import.result b/mysql-test/suite/innodb/r/full_crc32_import.result index 99f1154842013..32964be46d4fc 100644 --- a/mysql-test/suite/innodb/r/full_crc32_import.result +++ b/mysql-test/suite/innodb/r/full_crc32_import.result @@ -177,6 +177,17 @@ UNLOCK TABLES; SET GLOBAL innodb_compression_algorithm=0; ALTER TABLE 
t1 FORCE; ALTER TABLE t1 DISCARD TABLESPACE; +# Display the discarded table name by using SPACE and PAGE_NO +# column in INNODB_SYS_INDEXES and discard doesn't affect the +# SPACE in INNODB_SYS_TABLES +SELECT t.NAME, t.SPACE BETWEEN 1 and 0xFFFFFFEF as SYS_TABLE_SPACE_RANGE +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t +WHERE t.TABLE_ID IN ( +SELECT i.TABLE_ID FROM +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i WHERE +i.PAGE_NO IS NULL and i.SPACE IS NULL); +NAME SYS_TABLE_SPACE_RANGE +test/t1 1 db.opt t1.frm restore: t1 .ibd and .cfg files diff --git a/mysql-test/suite/innodb/r/innodb-wl5522.result b/mysql-test/suite/innodb/r/innodb-wl5522.result index 7bcc57c5bd59d..6baa5df9e5504 100644 --- a/mysql-test/suite/innodb/r/innodb-wl5522.result +++ b/mysql-test/suite/innodb/r/innodb-wl5522.result @@ -64,16 +64,24 @@ a b c # Done restarting server # List before t1 DISCARD db.opt +t1.cfg.sav t1.frm t1.ibd +t1.ibd.sav +t2.cfg.sav t2.frm t2.ibd +t2.ibd.sav ALTER TABLE t1 DISCARD TABLESPACE; # List after t1 DISCARD db.opt +t1.cfg.sav t1.frm +t1.ibd.sav +t2.cfg.sav t2.frm t2.ibd +t2.ibd.sav ALTER TABLE t1 IMPORT TABLESPACE; ALTER TABLE t1 ENGINE InnoDB; SELECT COUNT(*) FROM t1; @@ -91,10 +99,14 @@ a b c 638 Cavalry ..asdasdfaeraf db.opt t1.cfg +t1.cfg.sav t1.frm t1.ibd +t1.ibd.sav +t2.cfg.sav t2.frm t2.ibd +t2.ibd.sav SELECT COUNT(*) FROM t1; COUNT(*) 640 @@ -113,7 +125,9 @@ ALTER TABLE t2 ROW_FORMAT=DYNAMIC; ALTER TABLE t2 DISCARD TABLESPACE; # List after t2 DISCARD db.opt +t2.cfg.sav t2.frm +t2.ibd.sav ALTER TABLE t2 IMPORT TABLESPACE; ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x21 and the meta-data file has 0x1; .cfg file uses ROW_FORMAT=COMPACT) ALTER TABLE t2 IMPORT TABLESPACE; diff --git a/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result b/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result index d9f5e4dfeed5d..fa17487df9767 100644 --- a/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result +++ b/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result @@ -2,13 +2,11 @@ CREATE TABLE tab5 (col1 int auto_increment primary key, col2 VARCHAR(25), col3 varchar(25)) ENGINE=InnoDB; CREATE INDEX idx1 ON tab5(col2(10)); CREATE INDEX idx2 ON tab5(col3(10)); -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool100; SET GLOBAL innodb_buffer_pool_dump_pct=100; SELECT variable_value INTO @IBPDS FROM information_schema.global_status WHERE variable_name = 'INNODB_BUFFER_POOL_DUMP_STATUS'; SET GLOBAL innodb_buffer_pool_dump_now=ON; -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool1; SET GLOBAL innodb_buffer_pool_dump_pct=1; SELECT @@global.innodb_buffer_pool_dump_pct; @@global.innodb_buffer_pool_dump_pct @@ -18,5 +16,4 @@ FROM information_schema.global_status WHERE variable_name = 'INNODB_BUFFER_POOL_DUMP_STATUS'; SET GLOBAL innodb_buffer_pool_dump_now=ON; SET GLOBAL innodb_buffer_pool_dump_pct=DEFAULT; -SET GLOBAL innodb_buffer_pool_filename=DEFAULT; DROP TABLE tab5; diff --git a/mysql-test/suite/innodb/r/innodb_ctype_utf8.result b/mysql-test/suite/innodb/r/innodb_ctype_utf8.result index 5f29fad8a1c39..3ce1d3a1d53cc 100644 --- a/mysql-test/suite/innodb/r/innodb_ctype_utf8.result +++ b/mysql-test/suite/innodb/r/innodb_ctype_utf8.result @@ -283,3 +283,101 @@ DROP TABLE t1; # # End of 10.2 tests # +# +# Start of 10.4 tests +# +SET NAMES utf8mb3 COLLATE utf8mb3_unicode_nopad_ci; +# +# MDEV-30034 UNIQUE USING HASH accepts duplicate entries for tricky collations +# +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a TEXT COLLATE ,' + 'UNIQUE(a(3)))', 
+'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a TEXT COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)))', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(2000) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a VARCHAR(2000) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(2000) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +ERROR 23000: Duplicate entry 'ß ' for key 'a' +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)))', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; +EXECUTE IMMEDIATE REPLACE( +'CREATE TABLE t1 ( ' + ' a CHAR(20) COLLATE ,' + 'UNIQUE(a(3)) USING HASH)', +'', @@collation_connection); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_nopad_ci DEFAULT NULL, + UNIQUE KEY `a` (`a`(3)) USING HASH +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +INSERT INTO t1 VALUES ('ss '); +INSERT INTO t1 VALUES (_utf8mb3 0xC39F20)/*SZ+SPACE*/; +DROP TABLE t1; +# +# End 10.4 tests +# diff --git a/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result b/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result index 32d87b4668a23..6932b8f22926e 100644 --- a/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result +++ b/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result @@ -25,27 +25,6 @@ select @@innodb_ft_server_stopword_table; @@innodb_ft_server_stopword_table NULL drop table user_stopword_1, user_stopword_2; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -ib_buffer_pool 
-set @blah='hello'; -set global innodb_buffer_pool_filename = @blah; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -hello -set global innodb_buffer_pool_filename="bye"; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -bye -set global innodb_buffer_pool_filename=NULL; -ERROR 42000: Variable 'innodb_buffer_pool_filename' can't be set to the value of 'NULL' -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -bye -set global innodb_buffer_pool_filename=default; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -ib_buffer_pool CREATE TABLE t1 ( id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, opening_line TEXT(500), author VARCHAR(200), title VARCHAR(200), FULLTEXT idx (opening_line)) ENGINE=InnoDB; diff --git a/mysql-test/suite/innodb/r/instant_alter_crash.result b/mysql-test/suite/innodb/r/instant_alter_crash.result index f0fedcc767320..7855aa24264d7 100644 --- a/mysql-test/suite/innodb/r/instant_alter_crash.result +++ b/mysql-test/suite/innodb/r/instant_alter_crash.result @@ -206,3 +206,27 @@ Table Op Msg_type Msg_text test.t2 check status OK DROP TABLE t1,t2; db.opt +# +# MDEV-26198 Assertion `0' failed in row_log_table_apply_op during +# ADD PRIMARY KEY or OPTIMIZE TABLE +# +CREATE TABLE t1(f1 year default null, f2 year default null, +f3 text, f4 year default null, f5 year default null, +f6 year default null, f7 year default null, +f8 year default null)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); +ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; +set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ADD COLUMN f10 YEAR DEFAULT NULL, ALGORITHM=INPLACE; +connect con1,localhost,root,,,; +SET DEBUG_SYNC="now WAIT_FOR con1_insert"; +INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); +INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); +SET DEBUG_SYNC="now SIGNAL con1_finish"; +connection default; +disconnect con1; +SET DEBUG_SYNC=RESET; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_crash.test b/mysql-test/suite/innodb/t/alter_crash.test index e0e294ae4f0cc..164ff877b6243 100644 --- a/mysql-test/suite/innodb/t/alter_crash.test +++ b/mysql-test/suite/innodb/t/alter_crash.test @@ -51,8 +51,8 @@ let $MYSQLD_DATADIR= `select @@datadir`; let datadir= `select @@datadir`; # These are from include/shutdown_mysqld.inc and allow to call start_mysqld.inc ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --echo # --echo # Bug #14669848 CRASH DURING ALTER MAKES ORIGINAL TABLE INACCESSIBLE diff --git a/mysql-test/suite/innodb/t/full_crc32_import.test b/mysql-test/suite/innodb/t/full_crc32_import.test index c50e3899fc819..b79fd95471be8 100644 --- a/mysql-test/suite/innodb/t/full_crc32_import.test +++ b/mysql-test/suite/innodb/t/full_crc32_import.test @@ -199,6 +199,16 @@ SET GLOBAL innodb_compression_algorithm=0; ALTER TABLE t1 FORCE; ALTER TABLE t1 DISCARD TABLESPACE; +--echo # Display the discarded table name by using SPACE and PAGE_NO +--echo # column in INNODB_SYS_INDEXES and discard doesn't affect the +--echo # SPACE in INNODB_SYS_TABLES +SELECT t.NAME, t.SPACE BETWEEN 1 and 0xFFFFFFEF as SYS_TABLE_SPACE_RANGE +FROM 
INFORMATION_SCHEMA.INNODB_SYS_TABLES t +WHERE t.TABLE_ID IN ( + SELECT i.TABLE_ID FROM + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i WHERE + i.PAGE_NO IS NULL and i.SPACE IS NULL); + --list_files $MYSQLD_DATADIR/test perl; do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl"; diff --git a/mysql-test/suite/innodb/t/import_tablespace_race.test b/mysql-test/suite/innodb/t/import_tablespace_race.test index 10ffe061ed102..aca0b5df06d03 100644 --- a/mysql-test/suite/innodb/t/import_tablespace_race.test +++ b/mysql-test/suite/innodb/t/import_tablespace_race.test @@ -37,8 +37,8 @@ ALTER TABLE t NOWAIT ADD INDEX (c); FLUSH TABLE t FOR EXPORT; --let $create= query_get_value(SHOW CREATE TABLE t, Create Table, 1) ---copy_file $datadir/test/t.cfg $MYSQL_TMP_DIR/t.cfg ---copy_file $datadir/test/t.ibd $MYSQL_TMP_DIR/t.ibd +--copy_file $datadir/test/t.cfg $datadir/test/t.cfg.sav +--copy_file $datadir/test/t.ibd $datadir/test/t.ibd.sav UNLOCK TABLES; DROP TABLE t; @@ -47,8 +47,8 @@ eval $create; --enable_query_log ALTER TABLE t DISCARD TABLESPACE; ---move_file $MYSQL_TMP_DIR/t.cfg $datadir/test/t.cfg ---move_file $MYSQL_TMP_DIR/t.ibd $datadir/test/t.ibd +--move_file $datadir/test/t.cfg.sav $datadir/test/t.cfg +--move_file $datadir/test/t.ibd.sav $datadir/test/t.ibd ALTER TABLE t IMPORT TABLESPACE; # Cleanup diff --git a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test index ab6e15868977e..0ae116f58f006 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test +++ b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test @@ -29,8 +29,8 @@ call mtr.add_suppression("InnoDB could not find key no 1 with name f2 from dict let datadir= `select @@datadir`; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name=$MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect CREATE TABLE t1 (f1 INT NOT NULL, f2 INT NOT NULL) ENGINE=innodb; SET debug_dbug='+d,innodb_alter_commit_crash_before_commit'; diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test index efa2b1ea403d8..e642a9491fe33 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test @@ -40,8 +40,8 @@ SET GLOBAL innodb_file_per_table = 1; CREATE TABLE t1 (c1 INT) ENGINE = InnoDB; INSERT INTO t1 VALUES(1),(2),(3); ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo wait > $_expect_file_name SET SESSION debug_dbug="+d,ib_discard_before_commit_crash"; diff --git a/mysql-test/suite/innodb/t/innodb-wl5522.test b/mysql-test/suite/innodb/t/innodb-wl5522.test index 19652d8e8fd16..8eedb636a5fc9 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522.test @@ -11,7 +11,6 @@ call mtr.add_suppression("InnoDB: Cannot save statistics for table `test`\\.`t1` FLUSH TABLES; -let $MYSQLD_TMPDIR = `SELECT @@tmpdir`; let $MYSQLD_DATADIR = `SELECT @@datadir`; let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`; @@ -41,10 +40,10 @@ CREATE TABLE t2(a INT PRIMARY KEY) ENGINE=InnoDB ROW_FORMAT=COMPACT; FLUSH TABLE t1, t2 FOR EXPORT; --echo # List before copying files --list_files 
$MYSQLD_DATADIR/test ---copy_file $MYSQLD_DATADIR/test/t1.cfg $MYSQLD_TMPDIR/t1.cfg ---copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_TMPDIR/t1.ibd ---move_file $MYSQLD_DATADIR/test/t2.cfg $MYSQLD_TMPDIR/t2.cfg ---copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_TMPDIR/t2.ibd +--copy_file $MYSQLD_DATADIR/test/t1.cfg $MYSQLD_DATADIR/test/t1.cfg.sav +--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t1.ibd.sav +--move_file $MYSQLD_DATADIR/test/t2.cfg $MYSQLD_DATADIR/test/t2.cfg.sav +--copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t2.ibd.sav UNLOCK TABLES; INSERT INTO t1 (b, c) SELECT b,c FROM t1 ORDER BY a; SELECT COUNT(*) FROM t1; @@ -58,8 +57,8 @@ SELECT * FROM t1 ORDER BY a DESC LIMIT 3; ALTER TABLE t1 DISCARD TABLESPACE; --echo # List after t1 DISCARD --list_files $MYSQLD_DATADIR/test ---copy_file $MYSQLD_TMPDIR/t1.cfg $MYSQLD_DATADIR/test/t1.cfg ---copy_file $MYSQLD_TMPDIR/t1.ibd $MYSQLD_DATADIR/test/t1.ibd +--copy_file $MYSQLD_DATADIR/test/t1.cfg.sav $MYSQLD_DATADIR/test/t1.cfg +--copy_file $MYSQLD_DATADIR/test/t1.ibd.sav $MYSQLD_DATADIR/test/t1.ibd ALTER TABLE t1 IMPORT TABLESPACE; ALTER TABLE t1 ENGINE InnoDB; SELECT COUNT(*) FROM t1; @@ -70,15 +69,15 @@ SELECT COUNT(*) FROM t1; SELECT * FROM t1 ORDER BY b,a DESC LIMIT 3; SELECT * FROM t1 ORDER BY a DESC LIMIT 3; DROP TABLE t1; ---remove_file $MYSQLD_TMPDIR/t1.cfg ---remove_file $MYSQLD_TMPDIR/t1.ibd +--remove_file $MYSQLD_DATADIR/test/t1.cfg.sav +--remove_file $MYSQLD_DATADIR/test/t1.ibd.sav ALTER TABLE t2 ROW_FORMAT=DYNAMIC; ALTER TABLE t2 DISCARD TABLESPACE; --echo # List after t2 DISCARD --list_files $MYSQLD_DATADIR/test ---move_file $MYSQLD_TMPDIR/t2.ibd $MYSQLD_DATADIR/test/t2.ibd ---move_file $MYSQLD_TMPDIR/t2.cfg $MYSQLD_DATADIR/test/t2.cfg +--move_file $MYSQLD_DATADIR/test/t2.ibd.sav $MYSQLD_DATADIR/test/t2.ibd +--move_file $MYSQLD_DATADIR/test/t2.cfg.sav $MYSQLD_DATADIR/test/t2.cfg --error ER_TABLE_SCHEMA_MISMATCH ALTER TABLE t2 IMPORT TABLESPACE; --remove_file $MYSQLD_DATADIR/test/t2.cfg diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test b/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test index a7a414d61da61..381091165ef08 100644 --- a/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test +++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test @@ -15,7 +15,6 @@ col2 VARCHAR(25), col3 varchar(25)) ENGINE=InnoDB; CREATE INDEX idx1 ON tab5(col2(10)); CREATE INDEX idx2 ON tab5(col3(10)); -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool100; SET GLOBAL innodb_buffer_pool_dump_pct=100; #*********************************************************** @@ -58,8 +57,7 @@ AND variable_value != @IBPDS AND variable_value like 'Buffer pool(s) dump completed at%'; --source include/wait_condition.inc ---file_exists $MYSQLD_DATADIR/ib_buffer_pool100 -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool1; +--move_file $MYSQLD_DATADIR/ib_buffer_pool $MYSQLD_DATADIR/ib_buffer_pool100 SET GLOBAL innodb_buffer_pool_dump_pct=1; SELECT @@global.innodb_buffer_pool_dump_pct; @@ -83,17 +81,15 @@ AND variable_value != @IBPDS AND variable_value like 'Buffer pool(s) dump completed at%'; --source include/wait_condition.inc ---file_exists $MYSQLD_DATADIR/ib_buffer_pool1 +--file_exists $MYSQLD_DATADIR/ib_buffer_pool perl; -my $size1 = -s "$ENV{MYSQLD_DATADIR}/ib_buffer_pool1"; +my $size1 = -s "$ENV{MYSQLD_DATADIR}/ib_buffer_pool"; my $size100 = -s "$ENV{MYSQLD_DATADIR}/ib_buffer_pool100"; die "$size100<=$size1\n" unless $size100 > $size1; EOF SET GLOBAL innodb_buffer_pool_dump_pct=DEFAULT; 
-SET GLOBAL innodb_buffer_pool_filename=DEFAULT; --remove_file $MYSQLD_DATADIR/ib_buffer_pool100 ---remove_file $MYSQLD_DATADIR/ib_buffer_pool1 DROP TABLE tab5; diff --git a/mysql-test/suite/innodb/t/innodb_ctype_utf8.test b/mysql-test/suite/innodb/t/innodb_ctype_utf8.test index c3abaa9973108..99b7925321597 100644 --- a/mysql-test/suite/innodb/t/innodb_ctype_utf8.test +++ b/mysql-test/suite/innodb/t/innodb_ctype_utf8.test @@ -23,3 +23,15 @@ let $coll_pad='utf8_bin'; --echo # --echo # End of 10.2 tests --echo # + + +--echo # +--echo # Start of 10.4 tests +--echo # + +SET NAMES utf8mb3 COLLATE utf8mb3_unicode_nopad_ci; +--source include/ctype_nopad_prefix_unique.inc + +--echo # +--echo # End 10.4 tests +--echo # diff --git a/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test b/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test index 2e1391355b93f..4383e26877d12 100644 --- a/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test +++ b/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test @@ -25,24 +25,6 @@ select @@innodb_ft_server_stopword_table; drop table user_stopword_1, user_stopword_2; -#Test innodb_buffer_pool_filename (global variable) - -select @@innodb_buffer_pool_filename; - -set @blah='hello'; -set global innodb_buffer_pool_filename = @blah; -select @@innodb_buffer_pool_filename; - -set global innodb_buffer_pool_filename="bye"; -select @@innodb_buffer_pool_filename; - ---error ER_WRONG_VALUE_FOR_VAR -set global innodb_buffer_pool_filename=NULL; -select @@innodb_buffer_pool_filename; - -set global innodb_buffer_pool_filename=default; -select @@innodb_buffer_pool_filename; - #Test innodb_ft_aux_table (global variable) CREATE TABLE t1 ( id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, opening_line TEXT(500), author VARCHAR(200), title VARCHAR(200), FULLTEXT idx diff --git a/mysql-test/suite/innodb/t/instant_alter_crash.test b/mysql-test/suite/innodb/t/instant_alter_crash.test index 0bd983a2b4cae..37cdb2862ad18 100644 --- a/mysql-test/suite/innodb/t/instant_alter_crash.test +++ b/mysql-test/suite/innodb/t/instant_alter_crash.test @@ -236,3 +236,29 @@ CHECK TABLE t2; DROP TABLE t1,t2; --list_files $MYSQLD_DATADIR/test + +--echo # +--echo # MDEV-26198 Assertion `0' failed in row_log_table_apply_op during +--echo # ADD PRIMARY KEY or OPTIMIZE TABLE +--echo # +CREATE TABLE t1(f1 year default null, f2 year default null, + f3 text, f4 year default null, f5 year default null, + f6 year default null, f7 year default null, + f8 year default null)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); +ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; +set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; +send ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ADD COLUMN f10 YEAR DEFAULT NULL, ALGORITHM=INPLACE; + +connect(con1,localhost,root,,,); +SET DEBUG_SYNC="now WAIT_FOR con1_insert"; +INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); +INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); +SET DEBUG_SYNC="now SIGNAL con1_finish"; + +connection default; +reap; +disconnect con1; +SET DEBUG_SYNC=RESET; +CHECK TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/purge_thread_shutdown.test b/mysql-test/suite/innodb/t/purge_thread_shutdown.test index 447f1fd38040b..53b375b80fd65 100644 --- a/mysql-test/suite/innodb/t/purge_thread_shutdown.test +++ b/mysql-test/suite/innodb/t/purge_thread_shutdown.test @@ -12,8 +12,8 @@ select user,state from information_schema.processlist order by 2; set global 
debug_dbug='+d,only_kill_system_threads'; set global innodb_fast_shutdown=0; -let $_server_id= `SELECT @@server_id`; -let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect; +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect exec echo "wait" > $_expect_file_name; send shutdown; diff --git a/mysql-test/suite/innodb/t/restart.test b/mysql-test/suite/innodb/t/restart.test index 5be076c6198e2..a1e45b37f9c89 100644 --- a/mysql-test/suite/innodb/t/restart.test +++ b/mysql-test/suite/innodb/t/restart.test @@ -4,6 +4,7 @@ let datadir= `select @@datadir`; let page_size= `select @@innodb_page_size`; +let tmp_in_vardir=$MYSQLTEST_VARDIR/tmp; --echo # --echo # MDEV-15333 MariaDB (still) slow start @@ -27,19 +28,19 @@ call mtr.add_suppression("\\[Warning\\] InnoDB: Ignoring tablespace for test/td CREATE TABLE tr(a INT)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; CREATE TABLE tc(a INT)ENGINE=InnoDB ROW_FORMAT=COMPACT PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9; ---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +--replace_result $tmp_in_vardir MYSQL_TMP_DIR eval CREATE TABLE td(a INT)ENGINE=InnoDB ROW_FORMAT=DYNAMIC -STATS_PERSISTENT=0 DATA DIRECTORY='$MYSQL_TMP_DIR'; +STATS_PERSISTENT=0 DATA DIRECTORY='$tmp_in_vardir'; --source include/shutdown_mysqld.inc --move_file $datadir/test/tr.ibd $datadir/test/tr0.ibd --move_file $datadir/test/tc.ibd $datadir/test/tc0.ibd ---move_file $MYSQL_TMP_DIR/test/td.ibd $datadir/test/td0.ibd +--move_file $tmp_in_vardir/test/td.ibd $datadir/test/td0.ibd # TODO: test that MariaDB does not even attempt to open the files #--mkdir $datadir/test/tr.ibd #--mkdir $datadir/test/tc.ibd -#--mkdir $MYSQL_TMP_DIR/test/td.ibd +#--mkdir $tmp_in_vardir/test/td.ibd perl; die unless open OUT, ">", "$ENV{datadir}/test/tr.ibd"; @@ -48,7 +49,7 @@ close OUT or die; die unless open OUT, ">", "$ENV{datadir}/test/tc.ibd"; print OUT "bar " x $ENV{page_size}; close OUT or die; -die unless open OUT, ">", "$ENV{MYSQL_TMP_DIR}/test/td.ibd"; +die unless open OUT, ">", "$ENV{tmp_in_vardir}/test/td.ibd"; print OUT "Xyz " x $ENV{page_size}; close OUT or die; die unless open ISL, "+<", "$ENV{datadir}/test/td.isl"; @@ -75,14 +76,14 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED'); # TODO: test that MariaDB does not even attempt to open the files #--rmdir $datadir/test/tr.ibd #--rmdir $datadir/test/tc.ibd -#--rmdir $MYSQL_TMP_DIR/test/td.ibd +#--rmdir $tmp_in_vardir/test/td.ibd --remove_file $datadir/test/tr.ibd --remove_file $datadir/test/tc.ibd ---remove_file $MYSQL_TMP_DIR/test/td.ibd +--remove_file $tmp_in_vardir/test/td.ibd --move_file $datadir/test/tr0.ibd $datadir/test/tr.ibd --move_file $datadir/test/tc0.ibd $datadir/test/tc.ibd ---move_file $datadir/test/td0.ibd $MYSQL_TMP_DIR/test/td.ibd +--move_file $datadir/test/td0.ibd $tmp_in_vardir/test/td.ibd --source include/start_mysqld.inc SELECT * FROM tr; @@ -118,8 +119,6 @@ SET GLOBAL innodb_buffer_pool_size = @innodb_buffer_pool_size_orig; --echo # --let MYSQLD_DATADIR= `SELECT @@datadir` ---let SERVER_ID= `SELECT @@server_id` ---let EXPECT_FILE_NAME= $MYSQLTEST_VARDIR/tmp/mysqld.$SERVER_ID.expect --source include/shutdown_mysqld.inc @@ -154,13 +153,13 @@ if ($MTR_COMBINATION_64K) } --error 1 -exec $MYSQLD --no-defaults --skip-networking --innodb_data_file_path=ibdata1:$ibdata_size --innodb-page-size=$page_size --datadir=$MYSQLD_DATADIR --log-error=$MYSQL_TMP_DIR/attempted_start.err; +exec $MYSQLD --no-defaults --skip-networking 
--innodb_data_file_path=ibdata1:$ibdata_size --innodb-page-size=$page_size --datadir=$MYSQLD_DATADIR --log-error=$tmp_in_vardir/attempted_start.err; -let SEARCH_FILE= $MYSQL_TMP_DIR/attempted_start.err; +let SEARCH_FILE= $tmp_in_vardir/attempted_start.err; let SEARCH_PATTERN= InnoDB: MySQL-8\.0 tablespace in \./ibdata1; source include/search_pattern_in_file.inc; ---remove_file $MYSQL_TMP_DIR/attempted_start.err +--remove_file $tmp_in_vardir/attempted_start.err --remove_file $MYSQLD_DATADIR/ibdata1 --move_file $MYSQLD_DATADIR/ibdata1.bak $MYSQLD_DATADIR/ibdata1 diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test index 6b2cd6b9b2cd0..b09ff7c7c300f 100644 --- a/mysql-test/suite/innodb/t/temporary_table.test +++ b/mysql-test/suite/innodb/t/temporary_table.test @@ -11,8 +11,8 @@ --source include/no_valgrind_without_big.inc --disable_query_log -call mtr.add_suppression("Can't create/write to file '/dev/null/nonexistent/ib"); call mtr.add_suppression("Can't create/write to file '' \\\(Errcode: 20 \"Not a directory\"\\\)"); +call mtr.add_suppression("Can't create/write to file '/dev/null/.*/ib"); call mtr.add_suppression("InnoDB: Unable to create temporary file"); call mtr.add_suppression("last file in setting innodb_temp_data_file_path"); call mtr.add_suppression("The table 't1' is full"); @@ -135,7 +135,7 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED'); # We cannot use include/restart_mysqld.inc in this particular test, # because SHOW STATUS would fail due to unwritable (nonexistent) tmpdir. --source include/shutdown_mysqld.inc ---exec echo "restart: --tmpdir=/dev/null/nonexistent --skip-innodb-fast-shutdown" > $_expect_file_name +--exec echo "restart: --tmpdir=/dev/null/$MYSQL_TMP_DIR --skip-innodb-fast-shutdown" > $_expect_file_name --enable_reconnect --disable_result_log --disable_query_log diff --git a/mysql-test/suite/innodb_fts/t/sync.test b/mysql-test/suite/innodb_fts/t/sync.test index 49663f20b8a3d..7c5c835f2ee21 100644 --- a/mysql-test/suite/innodb_fts/t/sync.test +++ b/mysql-test/suite/innodb_fts/t/sync.test @@ -104,7 +104,8 @@ disconnect con1; DROP TABLE t1; --echo # Case 3: Test insert crash recovery ---let $_expect_file_name=$MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect CREATE TABLE t1 ( FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, diff --git a/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result b/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result index ea42e7ea7842c..f898e1a978e77 100644 --- a/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result +++ b/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result @@ -6,7 +6,7 @@ INNODB_SYS_INDEXES CREATE TEMPORARY TABLE `INNODB_SYS_INDEXES` ( `TABLE_ID` bigint(21) unsigned NOT NULL, `TYPE` int(11) NOT NULL, `N_FIELDS` int(11) NOT NULL, - `PAGE_NO` int(11) NOT NULL, - `SPACE` int(11) NOT NULL, + `PAGE_NO` int(11), + `SPACE` int(11), `MERGE_THRESHOLD` int(11) NOT NULL ) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci diff --git a/mysql-test/suite/maria/aria_log_dir_path.result b/mysql-test/suite/maria/aria_log_dir_path.result new file mode 100644 index 0000000000000..0a4201544a5f3 --- /dev/null +++ b/mysql-test/suite/maria/aria_log_dir_path.result @@ -0,0 +1,29 @@ +# +# MDEV-30971 Add a new system variable aria_data_home_dir +# +# restart: --loose-aria-log-file-size=8388608 
--loose-aria-log-dir-path=MYSQLTEST_VARDIR/tmp/aria_log_dir_path_1 +SET @@global.aria_log_purge_type=external; +SHOW VARIABLES LIKE 'aria_log_file_size'; +Variable_name Value +aria_log_file_size 8388608 +SELECT @@aria_log_dir_path; +@@aria_log_dir_path +MYSQLTEST_VARDIR/tmp/aria_log_dir_path_1 +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +SHOW ENGINE aria logs; +Type Name Status +Aria aria_log.00000001 in use +CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria; +BEGIN NOT ATOMIC +FOR id IN 0..9 DO +INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); +END FOR; +END; +$$ +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +SHOW ENGINE aria logs; +Type Name Status +Aria aria_log.00000001 free +Aria aria_log.00000002 in use +DROP TABLE t1; +# restart diff --git a/mysql-test/suite/maria/aria_log_dir_path.test b/mysql-test/suite/maria/aria_log_dir_path.test new file mode 100644 index 0000000000000..bc0a31a76b933 --- /dev/null +++ b/mysql-test/suite/maria/aria_log_dir_path.test @@ -0,0 +1,65 @@ +--source include/have_maria.inc +--let $datadir= `SELECT @@datadir` + +--echo # +--echo # MDEV-30971 Add a new system variable aria_data_home_dir +--echo # + +--let $ARIA_LOGDIR=$MYSQLTEST_VARDIR/tmp/aria_log_dir_path_1 +--mkdir $ARIA_LOGDIR +--let $restart_parameters=--loose-aria-log-file-size=8388608 --loose-aria-log-dir-path=$ARIA_LOGDIR +--source include/restart_mysqld.inc + +# +# Test that: +# - aria_log_dir_path is set to a non-default directory. +# - New Aria log files are created in the non-default directory. +# - The contents of the log directory (according to "file_exists" commands) +# are in sync with the "SHOW ENGINE aria logs" output. +# + +# Prevent automatic purge +SET @@global.aria_log_purge_type=external; + +SHOW VARIABLES LIKE 'aria_log_file_size'; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +SELECT @@aria_log_dir_path; + + +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +--file_exists $ARIA_LOGDIR/aria_log_control +--file_exists $ARIA_LOGDIR/aria_log.00000001 +--error 1 +--file_exists $ARIA_LOGDIR/aria_log.00000002 +--replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/ +SHOW ENGINE aria logs; + + +CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria; +DELIMITER $$; +BEGIN NOT ATOMIC + FOR id IN 0..9 DO + INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); + END FOR; +END; +$$ +DELIMITER ;$$ + + +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +--file_exists $ARIA_LOGDIR/aria_log_control +--file_exists $ARIA_LOGDIR/aria_log.00000001 +--file_exists $ARIA_LOGDIR/aria_log.00000002 +--error 1 +--file_exists $ARIA_LOGDIR/aria_log.00000003 +--replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/ +SHOW ENGINE aria logs; + +DROP TABLE t1; + +--let $restart_parameters= +--source include/restart_mysqld.inc +--remove_file $ARIA_LOGDIR/aria_log_control +--remove_file $ARIA_LOGDIR/aria_log.00000001 +--remove_file $ARIA_LOGDIR/aria_log.00000002 +--rmdir $ARIA_LOGDIR diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result index ad69d5f205046..fcdd08382a08b 100644 --- a/mysql-test/suite/maria/maria3.result +++ b/mysql-test/suite/maria/maria3.result @@ -311,6 +311,7 @@ aria_encrypt_tables # aria_force_start_after_recovery_failures # aria_group_commit # aria_group_commit_interval # +aria_log_dir_path # aria_log_file_size # aria_log_purge_type # aria_max_sort_file_size # diff --git a/mysql-test/suite/mariabackup/include/restart_and_restore.inc
b/mysql-test/suite/mariabackup/include/restart_and_restore.inc index 2d1e5493957a6..aa26d28efba7a 100644 --- a/mysql-test/suite/mariabackup/include/restart_and_restore.inc +++ b/mysql-test/suite/mariabackup/include/restart_and_restore.inc @@ -4,5 +4,5 @@ echo # shutdown server; echo # remove datadir; rmdir $_datadir; echo # xtrabackup move back; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1 $backup_opts; --source include/start_mysqld.inc diff --git a/mysql-test/suite/mariabackup/partial_exclude.result b/mysql-test/suite/mariabackup/partial_exclude.result index a31197b9e9dac..f57a5f63e6819 100644 --- a/mysql-test/suite/mariabackup/partial_exclude.result +++ b/mysql-test/suite/mariabackup/partial_exclude.result @@ -8,8 +8,15 @@ CREATE DATABASE db2; USE db2; CREATE TABLE t1(i INT) ENGINE INNODB; USE test; +BEGIN; +INSERT INTO db2.t1 VALUES(20); +INSERT INTO test.t1 VALUES(20); +INSERT INTO test.t2 VALUES(20); # xtrabackup backup +COMMIT; t1.new DROP TABLE t1; DROP TABLE t2; DROP DATABASE db2; +NOT FOUND /Operating system error number/ in backup.log +NOT FOUND /Could not find a valid tablespace file for/ in backup.log diff --git a/mysql-test/suite/mariabackup/partial_exclude.test b/mysql-test/suite/mariabackup/partial_exclude.test index 3642a2c6f4610..6a1ae13b51287 100644 --- a/mysql-test/suite/mariabackup/partial_exclude.test +++ b/mysql-test/suite/mariabackup/partial_exclude.test @@ -19,6 +19,11 @@ CREATE TABLE t1(i INT) ENGINE INNODB; USE test; +BEGIN; +INSERT INTO db2.t1 VALUES(20); +INSERT INTO test.t1 VALUES(20); +INSERT INTO test.t2 VALUES(20); + echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; @@ -26,6 +31,8 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables-exclude=test.*2" "--databases-exclude=db2" --target-dir=$targetdir; --enable_result_log +COMMIT; + # check that only t1 table is in backup (t2 is excluded) list_files $targetdir/test *.new; list_files $targetdir/test *.ibd; @@ -47,4 +54,17 @@ DROP DATABASE db2; rmdir $MYSQLD_DATADIR/db3; rmdir $MYSQLD_DATADIR/db4; rmdir $MYSQLD_DATADIR/db5; + +--let $backup_log=$MYSQLTEST_VARDIR/tmp/backup.log +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --export --prepare --target-dir="$targetdir" > $backup_log; +--enable_result_log + +--let SEARCH_FILE=$backup_log +--let SEARCH_PATTERN=Operating system error number +--source include/search_pattern_in_file.inc +--let SEARCH_PATTERN=Could not find a valid tablespace file for +--source include/search_pattern_in_file.inc +--remove_file $backup_log + rmdir $targetdir; diff --git a/mysql-test/suite/mariabackup/relative_path.opt b/mysql-test/suite/mariabackup/relative_path.opt new file mode 100644 index 0000000000000..3e3c33e44f85a --- /dev/null +++ b/mysql-test/suite/mariabackup/relative_path.opt @@ -0,0 +1 @@ +--innodb-undo-tablespaces=2 diff --git a/mysql-test/suite/mariabackup/relative_path.result b/mysql-test/suite/mariabackup/relative_path.result new file mode 100644 index 0000000000000..7aa0c6968f3dc --- /dev/null +++ b/mysql-test/suite/mariabackup/relative_path.result @@ -0,0 +1,20 @@ +CREATE TABLE t(i INT) ENGINE INNODB; +INSERT INTO t VALUES(1); +# xtrabackup backup +# xtrabackup prepare +# shutdown server +# remove datadir +# 
xtrabackup move back +# restart +# shutdown server +# remove datadir +# xtrabackup move back +# restart +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t; +i +1 +DROP TABLE t; diff --git a/mysql-test/suite/mariabackup/relative_path.test b/mysql-test/suite/mariabackup/relative_path.test new file mode 100644 index 0000000000000..bd25a217e711e --- /dev/null +++ b/mysql-test/suite/mariabackup/relative_path.test @@ -0,0 +1,35 @@ +--source include/have_innodb.inc + +CREATE TABLE t(i INT) ENGINE INNODB; +INSERT INTO t VALUES(1); + +echo # xtrabackup backup; +let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; +--let $backup_log=$MYSQLTEST_VARDIR/tmp/backup.log + +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir > $backup_log 2>&1; +--enable_result_log + +echo # xtrabackup prepare; +--disable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir; + +# If MDEV-28187 is not fixed, the following attempts to copy the backup back +# to the data directory will fail, because their destination paths will be the +# same as their source paths + +--let $backup_opts=--innodb_undo_directory=./ +--source include/restart_and_restore.inc + +--let $backup_opts=--innodb_log_group_home_dir=./ +--source include/restart_and_restore.inc + +--let $backup_opts=--innodb_data_home_dir=./ +--source include/restart_and_restore.inc + +--enable_result_log + +SELECT * FROM t; +DROP TABLE t; +rmdir $targetdir; diff --git a/mysql-test/suite/perfschema/r/events_waits_current_MDEV-29091.result b/mysql-test/suite/perfschema/r/events_waits_current_MDEV-29091.result new file mode 100644 index 0000000000000..8f3a17a0fc51e --- /dev/null +++ b/mysql-test/suite/perfschema/r/events_waits_current_MDEV-29091.result @@ -0,0 +1,41 @@ +SET default_storage_engine=InnoDB; +SELECT @save_instrument_enabled := ENABLED +, @save_instrument_timed := TIMED +FROM performance_schema.setup_instruments +WHERE NAME = 'wait/lock/table/sql/handler'; +@save_instrument_enabled := ENABLED @save_instrument_timed := TIMED +YES YES +SELECT @save_consumer_enabled := ENABLED +FROM performance_schema.setup_consumers +WHERE NAME = 'events_waits_current'; +@save_consumer_enabled := ENABLED +YES +UPDATE performance_schema.setup_instruments +SET ENABLED = 'YES', TIMED = 'YES' +WHERE NAME = 'wait/lock/table/sql/handler'; +UPDATE performance_schema.setup_consumers +SET ENABLED = 'YES' +WHERE NAME = 'events_waits_current'; +CREATE TABLE t1 (id1 INT(11), col1 VARCHAR (200)); +INSERT INTO t1 VALUES (1, 'aa'); +INSERT INTO t1 VALUES (2, 'bb'); +connect con1,localhost,root,,test; +connect con2,localhost,root,,test; +connection con1; +START TRANSACTION; +connection con2; +START TRANSACTION; +SELECT id1 FROM t1 WHERE id1=1 FOR UPDATE; +connection default; +SELECT event_name FROM performance_schema.events_waits_current +WHERE event_name LIKE '%wait/lock/table/sql/handler%'; +event_name +UPDATE performance_schema.setup_instruments +SET ENABLED = @save_instrument_enabled, TIMED = @save_instrument_timed +WHERE NAME = 'wait/lock/table/sql/handler'; +UPDATE performance_schema.setup_consumers +SET ENABLED = @save_consumer_enabled +WHERE NAME = 'events_waits_current'; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result index 52ece6d289d8a..47f4d7ba346fe 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result +++
b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result @@ -202,10 +202,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -262,10 +260,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -328,10 +324,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -429,10 +423,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -500,10 +492,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -613,10 +603,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 
-wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -689,10 +677,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -814,10 +800,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -891,10 +875,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -965,10 +947,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1039,10 +1019,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch 
count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1110,10 +1088,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1180,10 +1156,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1249,10 +1223,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1317,10 +1289,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1387,10 +1357,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1456,10 +1424,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 
-wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1525,10 +1491,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1594,10 +1558,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1663,10 +1625,8 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result index b0ea06f42540c..9810d104ed96f 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result @@ -211,12 +211,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -279,12 +276,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 
execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -353,12 +347,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -462,12 +453,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -541,12 +529,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -662,12 +647,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 
+wait/lock/table/sql/handler 56 TABLE test t2 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -746,12 +728,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 56 TABLE test t2 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -879,12 +858,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 56 TABLE test t2 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -964,12 +940,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1046,12 +1019,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1128,12 +1098,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name 
count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1207,12 +1174,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1285,12 +1249,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1362,12 +1323,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1438,12 +1396,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE 
test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1516,12 +1471,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1593,12 +1545,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1670,12 +1619,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1747,12 +1693,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1824,12 +1767,9 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type 
object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result index 0a6dea739e498..367a8a089eb51 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result @@ -202,10 +202,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -262,10 +260,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -328,10 +324,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -429,10 +423,8 @@ wait/io/table/sql/handler 58 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -500,10 +492,8 @@ 
wait/io/table/sql/handler 58 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -613,10 +603,8 @@ wait/io/table/sql/handler 106 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -689,10 +677,8 @@ wait/io/table/sql/handler 106 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -814,10 +800,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 96 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 40 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 56 TABLE test t3 +wait/lock/table/sql/handler 87 TABLE test t1 +wait/lock/table/sql/handler 176 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -891,10 +875,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -965,10 +947,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 
-wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1039,10 +1019,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1110,10 +1088,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1180,10 +1156,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1249,10 +1223,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1317,10 +1289,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert 
count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1387,10 +1357,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1456,10 +1424,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1525,10 +1491,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1594,10 +1558,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1663,10 +1625,8 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result index c2eda2b8f23f6..ed1ac9d979be8 100644 --- 
a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result @@ -211,12 +211,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -279,12 +276,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -353,12 +347,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -462,12 +453,9 @@ wait/io/table/sql/handler 84 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 26 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 50 TABLE test t2 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -541,12 +529,9 @@ wait/io/table/sql/handler 84 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 26 
TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 50 TABLE test t2 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -662,12 +647,9 @@ wait/io/table/sql/handler 154 wait/lock/table/sql/handler 108 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 48 TABLE test t2 -wait/lock/table/sql/handler 36 TABLE test t2 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 84 TABLE test t2 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -746,12 +728,9 @@ wait/io/table/sql/handler 154 wait/lock/table/sql/handler 108 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 48 TABLE test t2 -wait/lock/table/sql/handler 36 TABLE test t2 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 84 TABLE test t2 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -879,12 +858,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 144 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 40 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 48 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 56 TABLE test t3 +wait/lock/table/sql/handler 87 TABLE test t1 +wait/lock/table/sql/handler 124 TABLE test t2 +wait/lock/table/sql/handler 176 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -964,12 +940,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write 
count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1046,12 +1019,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1128,12 +1098,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1207,12 +1174,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1285,12 +1249,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1362,12 +1323,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 
76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1438,12 +1396,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1516,12 +1471,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1593,12 +1545,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1670,12 +1619,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star 
count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1747,12 +1693,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1824,12 +1767,9 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/t/events_waits_current_MDEV-29091.test b/mysql-test/suite/perfschema/t/events_waits_current_MDEV-29091.test new file mode 100644 index 0000000000000..d9330ee5a9d4b --- /dev/null +++ b/mysql-test/suite/perfschema/t/events_waits_current_MDEV-29091.test @@ -0,0 +1,62 @@ +# +# proper event name wait/lock/table/sql/handler recorded in +# PERFORMANCE_SCHEMA.EVENTS_WAITS_CURRENT. Before this fix, it was +# labeled as wait/io/table/sql/handler. 
+# + +--source include/have_innodb.inc +--source include/have_perfschema.inc +--source include/not_embedded.inc + +SET default_storage_engine=InnoDB; + +SELECT @save_instrument_enabled := ENABLED +, @save_instrument_timed := TIMED +FROM performance_schema.setup_instruments +WHERE NAME = 'wait/lock/table/sql/handler'; + +SELECT @save_consumer_enabled := ENABLED +FROM performance_schema.setup_consumers +WHERE NAME = 'events_waits_current'; + +UPDATE performance_schema.setup_instruments +SET ENABLED = 'YES', TIMED = 'YES' +WHERE NAME = 'wait/lock/table/sql/handler'; + +UPDATE performance_schema.setup_consumers +SET ENABLED = 'YES' +WHERE NAME = 'events_waits_current'; + +CREATE TABLE t1 (id1 INT(11), col1 VARCHAR (200)); +INSERT INTO t1 VALUES (1, 'aa'); +INSERT INTO t1 VALUES (2, 'bb'); + +connect (con1,localhost,root,,test); +connect (con2,localhost,root,,test); + +connection con1; +START TRANSACTION; +let $wait_condition= + SELECT id1 FROM t1 WHERE id1=1 FOR UPDATE; +--source include/wait_condition.inc + +connection con2; +START TRANSACTION; +send SELECT id1 FROM t1 WHERE id1=1 FOR UPDATE; + +connection default; +SELECT event_name FROM performance_schema.events_waits_current +WHERE event_name LIKE '%wait/lock/table/sql/handler%'; + +# clean up +UPDATE performance_schema.setup_instruments +SET ENABLED = @save_instrument_enabled, TIMED = @save_instrument_timed +WHERE NAME = 'wait/lock/table/sql/handler'; + +UPDATE performance_schema.setup_consumers +SET ENABLED = @save_consumer_enabled +WHERE NAME = 'events_waits_current'; + +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/mysql-test/suite/period/r/overlaps.result b/mysql-test/suite/period/r/overlaps.result index 3ed8e1a001474..78b1ac18b8da3 100644 --- a/mysql-test/suite/period/r/overlaps.result +++ b/mysql-test/suite/period/r/overlaps.result @@ -351,3 +351,101 @@ primary key(id, p without overlaps) ) engine=heap partition by hash(id); update t set id = 1; drop table t, t1; +# +# MDEV-30415 PERIOD false positive overlap wtih utf8mb4_unicode_nopad_ci +# +CREATE TABLE t1 ( +datetime_column_name_1 DATETIME(6) NOT NULL, +datetime_column_name_2 DATETIME(6) NOT NULL, +text_column_name TEXT COLLATE utf8mb4_unicode_nopad_ci NOT NULL, +PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), +UNIQUE KEY index_name (text_column_name(191),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +TRUNCATE TABLE t1; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +DROP TABLE t1; +CREATE TABLE `t1` ( +datetime_column_name_1 DATETIME(6) NOT NULL, +datetime_column_name_2 DATETIME(6) NOT NULL, +text_column_name TEXT COLLATE utf8mb4_unicode_nopad_ci NOT NULL, +PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), +UNIQUE KEY index_name (text_column_name(191),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def '), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def'); +TRUNCATE TABLE t1; +INSERT INTO t1 VALUES +('2000-01-01 
00:00:00.000000', '2001-01-01 00:00:00.000000', 'def'), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def '), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def '); +SELECT *, LENGTH(text_column_name) FROM t1; +datetime_column_name_1 datetime_column_name_2 text_column_name LENGTH(text_column_name) +2000-01-01 00:00:00.000000 2001-01-01 00:00:00.000000 def 3 +2000-01-01 00:00:00.000000 2001-01-01 00:00:00.000000 def 4 +2000-01-01 00:00:00.000000 2001-01-01 00:00:00.000000 def 5 +DROP TABLE t1; +CREATE TABLE t1 ( +datetime_column_name_1 DATETIME(6) NOT NULL, +datetime_column_name_2 DATETIME(6) NOT NULL, +text_column_name TEXT COLLATE utf8mb4_unicode_nopad_ci NOT NULL, +PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), +UNIQUE KEY index_name (text_column_name(3),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +ERROR 23000: Duplicate entry 'abc-2001-01-01 00:00:00.000000-2000-01-01 00:00:00.000000' for key 'index_name' +TRUNCATE TABLE t1; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +ERROR 23000: Duplicate entry 'abc-2001-01-01 00:00:00.000000-2000-01-01 00:00:00.000000' for key 'index_name' +DROP TABLE t1; +CREATE TABLE t1 ( +datetime_column_name_1 DATETIME(6) NOT NULL, +datetime_column_name_2 DATETIME(6) NOT NULL, +char_column_name CHAR(255) COLLATE utf8mb4_unicode_nopad_ci NOT NULL, +PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), +UNIQUE KEY index_name (char_column_name(191),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +ERROR 23000: Duplicate entry 'abc-2001-01-01 00:00:00.000000-2000-01-01 00:00:00.000000' for key 'index_name' +TRUNCATE TABLE t1; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +ERROR 23000: Duplicate entry 'abc-2001-01-01 00:00:00.000000-2000-01-01 00:00:00.000000' for key 'index_name' +DROP TABLE t1; +CREATE TABLE t1 ( +datetime_column_name_1 DATETIME(6) NOT NULL, +datetime_column_name_2 DATETIME(6) NOT NULL, +char_column_name CHAR(255) COLLATE utf8mb4_unicode_nopad_ci NOT NULL, +PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), +UNIQUE KEY index_name (char_column_name(3),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +ERROR 23000: Duplicate entry 'abc-2001-01-01 00:00:00.000000-2000-01-01 00:00:00.000000' for key 'index_name' +TRUNCATE TABLE t1; +INSERT INTO t1 (datetime_column_name_1, 
datetime_column_name_2, char_column_name) +VALUES +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), +('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +ERROR 23000: Duplicate entry 'abc-2001-01-01 00:00:00.000000-2000-01-01 00:00:00.000000' for key 'index_name' +DROP TABLE t1; diff --git a/mysql-test/suite/period/t/overlaps.test b/mysql-test/suite/period/t/overlaps.test index 6cd78769d4a40..4611aa15ddd2a 100644 --- a/mysql-test/suite/period/t/overlaps.test +++ b/mysql-test/suite/period/t/overlaps.test @@ -344,3 +344,113 @@ create or replace table t (id int, s date, e date, period for p(s,e), update t set id = 1; drop table t, t1; + +--echo # +--echo # MDEV-30415 PERIOD false positive overlap wtih utf8mb4_unicode_nopad_ci +--echo # + +# The originally reported script with a TEXT column (slightly modified) +CREATE TABLE t1 ( + datetime_column_name_1 DATETIME(6) NOT NULL, + datetime_column_name_2 DATETIME(6) NOT NULL, + text_column_name TEXT COLLATE utf8mb4_unicode_nopad_ci NOT NULL, + PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), + UNIQUE KEY index_name (text_column_name(191),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +TRUNCATE TABLE t1; +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +DROP TABLE t1; + + +# The script reported by Alice with a TEXT column +CREATE TABLE `t1` ( + datetime_column_name_1 DATETIME(6) NOT NULL, + datetime_column_name_2 DATETIME(6) NOT NULL, + text_column_name TEXT COLLATE utf8mb4_unicode_nopad_ci NOT NULL, + PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), + UNIQUE KEY index_name (text_column_name(191),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +INSERT INTO t1 VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def '), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def'); +TRUNCATE TABLE t1; +INSERT INTO t1 VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def'), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def '), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'def '); +--sorted_result +SELECT *, LENGTH(text_column_name) FROM t1; +DROP TABLE t1; + + +# A TEXT column with a short prefix +CREATE TABLE t1 ( + datetime_column_name_1 DATETIME(6) NOT NULL, + datetime_column_name_2 DATETIME(6) NOT NULL, + text_column_name TEXT COLLATE utf8mb4_unicode_nopad_ci NOT NULL, + PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), + UNIQUE KEY index_name (text_column_name(3),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +--error ER_DUP_ENTRY +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +TRUNCATE TABLE t1; +--error ER_DUP_ENTRY +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, text_column_name) +VALUES + ('2000-01-01 
00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +DROP TABLE t1; + + +# A CHAR with a long prefix +CREATE TABLE t1 ( + datetime_column_name_1 DATETIME(6) NOT NULL, + datetime_column_name_2 DATETIME(6) NOT NULL, + char_column_name CHAR(255) COLLATE utf8mb4_unicode_nopad_ci NOT NULL, + PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), + UNIQUE KEY index_name (char_column_name(191),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +--error ER_DUP_ENTRY +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +TRUNCATE TABLE t1; +--error ER_DUP_ENTRY +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +DROP TABLE t1; + + +# A CHAR column with a short prefix +CREATE TABLE t1 ( + datetime_column_name_1 DATETIME(6) NOT NULL, + datetime_column_name_2 DATETIME(6) NOT NULL, + char_column_name CHAR(255) COLLATE utf8mb4_unicode_nopad_ci NOT NULL, + PERIOD FOR period_name (datetime_column_name_1, datetime_column_name_2), + UNIQUE KEY index_name (char_column_name(3),period_name WITHOUT OVERLAPS) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_nopad_ci; +--error ER_DUP_ENTRY +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '); +TRUNCATE TABLE t1; +--error ER_DUP_ENTRY +INSERT INTO t1 (datetime_column_name_1, datetime_column_name_2, char_column_name) +VALUES + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc '), + ('2000-01-01 00:00:00.000000', '2001-01-01 00:00:00.000000', 'abc'); +DROP TABLE t1; diff --git a/mysql-test/suite/plugins/r/feedback_plugin_install.result b/mysql-test/suite/plugins/r/feedback_plugin_install.result index c7f7a5c79f3fd..d2291f20b4f5f 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_install.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_install.result @@ -11,6 +11,6 @@ FEEDBACK version 1.1 FEEDBACK_HTTP_PROXY FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 -FEEDBACK_URL http://mariadb.org/feedback_plugin/post +FEEDBACK_URL http://feedback.mariadb.org/rest/v1/post FEEDBACK_USER_INFO mysql-test uninstall plugin feedback; diff --git a/mysql-test/suite/plugins/r/feedback_plugin_load.result b/mysql-test/suite/plugins/r/feedback_plugin_load.result index f31c7325ca112..23376c684dd27 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_load.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_load.result @@ -13,7 +13,7 @@ FEEDBACK version 1.1 FEEDBACK_HTTP_PROXY FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 -FEEDBACK_URL http://mariadb.org/feedback_plugin/post +FEEDBACK_URL http://feedback.mariadb.org/rest/v1/post FEEDBACK_USER_INFO mysql-test SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK WHERE VARIABLE_NAME LIKE 'Collation used %' diff --git a/mysql-test/suite/plugins/r/feedback_plugin_send.result b/mysql-test/suite/plugins/r/feedback_plugin_send.result index b10ea915a4fd7..1f7539978c8b3 100644 --- 
a/mysql-test/suite/plugins/r/feedback_plugin_send.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_send.result @@ -13,7 +13,7 @@ FEEDBACK version 1.1 FEEDBACK_HTTP_PROXY FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 -FEEDBACK_URL http://mariadb.org/feedback_plugin/post +FEEDBACK_URL http://feedback.mariadb.org/rest/v1/post FEEDBACK_USER_INFO mysql-test SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK WHERE VARIABLE_NAME LIKE 'Collation used %' @@ -42,5 +42,5 @@ VARIABLE_VALUE>0 VARIABLE_NAME deallocate prepare stmt; set global sql_mode=ONLY_FULL_GROUP_BY; # restart -feedback plugin: report to 'http://mariadb.org/feedback_plugin/post' was sent +feedback plugin: report to 'http://feedback.mariadb.org/rest/v1/post' was sent feedback plugin: server replied 'ok' diff --git a/mysql-test/suite/plugins/r/locales.result b/mysql-test/suite/plugins/r/locales.result index e906d27c21ef0..5229b7ffaa885 100644 --- a/mysql-test/suite/plugins/r/locales.result +++ b/mysql-test/suite/plugins/r/locales.result @@ -57,7 +57,7 @@ ID NAME DESCRIPTION MAX_MONTH_NAME_LENGTH MAX_DAY_NAME_LENGTH DECIMAL_POINT THOU 53 uk_UA Ukrainian - Ukraine 8 9 , . ukrainian 54 ur_PK Urdu - Pakistan 6 6 . , english 55 vi_VN Vietnamese - Vietnam 16 11 , . english -56 zh_CN Chinese - Peoples Republic of China 3 3 . , english +56 zh_CN Chinese - Peoples Republic of China 3 3 . , chinese 57 zh_TW Chinese - Taiwan 3 2 . , english 58 ar_DZ Arabic - Algeria 6 8 . , english 59 ar_EG Arabic - Egypt 6 8 . , english @@ -170,7 +170,7 @@ Id Name Description Error_Message_Language 53 uk_UA Ukrainian - Ukraine ukrainian 54 ur_PK Urdu - Pakistan english 55 vi_VN Vietnamese - Vietnam english -56 zh_CN Chinese - Peoples Republic of China english +56 zh_CN Chinese - Peoples Republic of China chinese 57 zh_TW Chinese - Taiwan english 58 ar_DZ Arabic - Algeria english 59 ar_EG Arabic - Egypt english diff --git a/mysql-test/suite/roles/role_grant_propagate.result b/mysql-test/suite/roles/role_grant_propagate.result index 7804b7b7a3c0c..111fd4dbc28c7 100644 --- a/mysql-test/suite/roles/role_grant_propagate.result +++ b/mysql-test/suite/roles/role_grant_propagate.result @@ -163,5 +163,18 @@ drop role student; drop role admin; drop database crm; # +# MDEV-30526 Assertion `rights == merged->cols' failed in update_role_columns +# +create table t1 ( pk int, i int); +create role a; +grant select (i), update (pk) on t1 to a; +revoke update (pk) on t1 from a; +show grants for a; +Grants for a +GRANT USAGE ON *.* TO `a` +GRANT SELECT (`i`) ON `test`.`t1` TO `a` +drop role a; +drop table t1; +# # End of 10.3 tests # diff --git a/mysql-test/suite/roles/role_grant_propagate.test b/mysql-test/suite/roles/role_grant_propagate.test index bf20bc008097c..02d451f0afdcb 100644 --- a/mysql-test/suite/roles/role_grant_propagate.test +++ b/mysql-test/suite/roles/role_grant_propagate.test @@ -196,6 +196,17 @@ drop role student; drop role admin; drop database crm; +--echo # +--echo # MDEV-30526 Assertion `rights == merged->cols' failed in update_role_columns +--echo # +create table t1 ( pk int, i int); +create role a; +grant select (i), update (pk) on t1 to a; +revoke update (pk) on t1 from a; +show grants for a; +drop role a; +drop table t1; + --echo # --echo # End of 10.3 tests --echo # diff --git a/mysql-test/suite/rpl/include/rpl_binlog_errors.inc b/mysql-test/suite/rpl/include/rpl_binlog_errors.inc deleted file mode 100644 index ab95c9b4ee353..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_binlog_errors.inc +++ /dev/null @@ -1,443 
+0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# -# Usage: -# --let $binlog_limit= X[,Y] # optional -# -# Semantics of the value is the same as in include/show_binlog_events.inc -# which the script calls as a part of the test flow. -# The goal is to print the event demonstrating the triggered error, -# so normally Y should be 1 (print the exact event only); -# however, depending on test-specific server options, the offset X -# can be different. -# - -# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error -# when generating new name. -# -# WHY -# === -# -# We want to check whether error is reported or not when -# new_file_impl fails (this may happen when rotation is not -# possible because there is some problem finding an -# unique filename). -# -# HOW -# === -# -# Test cases are documented inline. - --- source include/have_innodb.inc --- source include/have_debug.inc --- source include/master-slave.inc - --- echo ####################################################################### --- echo ####################### PART 1: MASTER TESTS ########################## --- echo ####################################################################### - - -### ACTION: stopping slave as it is not needed for the first part of -### the test - --- connection slave --- source include/stop_slave.inc --- connection master - -call mtr.add_suppression("Can't generate a unique log-filename"); -call mtr.add_suppression("Writing one row to the row-based binary log failed.*"); -call mtr.add_suppression("Error writing file .*"); -call mtr.add_suppression("Could not use master-bin for logging"); - -SET @old_debug= @@global.debug_dbug; - -### ACTION: create a large file (> 4096 bytes) that will be later used -### in LOAD DATA INFILE to check binlog errors in its vacinity --- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data --- let $MYSQLD_DATADIR= `select @@datadir` --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file' - -### ACTION: create a small file (< 4096 bytes) that will be later used -### in LOAD DATA INFILE to check for absence of binlog errors -### when file loading this file does not force flushing and -### rotating the binary log --- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data --- let $MYSQLD_DATADIR= `select @@datadir` --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2' - -RESET MASTER; - --- echo ###################### TEST #1 - -### ASSERTION: no problem flushing logs (should show two binlogs) -FLUSH LOGS; --- echo # assert: must show two binlogs --- source include/show_binary_logs.inc - --- echo ###################### TEST #2 - -### ASSERTION: check that FLUSH LOGS actually fails and reports -### failure back to the user if find_uniq_filename fails -### (should show just one binlog) - -RESET MASTER; -SET @@global.debug_dbug="d,error_unique_log_filename"; --- error ER_NO_UNIQUE_LOGFILE -FLUSH LOGS; --- echo # assert: must show one binlog --- source include/show_binary_logs.inc - -### ACTION: clean up and move to next test -SET @@global.debug_dbug=@old_debug; -RESET MASTER; - --- echo ###################### TEST #3 - -### ACTION: create some tables (t1, t2, t4) and insert some values in -### table t1 -CREATE TABLE t1 (a INT); -CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB; -CREATE TABLE t4 (a VARCHAR(16384)); -INSERT INTO t1 VALUES (1); 
-RESET MASTER; - -### ASSERTION: we force rotation of the binary log because it exceeds -### the max_binlog_size option (should show two binary -### logs) - --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 - -# shows two binary logs --- echo # assert: must show two binlog --- source include/show_binary_logs.inc - -# clean up the table and the binlog to be used in next part of test -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #4 - -### ASSERTION: load the big file into a transactional table and check -### that it reports error. The table will contain the -### changes performed despite the fact that it reported an -### error. - -SET @@global.debug_dbug="d,error_unique_log_filename"; --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 - -# show table --- echo # assert: must show one entry -SELECT count(*) FROM t2; - -# clean up the table and the binlog to be used in next part of test -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #5 - -### ASSERTION: load the small file into a transactional table and -### check that it succeeds - -SET @@global.debug_dbug="d,error_unique_log_filename"; --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2 - -# show table --- echo # assert: must show one entry -SELECT count(*) FROM t2; - -# clean up the table and the binlog to be used in next part of test -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #6 - -### ASSERTION: check that even if one is using a transactional table -### and explicit transactions (no autocommit) if rotation -### fails we get the error. Transaction is not rolledback -### because rotation happens after the commit. - -SET @@global.debug_dbug="d,error_unique_log_filename"; -SET AUTOCOMMIT=0; -INSERT INTO t2 VALUES ('muse'); --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 -INSERT INTO t2 VALUES ('muse'); --- error ER_NO_UNIQUE_LOGFILE -COMMIT; - -### ACTION: Show the contents of the table after the test --- echo # assert: must show three entries -SELECT count(*) FROM t2; - -### ACTION: clean up and move to the next test -SET AUTOCOMMIT= 1; -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #7 - -### ASSERTION: check that on a non-transactional table, if rotation -### fails then an error is reported and an incident event -### is written to the current binary log. - -SET @@global.debug_dbug="d,error_unique_log_filename"; - -# Disable logging Annotate_rows events to preserve events count. 
-let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; -SET @@binlog_annotate_row_events= 0; - -SELECT count(*) FROM t4; --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 - --- echo # assert: must show 1 entry -SELECT count(*) FROM t4; - --- echo ### check that the incident event is written to the current log -SET @@global.debug_dbug=@old_debug; -if (!$binlog_limit) -{ - -- let $binlog_limit= 4,1 -} --- source include/show_binlog_events.inc - -# clean up and move to next test -DELETE FROM t4; - ---disable_query_log -eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; ---enable_query_log - -RESET MASTER; - --- echo ###################### TEST #8 - -### ASSERTION: check that statements end up in error but they succeed -### on changing the data. - -SET @@global.debug_dbug="d,error_unique_log_filename"; --- echo # must show 0 entries -SELECT count(*) FROM t4; -SELECT count(*) FROM t2; - --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 --- error ER_NO_UNIQUE_LOGFILE -INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'); - --- echo # INFO: Count(*) Before Offending DELETEs --- echo # assert: must show 1 entry -SELECT count(*) FROM t4; --- echo # assert: must show 4 entries -SELECT count(*) FROM t2; - --- error ER_NO_UNIQUE_LOGFILE -DELETE FROM t4; --- error ER_NO_UNIQUE_LOGFILE -DELETE FROM t2; - --- echo # INFO: Count(*) After Offending DELETEs --- echo # assert: must show zero entries -SELECT count(*) FROM t4; -SELECT count(*) FROM t2; - -# remove fault injection -SET @@global.debug_dbug=@old_debug; - --- echo ###################### TEST #9 - -### ASSERTION: check that if we disable binlogging, then statements -### succeed. -SET @@global.debug_dbug="d,error_unique_log_filename"; -SET SQL_LOG_BIN=0; -INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd'); -INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh'); --- echo # assert: must show four entries -SELECT count(*) FROM t2; -SELECT count(*) FROM t4; -DELETE FROM t2; -DELETE FROM t4; --- echo # assert: must show zero entries -SELECT count(*) FROM t2; -SELECT count(*) FROM t4; -SET SQL_LOG_BIN=1; -SET @@global.debug_dbug=@old_debug; - --- echo ###################### TEST #10 - -### ASSERTION: check that error is reported if there is a failure -### while registering the index file and the binary log -### file or failure to write the rotate event. 
- -call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); -call mtr.add_suppression("Could not use .*"); - -RESET MASTER; -SHOW WARNINGS; - -# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open -SET @@global.debug_dbug="d,fault_injection_registering_index"; --- replace_regex /\.[\\\/]master/master/ --- error ER_CANT_OPEN_FILE -FLUSH LOGS; -SET @@global.debug_dbug=@old_debug; - --- error ER_NO_BINARY_LOGGING -SHOW BINARY LOGS; - -# issue some statements and check that they don't fail -CREATE TABLE t5 (a INT); -INSERT INTO t4 VALUES ('bbbbb'); -INSERT INTO t2 VALUES ('aaaaa'); -DELETE FROM t4; -DELETE FROM t2; -DROP TABLE t5; -flush tables; - --- echo ###################### TEST #11 - -### ASSERTION: check that error is reported if there is a failure -### while opening the index file and the binary log file or -### failure to write the rotate event. - -# restart the server so that we have binlog again ---let $rpl_server_number= 1 ---source include/rpl_restart_server.inc - -# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file -SET @@global.debug_dbug="d,fault_injection_openning_index"; --- replace_regex /\.[\\\/]master/master/ --- error ER_CANT_OPEN_FILE -FLUSH LOGS; -SET @@global.debug_dbug=@old_debug; - --- error ER_FLUSH_MASTER_BINLOG_CLOSED -RESET MASTER; - -# issue some statements and check that they don't fail -CREATE TABLE t5 (a INT); -INSERT INTO t4 VALUES ('bbbbb'); -INSERT INTO t2 VALUES ('aaaaa'); -DELETE FROM t4; -DELETE FROM t2; -DROP TABLE t5; -flush tables; - -# restart the server so that we have binlog again ---let $rpl_server_number= 1 ---source include/rpl_restart_server.inc - --- echo ###################### TEST #12 - -### ASSERTION: check that error is reported if there is a failure -### while writing the rotate event when creating a new log -### file. 
- -# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::MYSQL_BIN_LOG::new_file_impl -SET @@global.debug_dbug="d,fault_injection_new_file_rotate_event"; --- error ER_ERROR_ON_WRITE -FLUSH LOGS; -SET @@global.debug_dbug=@old_debug; - --- error ER_FLUSH_MASTER_BINLOG_CLOSED -RESET MASTER; - -# issue some statements and check that they don't fail -CREATE TABLE t5 (a INT); -INSERT INTO t4 VALUES ('bbbbb'); -INSERT INTO t2 VALUES ('aaaaa'); -DELETE FROM t4; -DELETE FROM t2; -DROP TABLE t5; -flush tables; - -# restart the server so that we have binlog again ---let $rpl_server_number= 1 ---source include/rpl_restart_server.inc - -## clean up -DROP TABLE t1, t2, t4; -RESET MASTER; - -# restart slave again --- connection slave --- source include/start_slave.inc --- connection master - --- echo ####################################################################### --- echo ####################### PART 2: SLAVE TESTS ########################### --- echo ####################################################################### - -### setup ---source include/rpl_reset.inc --- connection slave - -# Slave tests rely on logic of non-gtid mode ---source include/stop_slave.inc -CHANGE MASTER TO MASTER_USE_GTID=NO; ---source include/start_slave.inc - -# slave suppressions - -call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*"); -call mtr.add_suppression("Error writing file .*"); -call mtr.add_suppression("Could not use .*"); -call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); -call mtr.add_suppression("Can't generate a unique log-filename .*"); --- echo ###################### TEST #13 - -#### ASSERTION: check against unique log filename error --- let $io_thd_injection_fault_flag= error_unique_log_filename --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - --- echo ###################### TEST #14 - -#### ASSERTION: check against rotate failing --- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - --- echo ###################### TEST #15 - -#### ASSERTION: check against relay log open failure --- let $io_thd_injection_fault_flag= fault_injection_registering_index --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - --- echo ###################### TEST #16 - -#### ASSERTION: check against relay log index open failure --- let $io_thd_injection_fault_flag= fault_injection_openning_index --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - -### clean up --- source include/stop_slave_sql.inc -RESET SLAVE; -RESET MASTER; ---remove_file $load_file ---remove_file $load_file2 ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_cant_read_event_incident.inc b/mysql-test/suite/rpl/include/rpl_cant_read_event_incident.inc deleted file mode 100644 index 7c1d0ea25e2dc..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_cant_read_event_incident.inc +++ /dev/null @@ -1,88 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# -# Bug#11747416 : 32228 A disk full makes binary log corrupt. 
-# -# -# The test demonstrates reading from binlog error propagation to slave -# and reporting there. -# Conditions for the bug include a crash at time of the last event to -# the binlog was written partly. With the fixes the event is not sent out -# any longer, but rather the dump thread sends out a sound error message. -# -# Crash is not simulated. A binlog with partly written event in its end is installed -# and replication is started from it. -# - ---source include/have_binlog_format_mixed.inc ---source include/master-slave.inc - ---connection slave ---source include/stop_slave.inc -CHANGE MASTER TO MASTER_USE_GTID=NO; ---source include/start_slave.inc - -# Make sure the slave is stopped while we are messing with master. -# Otherwise we get occasional failures as the slave manages to re-connect -# to the newly started master and we get extra events applied, causing -# conflicts. ---source include/stop_slave.inc - ---connection master -call mtr.add_suppression("Error in Log_event::read_log_event()"); ---let $datadir= `SELECT @@datadir` - ---let $rpl_server_number= 1 ---source include/rpl_stop_server.inc - ---remove_file $datadir/master-bin.000001 ---copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001 - ---let $rpl_server_number= 1 ---source include/rpl_start_server.inc - ---source include/wait_until_connected_again.inc - -# evidence of the partial binlog ---error ER_ERROR_WHEN_EXECUTING_COMMAND -show binlog events; - ---connection slave -call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log"); ---let $master_use_gtid_option= No ---source include/reset_slave.inc -start slave; - -# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236 ---let $slave_param=Last_IO_Errno ---let $slave_param_value=1236 ---source include/wait_for_slave_param.inc - ---let $slave_field_result_replace= / at [0-9]*/ at XXX/ ---let $status_items= Last_IO_Errno, Last_IO_Error ---source include/show_slave_status.inc - -# -# Cleanup -# - ---connection master -reset master; - ---connection slave -stop slave; -reset slave; -# Table was created from binlog, it may not be created if SQL thread is running -# slowly and IO thread reaches incident before SQL thread applies it. ---disable_warnings -drop table if exists t; ---enable_warnings -reset master; - ---echo End of the tests ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_checksum.inc b/mysql-test/suite/rpl/include/rpl_checksum.inc deleted file mode 100644 index fc765744b3faa..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_checksum.inc +++ /dev/null @@ -1,342 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -# WL2540 replication events checksum -# Testing configuration parameters - ---source include/have_debug.inc ---source include/have_binlog_format_mixed.inc ---source include/master-slave.inc - ---connection slave ---source include/stop_slave.inc -CHANGE MASTER TO MASTER_USE_GTID=NO; ---source include/start_slave.inc - ---connection master -call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log'); -call mtr.add_suppression('Replication event checksum verification failed'); -# due to C failure simulation -call mtr.add_suppression('Relay log write failure: could not queue event from master'); -call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them'); - -# A. read/write access to the global vars: -# binlog_checksum master_verify_checksum slave_sql_verify_checksum - -connection master; - -set @master_save_binlog_checksum= @@global.binlog_checksum; -set @save_master_verify_checksum = @@global.master_verify_checksum; - -select @@global.binlog_checksum as 'must be CRC32 because of the command line option'; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.binlog_checksum as 'no session var'; - -select @@global.master_verify_checksum as 'must be zero because of default'; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.master_verify_checksum as 'no session var'; - -connection slave; - -set @slave_save_binlog_checksum= @@global.binlog_checksum; -set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum; - -select @@global.slave_sql_verify_checksum as 'must be one because of default'; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.slave_sql_verify_checksum as 'no session var'; - -connection master; - -source include/show_binary_logs.inc; -set @@global.binlog_checksum = NONE; -select @@global.binlog_checksum; ---echo *** must be rotations seen *** -source include/show_binary_logs.inc; - -set @@global.binlog_checksum = default; -select @@global.binlog_checksum; - -# testing lack of side-effects in non-effective update of binlog_checksum: -set @@global.binlog_checksum = CRC32; -select @@global.binlog_checksum; -set @@global.binlog_checksum = CRC32; - -set @@global.master_verify_checksum = 0; -set @@global.master_verify_checksum = default; - ---error ER_WRONG_VALUE_FOR_VAR -set @@global.binlog_checksum = ADLER32; ---error ER_WRONG_VALUE_FOR_VAR -set @@global.master_verify_checksum = 2; # the var is of bool type - -connection slave; - -set @@global.slave_sql_verify_checksum = 0; -set @@global.slave_sql_verify_checksum = default; ---error ER_WRONG_VALUE_FOR_VAR -set @@global.slave_sql_verify_checksum = 2; # the var is of bool type - -# -# B. 
Old Slave to New master conditions -# -# while master does not send a checksum-ed binlog the Old Slave can -# work with the New Master - -connection master; - -set @@global.binlog_checksum = NONE; -create table t1 (a int); - -# testing that binlog rotation preserves opt_binlog_checksum value -flush logs; -flush logs; --- source include/wait_for_binlog_checkpoint.inc -flush logs; - -sync_slave_with_master; -#connection slave; -# checking that rotation on the slave side leaves slave stable -flush logs; -flush logs; -flush logs; -select count(*) as zero from t1; - -source include/stop_slave.inc; - -connection master; -set @@global.binlog_checksum = CRC32; --- source include/wait_for_binlog_checkpoint.inc -insert into t1 values (1) /* will not be applied on slave due to simulation */; - -# instruction to the dump thread - -connection slave; -set @saved_dbug = @@global.debug_dbug; -set @@global.debug_dbug='d,simulate_slave_unaware_checksum'; -start slave; ---let $slave_io_errno= 1236 ---let $show_slave_io_error= 1 -source include/wait_for_slave_io_error.inc; - -select count(*) as zero from t1; - -set @@global.debug_dbug = @saved_dbug; - -connection slave; -source include/start_slave.inc; - -# -# C. checksum failure simulations -# - -# C1. Failure by a client thread -connection master; -set @@global.master_verify_checksum = 1; -set @save_dbug = @@session.debug_dbug; -set @@session.debug_dbug='d,simulate_checksum_test_failure'; ---error ER_ERROR_WHEN_EXECUTING_COMMAND -show binlog events; -SET debug_dbug= @save_dbug; -set @@global.master_verify_checksum = default; - -#connection master; -sync_slave_with_master; - -connection slave; -source include/stop_slave.inc; - -connection master; -create table t2 (a int); -let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1); - -connection slave; - -# C2. Failure by IO thread -# instruction to io thread -set @saved_dbug = @@global.debug_dbug; -set @@global.debug_dbug='d,simulate_checksum_test_failure'; -start slave io_thread; -# When the checksum error is detected, the slave sets error code 1913 -# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately -# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). -# So we usually get 1595, but it is occasionally possible to get 1913. ---let $slave_io_errno= 1595,1913 ---let $show_slave_io_error= 0 -source include/wait_for_slave_io_error.inc; -set @@global.debug_dbug = @saved_dbug; - -# to make IO thread re-read it again w/o the failure -start slave io_thread; -let $slave_param= Read_Master_Log_Pos; -let $slave_param_value= $pos_master; -source include/wait_for_slave_param.inc; - -# C3. Failure by SQL thread -# instruction to sql thread; -set @@global.slave_sql_verify_checksum = 1; - -set @@global.debug_dbug='d,simulate_checksum_test_failure'; - -start slave sql_thread; ---let $slave_sql_errno= 1593 ---let $show_slave_sql_error= 1 -source include/wait_for_slave_sql_error.inc; - -# resuming SQL thread to parse out the event w/o the failure - -set @@global.debug_dbug = @saved_dbug; -source include/start_slave.inc; - -connection master; -sync_slave_with_master; - -#connection slave; -select count(*) as 'must be zero' from t2; - -# -# D. 
Reset slave, Change-Master, Binlog & Relay-log rotations with -# random value on binlog_checksum on both master and slave -# -connection slave; -stop slave; ---let $master_use_gtid_option= No ---source include/reset_slave.inc - -# randomize slave server's own checksum policy -set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); -flush logs; - -connection master; -set @@global.binlog_checksum= CRC32; -reset master; -flush logs; -create table t3 (a int, b char(5)); - -connection slave; -source include/start_slave.inc; - -connection master; -sync_slave_with_master; - -#connection slave; -select count(*) as 'must be zero' from t3; -source include/stop_slave.inc; ---replace_result $MASTER_MYPORT MASTER_PORT -eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root'; - -connection master; -flush logs; -reset master; -insert into t3 value (1, @@global.binlog_checksum); - -connection slave; -source include/start_slave.inc; -flush logs; - -connection master; -sync_slave_with_master; - -#connection slave; -select count(*) as 'must be one' from t3; - -connection master; -set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); -insert into t3 value (1, @@global.binlog_checksum); -sync_slave_with_master; - -#connection slave; - -#clean-up - -connection master; -drop table t1, t2, t3; -set @@global.binlog_checksum = @master_save_binlog_checksum; -set @@global.master_verify_checksum = @save_master_verify_checksum; - -# -# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540 -# -# Sanity check that verifies that no assertions are triggered because -# of old FD events (generated by versions prior to server released with -# checksums feature) -# -# There is no need for query log, if something wrong this should trigger -# an assertion - ---disable_query_log - -BINLOG ' -MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA== -'; - ---enable_query_log - -#connection slave; -sync_slave_with_master; - - ---echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** - ---connection master - ---source include/wait_for_binlog_checkpoint.inc -CREATE TABLE t4 (a INT PRIMARY KEY); -INSERT INTO t4 VALUES (1); - -SET sql_log_bin=0; -CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); -SET sql_log_bin=1; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET debug_dbug= '+d,binlog_inject_new_name_error'; ---error ER_NO_UNIQUE_LOGFILE -FLUSH LOGS; -SET debug_dbug= @old_dbug; - -INSERT INTO t4 VALUES (2); - ---connection slave ---let $slave_sql_errno= 1590 ---source include/wait_for_slave_sql_error.inc - -# Search the error log for the error message. -# The bug was that 4 garbage bytes were output in the middle of the error -# message; by searching for a pattern that spans that location, we can -# catch the error. -let $log_error_= `SELECT @@GLOBAL.log_error`; -if(!$log_error_) -{ - # MySQL Server on windows is started with --console and thus - # does not know the location of its .err log, use default location - let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; -} ---let SEARCH_FILE= $log_error_ ---let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590 ---source include/search_pattern_in_file.inc - -SELECT * FROM t4 ORDER BY a; -STOP SLAVE IO_THREAD; -SET sql_slave_skip_counter= 1; ---source include/start_slave.inc - ---connection master ---save_master_pos - ---connection slave ---sync_with_master -SELECT * FROM t4 ORDER BY a; - - ---connection slave -set @@global.binlog_checksum = @slave_save_binlog_checksum; -set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; - ---echo End of tests - ---connection master -DROP TABLE t4; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_checksum_cache.inc b/mysql-test/suite/rpl/include/rpl_checksum_cache.inc deleted file mode 100644 index e04f618b81ea1..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_checksum_cache.inc +++ /dev/null @@ -1,261 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - --- source include/have_innodb.inc --- source include/master-slave.inc - ---disable_warnings -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t2 set data=repeat.*'a', @act_size.*"); -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*"); ---enable_warnings - -connection master; -set @save_binlog_cache_size = @@global.binlog_cache_size; -set @save_binlog_checksum = @@global.binlog_checksum; -set @save_master_verify_checksum = @@global.master_verify_checksum; -set @@global.binlog_cache_size = 4096; -set @@global.binlog_checksum = CRC32; -set @@global.master_verify_checksum = 1; - -# restart slave to force the dump thread to verify events (on master side) -connection slave; -source include/stop_slave.inc; -source include/start_slave.inc; - -connection master; - -# -# Testing a critical part of checksum handling dealing with transaction cache. -# The cache's buffer size is set to be less than the transaction's footprint -# in binlog. -# -# To verify combined buffer-by-buffer read out of the file and fixing crc per event -# there are the following parts: -# -# 1. the event size is much less than the cache's buffer -# 2. the event size is bigger than the cache's buffer -# 3. the event size if approximately the same as the cache's buffer -# 4. all in above - -# -# 1. the event size is much less than the cache's buffer -# - -flush status; -show status like "binlog_cache_use"; -show status like "binlog_cache_disk_use"; ---disable_warnings -drop table if exists t1; ---enable_warnings - -# -# parameter to ensure the test slightly varies binlog content -# between different invocations -# -let $deviation_size=32; -eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb; - -# Now we are going to create transaction which is long enough so its -# transaction binlog will be flushed to disk... - -delimiter |; -create procedure test.p_init (n int, size int) -begin - while n > 0 do - select round(RAND() * size) into @act_size; - set @data = repeat('a', @act_size); - insert into t1 values(n, @data ); - set n= n-1; - end while; -end| - -delimiter ;| - -let $1 = 4000; # PB2 can run it slow to time out on following sync_slave_with_master:s - -begin; ---disable_warnings -# todo: check if it is really so. 
-#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave. -eval call test.p_init($1, $deviation_size); ---enable_warnings -commit; - -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t1, slave:test.t1; -source include/diff_tables.inc; - -# undoing changes with verifying the above once again -connection master; - -begin; -delete from t1; -commit; - -sync_slave_with_master; - - -# -# 2. the event size is bigger than the cache's buffer -# -connection master; - -flush status; -let $t2_data_size= `select 3 * @@global.binlog_cache_size`; -let $t2_aver_size= `select 2 * @@global.binlog_cache_size`; -let $t2_max_rand= `select 1 * @@global.binlog_cache_size`; - -eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb; -let $1=100; ---disable_query_log -begin; -while ($1) -{ - eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; - set @data = repeat('a', @act_size); - insert into t2 set data = @data; - dec $1; -} -commit; ---enable_query_log -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t2, slave:test.t2; -source include/diff_tables.inc; - -# undoing changes with verifying the above once again -connection master; - -begin; -delete from t2; -commit; - -sync_slave_with_master; - -# -# 3. the event size if approximately the same as the cache's buffer -# - -connection master; - -flush status; -let $t3_data_size= `select 2 * @@global.binlog_cache_size`; -let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`; -let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`; - -eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb; - -let $1= 300; ---disable_query_log -begin; -while ($1) -{ - eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; - insert into t3 set data= repeat('a', @act_size); - dec $1; -} -commit; ---enable_query_log -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t3, slave:test.t3; -source include/diff_tables.inc; - -# undoing changes with verifying the above once again -connection master; - -begin; -delete from t3; -commit; - -sync_slave_with_master; - - -# -# 4. 
all in above -# - -connection master; -flush status; - -delimiter |; -eval create procedure test.p1 (n int) -begin - while n > 0 do - case (select (round(rand()*100) % 3) + 1) - when 1 then - select round(RAND() * $deviation_size) into @act_size; - set @data = repeat('a', @act_size); - insert into t1 values(n, @data); - when 2 then - begin - select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; - insert into t2 set data=repeat('a', @act_size); - end; - when 3 then - begin - select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; - insert into t3 set data= repeat('a', @act_size); - end; - end case; - set n= n-1; - end while; -end| -delimiter ;| - -let $1= 1000; -set autocommit= 0; -begin; ---disable_warnings -eval call test.p1($1); ---enable_warnings -commit; - -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t1, slave:test.t1; -source include/diff_tables.inc; - -let $diff_tables=master:test.t2, slave:test.t2; -source include/diff_tables.inc; - -let $diff_tables=master:test.t3, slave:test.t3; -source include/diff_tables.inc; - - -connection master; - -begin; -delete from t1; -delete from t2; -delete from t3; -commit; - -drop table t1, t2, t3; -set @@global.binlog_cache_size = @save_binlog_cache_size; -set @@global.binlog_checksum = @save_binlog_checksum; -set @@global.master_verify_checksum = @save_master_verify_checksum; -drop procedure test.p_init; -drop procedure test.p1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_corruption.inc b/mysql-test/suite/rpl/include/rpl_corruption.inc deleted file mode 100644 index cd2a1cc7ef207..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_corruption.inc +++ /dev/null @@ -1,181 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -############################################################ -# Purpose: WL#5064 Testing with corrupted events. -# The test emulates the corruption at the vary stages -# of replication: -# - in binlog file -# - in network -# - in relay log -############################################################ - -# -# The tests intensively utilize @@global.debug. Note, -# Bug#11765758 - 58754, -# @@global.debug is read by the slave threads through dbug-interface. -# Hence, before a client thread set @@global.debug we have to ensure that: -# (a) the slave threads are stopped, or (b) the slave threads are in -# sync and waiting. - ---source include/have_debug.inc ---source include/master-slave.inc - ---connection slave ---source include/stop_slave.inc -CHANGE MASTER TO MASTER_USE_GTID=NO; ---source include/start_slave.inc ---connection master - -# Block legal errors for MTR -call mtr.add_suppression('Found invalid event in binary log'); -call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master'); -call mtr.add_suppression('event read from binlog did not pass crc check'); -call mtr.add_suppression('Replication event checksum verification failed'); -call mtr.add_suppression('Event crc check failed! 
Most likely there is event corruption'); -call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593'); - -SET @old_master_verify_checksum = @@master_verify_checksum; - -# Creating test table/data and set corruption position for testing ---echo # 1. Creating test table/data and set corruption position for testing ---connection master ---echo * insert/update/delete rows in table t1 * -# Corruption algorithm modifies only the first event and -# then will be reset. To avoid checking always the first event -# from binlog (usually it is FD) we randomly execute different -# statements and set position for corruption inside events. - -CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100)); ---disable_query_log -let $i=`SELECT 3+CEILING(10*RAND())`; -let $j=1; -let $pos=0; -while ($i) { - eval INSERT INTO t1 VALUES ($j, 'a', NULL); - if (`SELECT RAND() > 0.7`) - { - eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j; - } - if (`SELECT RAND() > 0.8`) - { - eval DELETE FROM t1 WHERE a = $j; - } - if (!$pos) { - let $pos= query_get_value(SHOW MASTER STATUS, Position, 1); - --sync_slave_with_master - --source include/stop_slave.inc - --disable_query_log - --connection master - } - dec $i; - inc $j; -} ---enable_query_log - - -# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing ---echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS -SET @saved_dbug = @@global.debug_dbug; -SET @@global.debug_dbug="d,corrupt_read_log_event_char"; ---echo SHOW BINLOG EVENTS; ---disable_query_log -send_eval SHOW BINLOG EVENTS FROM $pos; ---enable_query_log ---error ER_ERROR_WHEN_EXECUTING_COMMAND -reap; - -SET @@global.debug_dbug=@saved_dbug; - -# Emulate corruption on master with crc checking on master ---echo # 3. Master read a corrupted event from binlog and send the error to slave - -# We have a rare but nasty potential race here: if the dump thread on -# the master for the _old_ slave connection has not yet discovered -# that the slave has disconnected, we will inject the corrupt event on -# the wrong connection, and the test will fail -# (+d,corrupt_read_log_event2 corrupts only one event). -# So kill any lingering dump thread (we need to kill; otherwise dump thread -# could manage to send all events down the socket before seeing it close, and -# hang forever waiting for new binlog events to be created). -let $id= `select id from information_schema.processlist where command = "Binlog Dump"`; -if ($id) -{ - --disable_query_log - --error 0,1094 - eval kill $id; - --enable_query_log -} -let $wait_condition= - SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump'; ---source include/wait_condition.inc - -SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; ---connection slave -START SLAVE IO_THREAD; -let $slave_io_errno= 1236; ---let $slave_timeout= 10 ---source include/wait_for_slave_io_error.inc ---connection master -SET @@global.debug_dbug=@saved_dbug; - -# Emulate corruption on master without crc checking on master ---echo # 4. 
Master read a corrupted event from binlog and send it to slave ---connection master -SET GLOBAL master_verify_checksum=0; -SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; ---connection slave -START SLAVE IO_THREAD; -# When the checksum error is detected, the slave sets error code 1743 -# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately -# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). -# So we usually get 1595, but it is occasionally possible to get 1743. -let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE ---source include/wait_for_slave_io_error.inc ---connection master -SET @@global.debug_dbug=@saved_dbug; -SET GLOBAL master_verify_checksum=1; - -# Emulate corruption in network ---echo # 5. Slave. Corruption in network ---connection slave -SET @saved_dbug_slave = @@GLOBAL.debug_dbug; -SET @@global.debug_dbug="d,corrupt_queue_event"; -START SLAVE IO_THREAD; -let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE ---source include/wait_for_slave_io_error.inc -SET @@global.debug_dbug=@saved_dbug_slave; - -# Emulate corruption in relay log ---echo # 6. Slave. Corruption in relay log - -SET @@global.debug_dbug="d,corrupt_read_log_event_char"; - -START SLAVE SQL_THREAD; -let $slave_sql_errno= 1593; ---source include/wait_for_slave_sql_error.inc - -SET @@global.debug_dbug=@saved_dbug_slave; - -# Start normal replication and compare same table on master -# and slave ---echo # 7. Seek diff for tables on master and slave ---connection slave ---source include/start_slave.inc ---connection master ---sync_slave_with_master -let $diff_tables= master:test.t1, slave:test.t1; ---source include/diff_tables.inc - -# Clean up ---echo # 8. Clean up ---connection master -set @@global.debug_dbug = @saved_dbug; -SET GLOBAL master_verify_checksum = @old_master_verify_checksum; -DROP TABLE t1; ---sync_slave_with_master - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_gtid_basic.inc b/mysql-test/suite/rpl/include/rpl_gtid_basic.inc deleted file mode 100644 index 68a5d05ffe947..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_gtid_basic.inc +++ /dev/null @@ -1,572 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - ---source include/have_innodb.inc ---let $rpl_topology=1->2->3->4 ---source include/rpl_init.inc - -# Set up a 4-deep replication topology, then test various fail-overs -# using GTID. 
-# -# A -> B -> C -> D - -connection server_1; ---source include/wait_for_binlog_checkpoint.inc ---let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) ---let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) ---echo *** GTID position should be empty here *** ---replace_result $binlog_file $binlog_pos -eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); - -CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; -CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1, "m1"); -INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4"); -INSERT INTO t2 VALUES (1, "i1"); -BEGIN; -INSERT INTO t2 VALUES (2, "i2"), (3, "i3"); -INSERT INTO t2 VALUES (4, "i4"); -COMMIT; -save_master_pos; -source include/wait_for_binlog_checkpoint.inc; ---let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) ---let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) ---let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos` ---echo *** GTID position should be non-empty here *** ---replace_result $binlog_file $binlog_pos $gtid_pos_server_1 -eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); - -connection server_2; -sync_with_master; -source include/wait_for_binlog_checkpoint.inc; ---let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) ---let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) ---echo *** GTID position should be the same as on server_1 *** ---replace_result $binlog_file $binlog_pos $gtid_pos_server_1 -eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; -save_master_pos; - -connection server_3; -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; -save_master_pos; - -connection server_4; -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; - - ---echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A *** -connection server_4; ---source include/stop_slave.inc - -connection server_1; -INSERT INTO t1 VALUES (5, "m1a"); -INSERT INTO t2 VALUES (5, "i1a"); -save_master_pos; - -connection server_4; ---replace_result $MASTER_MYPORT MASTER_PORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, - MASTER_USE_GTID=CURRENT_POS; ---source include/start_slave.inc -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; - ---echo *** Now move B to D (C is still replicating from B) *** -connection server_2; ---source include/stop_slave.inc ---replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, - MASTER_USE_GTID=CURRENT_POS; ---source include/start_slave.inc - -connection server_4; -UPDATE t2 SET b="j1a" WHERE a=5; -save_master_pos; - -connection server_2; -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; - ---echo *** Now move C to D, after letting it fall a little behind *** -connection server_3; ---source include/stop_slave.inc - -connection server_1; -INSERT INTO t2 VALUES (6, "i6b"); -INSERT INTO t2 VALUES (7, "i7b"); ---source include/save_master_gtid.inc - -connection server_3; ---replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, - MASTER_USE_GTID=CURRENT_POS; ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc -SELECT * FROM t2 ORDER BY a; - ---echo *** Now change everything back to what it was, to make 
rpl_end.inc happy -# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled. -connection server_2; -# We need to sync up server_2 before switching. If it happened to have reached -# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to -# server_1, which is (deliberately) missing that transaction. ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc ---replace_result $MASTER_MYPORT MASTER_MYPORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT; ---source include/start_slave.inc ---source include/wait_for_slave_to_start.inc - -connection server_3; ---source include/stop_slave.inc ---replace_result $SLAVE_MYPORT SLAVE_MYPORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT; ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -connection server_4; ---source include/stop_slave.inc ---replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3 -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3; ---source include/start_slave.inc - -connection server_1; -DROP TABLE t1,t2; ---source include/save_master_gtid.inc - ---echo *** A few more checks for BINLOG_GTID_POS function *** ---let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1) ---error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT -SELECT BINLOG_GTID_POS(); ---error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT -SELECT BINLOG_GTID_POS('a'); ---error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT -SELECT BINLOG_GTID_POS('a',1,NULL); -SELECT BINLOG_GTID_POS(1,'a'); -SELECT BINLOG_GTID_POS(NULL,NULL); -SELECT BINLOG_GTID_POS('',1); -SELECT BINLOG_GTID_POS('a',1); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616); - - ---echo *** Some tests of @@GLOBAL.gtid_binlog_state *** ---connection server_2 ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc - ---connection server_1 -SET @old_state= @@GLOBAL.gtid_binlog_state; - ---error ER_BINLOG_MUST_BE_EMPTY -SET GLOBAL gtid_binlog_state = ''; -RESET MASTER; -SET GLOBAL gtid_binlog_state = ''; -FLUSH LOGS; ---source include/show_binary_logs.inc -SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30'; ---source include/show_binary_logs.inc ---let $binlog_file= master-bin.000001 ---let $binlog_start= 4 ---source include/show_binlog_events.inc -#SELECT @@GLOBAL.gtid_binlog_pos; -#SELECT @@GLOBAL.gtid_binlog_state; ---error ER_BINLOG_MUST_BE_EMPTY -SET GLOBAL gtid_binlog_state = @old_state; -RESET MASTER; -SET GLOBAL gtid_binlog_state = @old_state; - -# Check that slave can reconnect again, despite the RESET MASTER, as we -# restored the state. - -CREATE TABLE t1 (a INT PRIMARY KEY); -SET gtid_seq_no=100; -INSERT INTO t1 VALUES (1); ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc -# We cannot just use sync_with_master as we've done RESET MASTER, so -# slave old-style position is wrong. -# So sync on gtid position instead. ---source include/sync_with_master_gtid.inc - -SELECT * FROM t1; -# Check that the IO gtid position in SHOW SLAVE STATUS is also correct. 
---let $status_items= Gtid_IO_Pos ---source include/show_slave_status.inc - ---echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() *** - ---connection server_1 -DROP TABLE t1; -CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; ---save_master_pos - ---connection server_2 ---sync_with_master ---source include/stop_slave.inc - ---connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SELECT @@last_gtid; -SET gtid_seq_no=110; -SELECT @@last_gtid; -BEGIN; -SELECT @@last_gtid; -INSERT INTO t1 VALUES (2); -SELECT @@last_gtid; -COMMIT; -SELECT @@last_gtid; ---let $pos= `SELECT @@gtid_binlog_pos` - ---connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -eval SET @pos= '$pos'; -# Check NULL argument. -SELECT master_gtid_wait(NULL); -# Check empty argument returns immediately. -SELECT master_gtid_wait('', NULL); -# Check this gets counted -SHOW STATUS LIKE 'Master_gtid_wait_count'; -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_time'; -# Let's check that we get a timeout -SELECT master_gtid_wait(@pos, 0.5); -SELECT * FROM t1 ORDER BY a; -# Now actually wait until the slave reaches the position -send SELECT master_gtid_wait(@pos); - ---connection server_2 ---source include/start_slave.inc - ---connection s1 -reap; -SELECT * FROM t1 ORDER BY a; - -# Test waiting on a domain that does not exist yet. ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id= 1; -INSERT INTO t1 VALUES (3); ---let $pos= `SELECT @@gtid_binlog_pos` - ---connection s1 ---replace_result $pos POS -eval SET @pos= '$pos'; -SELECT master_gtid_wait(@pos, 0); -SELECT * FROM t1 WHERE a >= 3; -send SELECT master_gtid_wait(@pos, -1); - ---connection server_2 ---source include/start_slave.inc - ---connection s1 -reap; -SELECT * FROM t1 WHERE a >= 3; -# Waiting for only part of the position. -SELECT master_gtid_wait('1-1-1', 0); - -# Now test a lot of parallel master_gtid_wait() calls, completing in different -# order, and some of which time out or get killed on the way. - ---connection s1 -send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110'); - ---connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -# This will time out. No event 0-1-1000 exists -send SELECT master_gtid_wait('0-1-1000', 0.5); - ---connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -# This one we will kill ---let $kill1_id= `SELECT connection_id()` -send SELECT master_gtid_wait('0-1-2000'); - ---connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-10'); - ---connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-6', 1); - -# This one we will kill also. ---connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,) ---let $kill2_id= `SELECT connection_id()` -send SELECT master_gtid_wait('2-1-5'); - ---connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-10'); - ---connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110'); - ---connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-2'); - ---connection server_2 -# This one completes immediately. 
-SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -SELECT master_gtid_wait('1-1-1'); -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); ---replace_result $wait_time MASTER_GTID_WAIT_TIME -eval SET @a= $wait_time; -SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected")) - AS Master_gtid_wait_time_as_expected; - - ---connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('0-1-109'); - ---connection server_2 -# This one should time out. -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -SELECT master_gtid_wait('2-1-2', 0.5); -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); ---replace_result $wait_time MASTER_GTID_WAIT_TIME -eval SET @a= $wait_time; -# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling -# and timer inaccuracies could introduce significant jitter. So allow a -# generous interval. -SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected; - ---replace_result $kill1_id KILL_ID -eval KILL QUERY $kill1_id; ---connection s3 ---error ER_QUERY_INTERRUPTED -reap; - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=2; -INSERT INTO t1 VALUES (4); - ---connection s9 -reap; - ---connection server_2 ---replace_result $kill2_id KILL_ID -eval KILL CONNECTION $kill2_id; - ---connection s6 ---error 2013,ER_CONNECTION_KILLED -reap; - ---connection server_1 -SET gtid_domain_id=1; -SET gtid_seq_no=4; -INSERT INTO t1 VALUES (5); -SET gtid_domain_id=2; -SET gtid_seq_no=5; -INSERT INTO t1 VALUES (6); - ---connection s8 -reap; ---connection s1 -reap; ---connection s2 -reap; ---connection s5 -reap; ---connection s10 -reap; - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=10; -INSERT INTO t1 VALUES (7); - ---connection s4 -reap; ---connection s7 -reap; - - ---echo *** Test gtid_slave_pos when used with GTID *** - ---connection server_2 ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1000; -INSERT INTO t1 VALUES (10); -INSERT INTO t1 VALUES (11); ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 1; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1010; -INSERT INTO t1 VALUES (12); -INSERT INTO t1 VALUES (13); ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 2; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1020; -INSERT INTO t1 VALUES (14); -INSERT INTO t1 VALUES (15); -INSERT INTO t1 VALUES (16); ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 3; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1030; -# Disable logging Annotate_rows events to preserve events count. -let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; -SET @@binlog_annotate_row_events= 0; -INSERT INTO t1 VALUES (17); -INSERT INTO t1 VALUES (18); -INSERT INTO t1 VALUES (19); -eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 5; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=3; -SET gtid_seq_no=100; -CREATE TABLE t2 (a INT PRIMARY KEY); -DROP TABLE t2; -SET gtid_domain_id=2; -SET gtid_seq_no=1040; -INSERT INTO t1 VALUES (20); ---save_master_pos - ---connection server_2 -SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; -SET GLOBAL slave_ddl_exec_mode=STRICT; -SET sql_slave_skip_counter=1; -START SLAVE UNTIL master_gtid_pos="3-1-100"; ---let $master_pos=3-1-100 ---source include/sync_with_master_gtid.inc ---source include/wait_for_slave_to_stop.inc ---error ER_NO_SUCH_TABLE -SELECT * FROM t2; -SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - -# Start the slave again, it should fail on the DROP TABLE as the table is not there. -SET sql_log_bin=0; -CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); -SET sql_log_bin=1; -START SLAVE; ---let $slave_sql_errno=1051 ---source include/wait_for_slave_sql_error.inc -SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - -STOP SLAVE IO_THREAD; -SET sql_slave_skip_counter=2; ---source include/start_slave.inc ---sync_with_master - -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; -SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - -SET GLOBAL slave_ddl_exec_mode= @saved_mode; - - ---echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. *** - -# Create an out-of-order binlog on server 2. -# Let server 3 replicate to an out-of-order point, stop it, restart it, -# and check that it replicates correctly despite the out-of-order. 
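An out-of-order binlog arises here because the intermediate master both applies events replicated from upstream and commits its own transactions in the same replication domain, so the sequence numbers written to its binlog need not be monotonic. A minimal sketch of how this could be observed from SQL on the intermediate server, assuming GTIDs in the default domain 0 (the concrete GTID values and file name are illustrative only, not output from this test):

    # on the intermediate master, after interleaved local and replicated inserts
    SELECT @@GLOBAL.gtid_binlog_state;   # e.g. 0-1-12,0-2-11: two server_ids share domain 0
    SHOW BINLOG EVENTS;                  # Gtid events may appear with non-increasing seq_no

The point of the test below is that a GTID slave reconnecting to such a binlog should still locate the correct starting position.
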
- ---connection server_1 -SET gtid_domain_id= @@GLOBAL.gtid_domain_id; -INSERT INTO t1 VALUES (31); ---save_master_pos - ---connection server_2 ---sync_with_master -SET gtid_domain_id= @@GLOBAL.gtid_domain_id; -INSERT INTO t1 VALUES (32); - ---connection server_1 -INSERT INTO t1 VALUES (33); ---save_master_pos - ---connection server_2 ---sync_with_master ---save_master_pos - ---connection server_3 ---sync_with_master ---source include/stop_slave.inc - ---connection server_1 -INSERT INTO t1 VALUES (34); ---save_master_pos - ---connection server_2 ---sync_with_master ---save_master_pos - ---connection server_3 ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 30 ORDER BY a; ---save_master_pos - ---connection server_4 ---sync_with_master -SELECT * FROM t1 WHERE a >= 30 ORDER BY a; - - -# Clean up. ---connection server_1 -DROP TABLE t1; - - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_incident.inc b/mysql-test/suite/rpl/include/rpl_incident.inc deleted file mode 100644 index 75d28d6a6c6b0..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_incident.inc +++ /dev/null @@ -1,61 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - ---source include/have_debug.inc ---source include/master-slave.inc - -SET @old_binlog_checksum=@@binlog_checksum; -SET GLOBAL BINLOG_CHECKSUM=none; -connection slave; -SET @old_binlog_checksum=@@binlog_checksum; -SET GLOBAL BINLOG_CHECKSUM=none; -connection master; - ---echo **** On Master **** -CREATE TABLE t1 (a INT); - -INSERT INTO t1 VALUES (1),(2),(3); -SELECT * FROM t1; - -set @saved_dbug = @@global.debug_dbug; -SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*'; - -# This will generate an incident log event and store it in the binary -# log before the replace statement. -REPLACE INTO t1 VALUES (4); ---save_master_pos -SELECT * FROM t1; - -set @@global.debug_dbug = @saved_dbug; - -connection slave; -# Wait until SQL thread stops with error LOST_EVENT on master -call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590"); -let $slave_sql_errno= 1590; -let $show_slave_sql_error= 1; -source include/wait_for_slave_sql_error.inc; - -# The 4 should not be inserted into the table, since the incident log -# event should have stop the slave. ---echo **** On Slave **** -SELECT * FROM t1; - -SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; -START SLAVE; ---sync_with_master - -# Now, we should have inserted the row into the table and the slave -# should be running. We should also have rotated to a new binary log. - -SELECT * FROM t1; -source include/check_slave_is_running.inc; - -connection master; -SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; -DROP TABLE t1; ---sync_slave_with_master -SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_init_slave_errors.inc b/mysql-test/suite/rpl/include/rpl_init_slave_errors.inc deleted file mode 100644 index 46673ea47649f..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_init_slave_errors.inc +++ /dev/null @@ -1,96 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -###################################################################### -# Some errors that cause the slave SQL thread to stop are not shown in -# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error -# is only in the server's error log. -# -# Two failures and their respective reporting are verified: -# -# 1 - Failures during slave thread initialization -# 2 - Failures while processing queries passed through the init_slave -# option. -# -# In order to check the first type of failure, we inject a fault in the -# SQL/IO Threads through SET GLOBAL debug. -# -# To check the second type, we set @@global.init_slave to an invalid -# command thus preventing the initialization of the SQL Thread. -# -# Obs: -# 1 - Note that testing failures while initializing the relay log position -# is hard as the same function is called before the code reaches the point -# that we want to test. -# -# 2 - This test does not target failures that are reported while applying -# events such as duplicate keys, errors while reading the relay-log.bin*, -# etc. Such errors are already checked on other tests. -###################################################################### - -###################################################################### -# Configuring the Environment -###################################################################### -source include/have_debug.inc; -source include/have_log_bin.inc; -source include/master-slave.inc; - -connection slave; - ---disable_warnings -stop slave; ---enable_warnings -reset slave; - -###################################################################### -# Injecting faults in the threads' initialization -###################################################################### -connection slave; - -# Set debug flags on slave to force errors to occur -set @saved_dbug = @@global.debug_dbug; -SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init"; - -start slave; - -# -# slave is going to stop because of emulated failures -# but there won't be any crashes nor asserts hit. -# -# 1593 = ER_SLAVE_FATAL_ERROR ---let $slave_sql_errno= 1593 ---let $show_slave_sql_error= 1 ---source include/wait_for_slave_sql_error.inc - -call mtr.add_suppression("Failed during slave.* thread initialization"); - -set @@global.debug_dbug = @saved_dbug; - -###################################################################### -# Injecting faults in the init_slave option -###################################################################### -connection slave; - -reset slave; - -SET GLOBAL init_slave= "garbage"; - -start slave; -# 1064 = ER_PARSE_ERROR ---let $slave_sql_errno= 1064 ---let $show_slave_sql_error= 1 ---source include/wait_for_slave_sql_error.inc - -###################################################################### -# Clean up -###################################################################### -SET GLOBAL init_slave= ""; - -# Clean up Last_SQL_Error ---source include/stop_slave_io.inc -RESET SLAVE; ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_loadfile.inc b/mysql-test/suite/rpl/include/rpl_loadfile.inc deleted file mode 100644 index 9cd645306908c..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_loadfile.inc +++ /dev/null @@ -1,120 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -############################################################################# -# Original Author: JBM # -# Original Date: Aug/18/2005 # -############################################################################# -# TEST: To test the LOAD_FILE() in rbr # -############################################################################# -# Change Author: JBM -# Change Date: 2006-01-16 -########## - -# Includes --- source include/have_binlog_format_mixed_or_row.inc --- source include/master-slave.inc - --- source suite/rpl/include/rpl_loadfile.test - -# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE -# -# DESCRIPTION -# -# Problem: when using load_file string function and mixed binlogging format -# there was no switch to row based binlogging format. This leads -# to scenarios on which the slave replicates the statement and it -# will try to load the file from local file system, which in most -# likely it will not exist. -# -# Solution: -# Marking this function as unsafe for statement format, makes the -# statement using it to be logged in row based format. As such, data -# replicated from the master, becomes the content of the loaded file. -# Consequently, the slave receives the necessary data to complete -# the load_file instruction correctly. -# -# IMPLEMENTATION -# -# The test is implemented as follows: -# -# On Master, -# i) write to file the desired content. -# ii) create table and stored procedure with load_file -# iii) stop slave -# iii) execute load_file -# iv) remove file -# -# On Slave, -# v) start slave -# vi) sync it with master so that it gets the updates from binlog (which -# should have bin logged in row format). -# -# If the the binlog format does not change to row, then the assertion -# done in the following step fails. This happens because tables differ -# since the file does not exist anymore, meaning that when slave -# attempts to execute LOAD_FILE statement it inserts NULL on table -# instead of the same contents that the master loaded when it executed -# the procedure (which was executed when file existed). -# -# vii) assert that the contents of master and slave -# table are the same - ---source include/rpl_reset.inc - -connection master; -let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data; - ---replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---eval SELECT repeat('x',20) INTO OUTFILE '$file' - -disable_warnings; -DROP TABLE IF EXISTS t1; -enable_warnings; - -CREATE TABLE t1 (t text); -DELIMITER |; -CREATE PROCEDURE p(file varchar(4096)) - BEGIN - INSERT INTO t1 VALUES (LOAD_FILE(file)); - END| -DELIMITER ;| - -# stop slave before issuing the load_file on master -connection slave; -source include/stop_slave.inc; - -connection master; - -# test: check that logging falls back to rbr. 
---replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---eval CALL p('$file') - -# test: remove the file from the filesystem and assert that slave still -# gets the loaded file -remove_file $file; - -# now that the file is removed it is safe (regarding what we want to test) -# to start slave -connection slave; -source include/start_slave.inc; - -connection master; -sync_slave_with_master; - -# assertion: assert that the slave got the updates even -# if the file was removed before the slave started, -# meaning that contents were indeed transfered -# through binlog (in row format) -let $diff_tables= master:t1, slave:t1; -source include/diff_tables.inc; - -# CLEAN UP ---connection master -DROP TABLE t1; -DROP PROCEDURE p; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_packet.inc b/mysql-test/suite/rpl/include/rpl_packet.inc deleted file mode 100644 index cbde486bcbbda..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_packet.inc +++ /dev/null @@ -1,184 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# ==== Purpose ==== -# -# Check replication protocol packet size handling -# -# ==== Related bugs ==== -# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave -# BUG#23755: Replicated event larger that max_allowed_packet infinitely re-transmits -# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors -# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET - -# max-out size db name -source include/have_binlog_format_row.inc; -source include/master-slave.inc; - -call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); -call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); -let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; -disable_warnings; -eval drop database if exists $db; -enable_warnings; -eval create database $db; - -connection master; -let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`; -let $old_net_buffer_length= `SELECT @@global.net_buffer_length`; -let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`; -SET @@global.max_allowed_packet=1024; -SET @@global.net_buffer_length=1024; - -sync_slave_with_master; -# Restart slave for setting to take effect -source include/stop_slave.inc; -source include/start_slave.inc; - -# Reconnect to master for new setting to take effect -disconnect master; - -# alas, can't use eval here; if db name changed apply the change here -connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________); - -connection master; -select @@net_buffer_length, @@max_allowed_packet; - -create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM; - -INSERT INTO `t1`(`f1`) VALUES 
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023'); -sync_slave_with_master; - -eval select count(*) from `$db`.`t1` /* must be 1 */; - -SHOW STATUS LIKE 'Slave_running'; -select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING'; -connection master; -eval drop database $db; -sync_slave_with_master; - -# -# Bug #23755: Replicated event larger that max_allowed_packet infinitely re-transmits -# -# Check that a situation when the size of event on the master is greater than -# max_allowed_packet on the slave does not lead to infinite re-transmits. - -connection master; - -# Change the max packet size on master - -SET @@global.max_allowed_packet=4096; -SET @@global.net_buffer_length=4096; - -# Restart slave for new setting to take effect -connection slave; -source include/stop_slave.inc; -source include/start_slave.inc; - -# Reconnect to master for new setting to take effect -disconnect master; -connect (master, localhost, root); -connection master; - -CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM; - -sync_slave_with_master; - -connection master; - -INSERT INTO `t1`(`f1`) VALUES 
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048'); - - -# -# Bug#42914: The slave I/O thread must stop after trying to read the above -# event, However there is no Last_IO_Error report. -# - -# The slave I/O thread must stop after trying to read the above event -connection slave; -# 1153 = ER_NET_PACKET_TOO_LARGE ---let $slave_io_errno= 1153 ---let $show_slave_io_error= 1 ---source include/wait_for_slave_io_error.inc - -# TODO: this is needed because of BUG#55790. Remove once that is fixed. ---source include/stop_slave_sql.inc - -# -# Bug#42914: On the master, if a binary log event is larger than -# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG -# is sent to a slave when it requests a dump from the master, thus leading the -# I/O thread to stop. However, there is no Last_IO_Error reported. -# - ---let $rpl_only_running_threads= 1 ---source include/rpl_reset.inc ---connection master -DROP TABLE t1; ---sync_slave_with_master - - -connection master; -CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM; -sync_slave_with_master; - -connection master; -INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet)); - -connection slave; -# The slave I/O thread must stop after receiving -# 1153 = ER_NET_PACKET_TOO_LARGE ---let $slave_io_errno= 1153 ---let $show_slave_io_error= 1 ---source include/wait_for_slave_io_error.inc - -# Remove the bad binlog and clear error status on slave. 
-STOP SLAVE; -RESET SLAVE; ---connection master -RESET MASTER; - - -# -# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET -# -# In BUG#55322, @@session.max_allowed_packet increased each time SHOW -# BINLOG EVENTS was issued. To verify that this bug is fixed, we -# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet -# never changes. We turn off the result log because we don't care -# about the contents of the binlog. - ---disable_result_log -SET @max_allowed_packet_0= @@session.max_allowed_packet; -SHOW BINLOG EVENTS; -SET @max_allowed_packet_1= @@session.max_allowed_packet; -SHOW BINLOG EVENTS; -SET @max_allowed_packet_2= @@session.max_allowed_packet; ---enable_result_log -if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`) -{ - --echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS - --source include/show_rpl_debug_info.inc - SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2; - --die @max_allowed_packet changed after executing SHOW BINLOG EVENTS -} - - ---echo ==== clean up ==== -connection master; -DROP TABLE t1; -eval SET @@global.max_allowed_packet= $old_max_allowed_packet; -eval SET @@global.net_buffer_length= $old_net_buffer_length; -eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet; -# slave is stopped -connection slave; -DROP TABLE t1; - -# Clear Last_IO_Error -RESET SLAVE; - ---source include/rpl_end.inc -# End of tests diff --git a/mysql-test/suite/rpl/include/rpl_parallel_ignored_errors.inc b/mysql-test/suite/rpl/include/rpl_parallel_ignored_errors.inc deleted file mode 100644 index 493385f1ae304..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_parallel_ignored_errors.inc +++ /dev/null @@ -1,115 +0,0 @@ -# ==== Purpose ==== -# -# Test verifies that, in parallel replication, transaction failure notification -# is propagated to all the workers. Workers should abort the execution of -# transaction event groups, whose event positions are higher than the failing -# transaction group. -# -# ==== Implementation ==== -# -# Steps: -# 0 - Create a table t1 on master which has a primary key. Enable parallel -# replication on slave with slave_parallel_mode='optimistic' and -# slave_parallel_threads=3. -# 1 - On slave start a transaction and execute a local INSERT statement -# which will insert value 32. This is done to block the INSERT coming -# from master. -# 2 - On master execute an INSERT statement with value 32, so that it is -# blocked on slave. -# 3 - On slave enable a debug sync point such that it holds the worker thread -# execution as soon as work is scheduled to it. -# 4 - INSERT value 33 on master. It will be held on slave by other worker -# thread due to debug simulation. -# 5 - INSERT value 34 on master. -# 6 - On slave, enusre that INSERT 34 has reached a state where it waits for -# its prior transactions to commit. -# 7 - Commit the local INSERT 32 on slave server so that first worker will -# error out. -# 8 - Now send a continue signal to second worker processing 33. It should -# wakeup and propagate the error to INSERT 34. -# 9 - Upon slave stop due to error, check that no rows are found after the -# failed INSERT 32. -# -# ==== References ==== -# -# MDEV-20645: Replication consistency is broken as workers miss the error -# notification from an earlier failed group. 
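The failure-propagation scenario sketched in the steps above presumes a slave running optimistic parallel apply; a minimal sketch of that slave-side setup (the same statements appear in the test body that follows, and the thread count of 3 is simply what this test uses):

    --source include/stop_slave.inc
    SET GLOBAL slave_parallel_mode= 'optimistic';
    SET GLOBAL slave_parallel_threads= 3;
    CHANGE MASTER TO master_use_gtid= slave_pos;
    --source include/start_slave.inc
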
-# - ---source include/have_innodb.inc ---source include/have_debug.inc ---source include/have_debug_sync.inc ---source include/have_binlog_format_statement.inc ---source include/master-slave.inc - ---enable_connect_log ---connection server_2 ---source include/stop_slave.inc -SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; -SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; -SET @old_debug= @@GLOBAL.debug_dbug; -SET GLOBAL slave_parallel_mode='optimistic'; -SET GLOBAL slave_parallel_threads= 3; -CHANGE MASTER TO master_use_gtid=slave_pos; -CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); ---source include/start_slave.inc - ---connection server_1 -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB; -# MDEV-515 takes X-lock on the table for the first insert. -# So concurrent insert won't happen on the table -INSERT INTO t1 VALUES(1); ---source include/save_master_gtid.inc - ---connection server_2 ---source include/sync_with_master_gtid.inc - ---connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -BEGIN; -INSERT INTO t1 VALUES (32); - ---connection server_1 -INSERT INTO t1 VALUES (32); - ---connection server_2 ---let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)" ---source include/wait_condition.inc -SET GLOBAL debug_dbug="+d,hold_worker_on_schedule"; -SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker"; - ---connection server_1 -SET gtid_seq_no=100; -INSERT INTO t1 VALUES (33); - ---connection server_2 -SET debug_sync='now WAIT_FOR reached_pause'; - ---connection server_1 -INSERT INTO t1 VALUES (34); - ---connection server_2 ---let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit" ---source include/wait_condition.inc ---connection con_temp2 -COMMIT; - -# Clean up. ---connection server_2 ---source include/stop_slave.inc ---let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32 ---let $assert_text= table t1 should have zero rows where a>32 ---source include/assert.inc -SELECT * FROM t1 WHERE a>32; -DELETE FROM t1 WHERE a=32; - -SET GLOBAL slave_parallel_threads=@old_parallel_threads; -SET GLOBAL slave_parallel_mode=@old_parallel_mode; -SET GLOBAL debug_dbug=@old_debug; -SET DEBUG_SYNC= 'RESET'; ---source include/start_slave.inc - ---connection server_1 -DROP TABLE t1; ---disable_connect_log ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc b/mysql-test/suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc deleted file mode 100644 index cddc9286bd28d..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc +++ /dev/null @@ -1,38 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER -# -# The function mysql_show_binlog_events has a local stack variable -# 'LOG_INFO linfo;', which is assigned to thd->current_linfo, however -# this variable goes out of scope and is destroyed before clean -# thd->current_linfo. -# -# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS to make sure -# that with the fix local variable linfo is valid along all -# mysql_show_binlog_events function scope. 
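The reproduction below drives the race with the server's DEBUG_SYNC facility (debug builds only): one session parks inside mysql_show_binlog_events at a named sync point while a second session rotates the logs, then releases it. A minimal sketch of the handshake, assuming the 'after_show_binlog_events' sync point that the test relies on (the signal names 'parked' and 'go' are illustrative):

    # session 1: stop inside SHOW BINLOG EVENTS once its LOG_INFO is registered
    SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL parked WAIT_FOR go';
    SHOW BINLOG EVENTS;                    # blocks at the sync point

    # session 2: rotate the binlog while session 1 is parked, then let it finish
    SET DEBUG_SYNC= 'now WAIT_FOR parked';
    FLUSH LOGS;
    SET DEBUG_SYNC= 'now SIGNAL go';

The test only checks that both sessions complete without crashing the server.
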
-# ---source include/have_debug.inc ---source include/have_debug_sync.inc ---source include/master-slave.inc - ---connection slave -SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end'; ---send SHOW BINLOG EVENTS - ---connection slave1 -SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events'; -FLUSH LOGS; -SET DEBUG_SYNC= 'now SIGNAL end'; - ---connection slave ---disable_result_log ---reap ---enable_result_log -SET DEBUG_SYNC= 'RESET'; - ---connection master ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_relayrotate.inc b/mysql-test/suite/rpl/include/rpl_relayrotate.inc deleted file mode 100644 index 4de554d314359..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_relayrotate.inc +++ /dev/null @@ -1,18 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -####################################################### -# Wrapper for rpl_relayrotate.test to allow multi # -# Engines to reuse test code. By JBM 2006-02-15 # -####################################################### --- source include/have_innodb.inc -# Slow test, don't run during staging part --- source include/not_staging.inc --- source include/master-slave.inc - -let $engine_type=innodb; --- source suite/rpl/include/rpl_relayrotate.test ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_semi_sync.inc b/mysql-test/suite/rpl/include/rpl_semi_sync.inc deleted file mode 100644 index 720ec05935056..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_semi_sync.inc +++ /dev/null @@ -1,532 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -source include/not_embedded.inc; -source include/have_innodb.inc; -source include/master-slave.inc; - -let $engine_type= InnoDB; - -# Suppress warnings that might be generated during the test -connection master; -call mtr.add_suppression("Timeout waiting for reply of binlog"); -call mtr.add_suppression("Read semi-sync reply"); -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); -call mtr.add_suppression("mysqld: Got an error reading communication packets"); -connection slave; -# While 'Current_Pos' exists as an option for Using_Gtd, keeping these -# events in the binlog will update gtid_binlog_pos, and the later calls to -# set `@@global.gtid_slave_pos= ""` will provide warning messages with -# inconsistent GTID values because the seq_nos are non-deterministic with -# the masters events coming in concurrently -set sql_log_bin=0; -call mtr.add_suppression("Master server does not support semi-sync"); -call mtr.add_suppression("Semi-sync slave .* reply"); -call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group"); -set sql_log_bin=1; -connection master; - -# wait for dying connections (if any) to disappear -let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed'; ---source include/wait_condition.inc - -# After fix of BUG#45848, semi-sync slave should not create any extra -# connections on master, save the count of connections before start -# semi-sync slave for comparison below. 
-let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); - ---echo # ---echo # Uninstall semi-sync plugins on master and slave ---echo # -connection slave; -source include/stop_slave.inc; ---source include/reset_slave.inc -set global rpl_semi_sync_master_enabled= 0; -set global rpl_semi_sync_slave_enabled= 0; - -connection master; -reset master; -set global rpl_semi_sync_master_enabled= 0; -set global rpl_semi_sync_slave_enabled= 0; - ---echo # ---echo # Main test of semi-sync replication start here ---echo # - -connection master; - -set global rpl_semi_sync_master_timeout= 60000; # 60s - -echo [ default state of semi-sync on master should be OFF ]; -show variables like 'rpl_semi_sync_master_enabled'; - -echo [ enable semi-sync on master ]; -set global rpl_semi_sync_master_enabled = 1; -show variables like 'rpl_semi_sync_master_enabled'; - -echo [ status of semi-sync on master should be ON even without any semi-sync slaves ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed ---echo # BUG#45673 Semisynch reports correct operation even if no slave is connected ---echo # - -# BUG#45672 When semi-sync is enabled on master, it would allocate -# transaction node even without semi-sync slave connected, and would -# finally result in transaction node allocation error. -# -# Semi-sync master will pre-allocate 'max_connections' transaction -# nodes, so here we do more than that much transactions to check if it -# will fail or not. -# select @@global.max_connections + 1; -let $i= `select @@global.max_connections + 1`; -disable_query_log; -eval create table t1 (a int) engine=$engine_type; -while ($i) -{ - eval insert into t1 values ($i); - dec $i; -} -drop table t1; -enable_query_log; - -# BUG#45673 -echo [ status of semi-sync on master should be OFF ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -# reset master to make sure the following test will start with a clean environment -reset master; - -connection slave; - -echo [ default state of semi-sync on slave should be OFF ]; -show variables like 'rpl_semi_sync_slave_enabled'; - -echo [ enable semi-sync on slave ]; -set global rpl_semi_sync_slave_enabled = 1; -show variables like 'rpl_semi_sync_slave_enabled'; -source include/start_slave.inc; - -connection master; - -# NOTE: Rpl_semi_sync_master_client will only be updated when -# semi-sync slave has started binlog dump request -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; - -echo [ initial master state after the semi-sync slave connected ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -replace_result $engine_type ENGINE_TYPE; -eval create table t1(a int) engine = $engine_type; - -echo [ master state after CREATE TABLE statement ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -# After fix of BUG#45848, semi-sync slave should not create any extra -# connections on master. 
-let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); -replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE; -eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0'; - -echo [ insert records to table ]; -insert t1 values (10); -insert t1 values (9); -insert t1 values (8); -insert t1 values (7); -insert t1 values (6); -insert t1 values (5); -insert t1 values (4); -insert t1 values (3); -insert t1 values (2); -insert t1 values (1); - -echo [ master status after inserts ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -sync_slave_with_master; - -echo [ slave status after replicated inserts ]; -show status like 'Rpl_semi_sync_slave_status'; - -select count(distinct a) from t1; -select min(a) from t1; -select max(a) from t1; - ---echo ---echo # BUG#50157 ---echo # semi-sync replication crashes when replicating a transaction which ---echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ; - -connection master; -SET SESSION AUTOCOMMIT= 0; -CREATE TABLE t2(c1 INT) ENGINE=innodb; -sync_slave_with_master; - -connection master; -BEGIN; ---echo ---echo # Even though it is in a transaction, this statement is binlogged into binlog ---echo # file immediately. ---disable_warnings -CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1; ---enable_warnings ---echo ---echo # These statements will not be binlogged until the transaction is committed -INSERT INTO t2 VALUES(11); -INSERT INTO t2 VALUES(22); -COMMIT; - -DROP TABLE t2, t3; -SET SESSION AUTOCOMMIT= 1; -sync_slave_with_master; - - ---echo # ---echo # Test semi-sync master will switch OFF after one transaction ---echo # timeout waiting for slave reply. ---echo # -connection slave; -source include/stop_slave.inc; - -connection master; ---source include/kill_binlog_dump_threads.inc -set global rpl_semi_sync_master_timeout= 5000; - -# The first semi-sync check should be on because after slave stop, -# there are no transactions on the master. -echo [ master status should be ON ]; - -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= ON; -source include/wait_for_status_var.inc; - -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 0; -source include/wait_for_status_var.inc; - -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -echo [ semi-sync replication of these transactions will fail ]; -insert into t1 values (500); - -# Wait for the semi-sync replication of this transaction to timeout -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= OFF; -source include/wait_for_status_var.inc; - -# The second semi-sync check should be off because one transaction -# times out during waiting. -echo [ master status should be OFF ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -# Semi-sync status on master is now OFF, so all these transactions -# will be replicated asynchronously. 
-delete from t1 where a=10; -delete from t1 where a=9; -delete from t1 where a=8; -delete from t1 where a=7; -delete from t1 where a=6; -delete from t1 where a=5; -delete from t1 where a=4; -delete from t1 where a=3; -delete from t1 where a=2; -delete from t1 where a=1; - -insert into t1 values (100); - -echo [ master status should be OFF ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # Test semi-sync status on master will be ON again when slave catches up ---echo # - -# Save the master position for later use. -save_master_pos; - -connection slave; - -echo [ slave status should be OFF ]; -show status like 'Rpl_semi_sync_slave_status'; -source include/start_slave.inc; -sync_with_master; - -echo [ slave status should be ON ]; -show status like 'Rpl_semi_sync_slave_status'; - -select count(distinct a) from t1; -select min(a) from t1; -select max(a) from t1; - -connection master; - -# The master semi-sync status should be on again after slave catches up. -echo [ master status should be ON again after slave catches up ]; - -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= ON; -source include/wait_for_status_var.inc; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; -show status like 'Rpl_semi_sync_master_clients'; - ---echo # ---echo # Test disable/enable master semi-sync on the fly. ---echo # - -drop table t1; -sync_slave_with_master; - -source include/stop_slave.inc; - ---echo # ---echo # Flush status ---echo # -connection master; -echo [ Semi-sync master status variables before FLUSH STATUS ]; -SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; -SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; -# Do not write the FLUSH STATUS to binlog, to make sure we'll get a -# clean status after this. -FLUSH NO_WRITE_TO_BINLOG STATUS; -echo [ Semi-sync master status variables after FLUSH STATUS ]; -SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; -SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; - -connection master; - -source include/show_master_logs.inc; -show variables like 'rpl_semi_sync_master_enabled'; - -echo [ disable semi-sync on the fly ]; -set global rpl_semi_sync_master_enabled=0; -show variables like 'rpl_semi_sync_master_enabled'; -show status like 'Rpl_semi_sync_master_status'; - -echo [ enable semi-sync on the fly ]; -set global rpl_semi_sync_master_enabled=1; -show variables like 'rpl_semi_sync_master_enabled'; -show status like 'Rpl_semi_sync_master_status'; - ---echo # ---echo # Test RESET MASTER/SLAVE ---echo # - -connection slave; - -source include/start_slave.inc; - -connection master; - -replace_result $engine_type ENGINE_TYPE; -eval create table t1 (a int) engine = $engine_type; -drop table t1; - -sync_slave_with_master; - -echo [ test reset master ]; -connection master; - -reset master; - -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -connection slave; - -source include/stop_slave.inc; ---source include/reset_slave.inc - -# Kill the dump thread on master for previous slave connection and ---source include/kill_binlog_dump_threads.inc - -connection slave; -source include/start_slave.inc; - -connection master; - -# Wait for dump thread to start, Rpl_semi_sync_master_clients will be -# 1 after dump thread started. 
-let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; - -replace_result $engine_type ENGINE_TYPE; -eval create table t1 (a int) engine = $engine_type; -insert into t1 values (1); -insert into t1 values (2), (3); - -sync_slave_with_master; - -select * from t1; - -connection master; - -echo [ master semi-sync status should be ON ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # Start semi-sync replication without SUPER privilege ---echo # -connection slave; -source include/stop_slave.inc; ---source include/reset_slave.inc -connection master; -reset master; - -# Kill the dump thread on master for previous slave connection and wait for it to exit ---source include/kill_binlog_dump_threads.inc - -# Do not binlog the following statement because it will generate -# different events for ROW and STATEMENT format -set sql_log_bin=0; -grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; -flush privileges; -set sql_log_bin=1; -connection slave; -grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; -flush privileges; -change master to master_user='rpl',master_password='rpl_password'; -source include/start_slave.inc; -show status like 'Rpl_semi_sync_slave_status'; -connection master; - -# Wait for the semi-sync binlog dump thread to start -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; -echo [ master semi-sync should be ON ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; -insert into t1 values (4); -insert into t1 values (5); -echo [ master semi-sync should be ON ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # Test semi-sync slave connect to non-semi-sync master ---echo # - -# Disable semi-sync on master -connection slave; -source include/stop_slave.inc; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; - -connection master; - -# Kill the dump thread on master for previous slave connection and wait for it to exit ---source include/kill_binlog_dump_threads.inc - -echo [ Semi-sync status on master should be ON ]; -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 0; -source include/wait_for_status_var.inc; -show status like 'Rpl_semi_sync_master_status'; -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= ON; -source include/wait_for_status_var.inc; -set global rpl_semi_sync_master_enabled= 0; - -connection slave; -SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; -source include/start_slave.inc; -connection master; -insert into t1 values (8); -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; -echo [ master semi-sync clients should be 1, status should be OFF ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -sync_slave_with_master; -show status like 'Rpl_semi_sync_slave_status'; - -# Uninstall semi-sync plugin on master -connection slave; -source include/stop_slave.inc; -connection master; -set global rpl_semi_sync_master_enabled= 0; - 
-connection slave; -SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; -source include/start_slave.inc; - -connection master; -insert into t1 values (10); -sync_slave_with_master; - ---echo # ---echo # Test non-semi-sync slave connect to semi-sync master ---echo # - -connection master; -set global rpl_semi_sync_master_timeout= 5000; # 5s -set global rpl_semi_sync_master_enabled= 1; - -connection slave; -source include/stop_slave.inc; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; - -echo [ uninstall semi-sync slave plugin ]; -set global rpl_semi_sync_slave_enabled= 0; - -echo [ reinstall semi-sync slave plugin and disable semi-sync ]; -SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; -source include/start_slave.inc; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; - ---echo # ---echo # Clean up ---echo # - -connection slave; -source include/stop_slave.inc; -set global rpl_semi_sync_slave_enabled= 0; - -connection master; -set global rpl_semi_sync_master_enabled= 0; - -connection slave; -change master to master_user='root',master_password=''; -source include/start_slave.inc; - -connection master; -drop table t1; -sync_slave_with_master; - -connection master; -drop user rpl@127.0.0.1; -flush privileges; -set global rpl_semi_sync_master_timeout= default; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_skip_replication.inc b/mysql-test/suite/rpl/include/rpl_skip_replication.inc deleted file mode 100644 index d1044c4bf51ba..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_skip_replication.inc +++ /dev/null @@ -1,408 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it. -# -# Usage: -# -# --let $use_remote_mysqlbinlog= 1 # optional -# --source suite/rpl/include/rpl_skip_replication.inc -# -# The script uses MYSQLBINLOG to verify certain results. -# By default, it uses binary logs directly. If it is undesirable, -# this behavior can be overridden by setting $use_remote_binlog -# as shown above. -# The value will be unset after every execution of the script, -# so if it is needed, it should be set explicitly before each call. -# - ---source include/have_innodb.inc ---source include/master-slave.inc - ---connection slave ---source include/stop_slave.inc -CHANGE MASTER TO MASTER_USE_GTID=NO; ---source include/start_slave.inc ---connection master - -connection slave; -# Test that SUPER is required to change @@replicate_events_marked_for_skip. 
-CREATE USER 'nonsuperuser'@'127.0.0.1'; -GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE, - SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1'; -connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,); -connection nonpriv; ---error ER_SPECIFIC_ACCESS_DENIED_ERROR -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -disconnect nonpriv; -connection slave; -DROP USER'nonsuperuser'@'127.0.0.1'; - -SELECT @@global.replicate_events_marked_for_skip; ---error ER_SLAVE_MUST_STOP -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -SELECT @@global.replicate_events_marked_for_skip; -STOP SLAVE; ---error ER_GLOBAL_VARIABLE -SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER; -SELECT @@global.replicate_events_marked_for_skip; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -SELECT @@global.replicate_events_marked_for_skip; -START SLAVE; - -connection master; -SELECT @@skip_replication; ---error ER_LOCAL_VARIABLE -SET GLOBAL skip_replication=1; -SELECT @@skip_replication; - -CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb; -INSERT INTO t1(a) VALUES (1); -INSERT INTO t2(a) VALUES (1); - - -# Test that master-side filtering works. -SET skip_replication=1; - -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -INSERT INTO t1(a) VALUES (2); -INSERT INTO t2(a) VALUES (2); - -# Inject a rotate event in the binlog stream sent to slave (otherwise we will -# fail sync_slave_with_master as the last event on the master is not present -# on the slave). -FLUSH NO_WRITE_TO_BINLOG LOGS; - -sync_slave_with_master; -connection slave; -SHOW TABLES; -SELECT * FROM t1; -SELECT * FROM t2; - -connection master; -DROP TABLE t3; - -FLUSH NO_WRITE_TO_BINLOG LOGS; -sync_slave_with_master; - - -# Test that slave-side filtering works. -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -START SLAVE; - -connection master; -SET skip_replication=1; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -INSERT INTO t1(a) VALUES (3); -INSERT INTO t2(a) VALUES (3); - -# Inject a rotate event in the binlog stream sent to slave (otherwise we will -# fail sync_slave_with_master as the last event on the master is not present -# on the slave). -FLUSH NO_WRITE_TO_BINLOG LOGS; - -sync_slave_with_master; -connection slave; -SHOW TABLES; -SELECT * FROM t1; -SELECT * FROM t2; - -connection master; -DROP TABLE t3; - -FLUSH NO_WRITE_TO_BINLOG LOGS; -sync_slave_with_master; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; - - -# Test that events with @@skip_replication=1 are not filtered when filtering is -# not set on slave. -connection master; -SET skip_replication=1; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -INSERT INTO t3(a) VALUES(2); -sync_slave_with_master; -connection slave; -SELECT * FROM t3; -connection master; -DROP TABLE t3; - -# -# Test that the slave will preserve the @@skip_replication flag in its -# own binlog. -# - -TRUNCATE t1; -sync_slave_with_master; -connection slave; -RESET MASTER; - -connection master; -SET skip_replication=0; -INSERT INTO t1 VALUES (1,0); -SET skip_replication=1; -INSERT INTO t1 VALUES (2,0); -SET skip_replication=0; -INSERT INTO t1 VALUES (3,0); - -sync_slave_with_master; -connection slave; -# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have -# applied all events. 
-SELECT * FROM t1 ORDER by a; - -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -let $SLAVE_DATADIR= `select @@datadir`; - -connection master; -TRUNCATE t1; - -# Now apply the slave binlog to the master, to check that both the slave -# and mysqlbinlog will preserve the @@skip_replication flag. - ---let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001 -if ($use_remote_mysqlbinlog) -{ - --let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001 - --let $use_remote_mysqlbinlog= 0 -} ---exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog ---exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog - -# The master should have all three events. -SELECT * FROM t1 ORDER by a; - -# The slave should be missing event 2, which is marked with the -# @@skip_replication flag. - -connection slave; -START SLAVE; - -connection master; -sync_slave_with_master; - -connection slave; -SELECT * FROM t1 ORDER by a; - -# -# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication -# events. -# - -connection master; -TRUNCATE t1; - -sync_slave_with_master; -connection slave; -STOP SLAVE; -# We will skip two INSERTs (in addition to any skipped due to -# @@skip_replication). Since from 5.5 every statement is wrapped in -# BEGIN ... END, we need to skip 6 events for this. -SET GLOBAL sql_slave_skip_counter=6; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -START SLAVE; - -connection master; -# Need to fix @@binlog_format to get consistent event count. -SET @old_binlog_format= @@binlog_format; -SET binlog_format= statement; -SET skip_replication=0; -INSERT INTO t1 VALUES (1,5); -SET skip_replication=1; -INSERT INTO t1 VALUES (2,5); -SET skip_replication=0; -INSERT INTO t1 VALUES (3,5); -INSERT INTO t1 VALUES (4,5); -SET binlog_format= @old_binlog_format; - -sync_slave_with_master; -connection slave; - -# The slave should have skipped the first three inserts (number 1 and 3 due -# to @@sql_slave_skip_counter=2, number 2 due to -# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4 -# should be left. -SELECT * FROM t1; - - -# -# Check that BINLOG statement preserves the @@skip_replication flag. -# -connection slave; -# Need row @@binlog_format for BINLOG statements containing row events. ---source include/stop_slave.inc -SET @old_slave_binlog_format= @@global.binlog_format; -SET GLOBAL binlog_format= row; ---source include/start_slave.inc - -connection master; -TRUNCATE t1; - -SET @old_binlog_format= @@binlog_format; -SET binlog_format= row; -# Format description log event. 
-BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAA371saA=='; -# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1 -BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC -wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA='; -# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0 -BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC -wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA='; -SET binlog_format= @old_binlog_format; - -SELECT * FROM t1 ORDER BY a; -sync_slave_with_master; -connection slave; -# Slave should have only the second insert, the first should be ignored due to -# the @@skip_replication flag. -SELECT * FROM t1 ORDER by a; - ---source include/stop_slave.inc -SET GLOBAL binlog_format= @old_slave_binlog_format; ---source include/start_slave.inc - - -# Test that it is not possible to change @@skip_replication inside a -# transaction or statement, thereby replicating only parts of statements -# or transactions. -connection master; -SET skip_replication=0; - -BEGIN; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=0; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=1; -ROLLBACK; -SET skip_replication=1; -BEGIN; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=0; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=1; -COMMIT; -SET autocommit=0; -INSERT INTO t2(a) VALUES(100); ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=1; -ROLLBACK; -SET autocommit=1; - -SET skip_replication=1; ---delimiter | -CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END| -CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END| -CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END| ---delimiter ; ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SELECT foo(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SELECT baz(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET @a= foo(1); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET @a= baz(1); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -UPDATE t2 SET b=foo(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -UPDATE t2 SET b=baz(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -INSERT INTO t1 VALUES (101, foo(1)); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -INSERT INTO t1 VALUES (101, baz(0)); -SELECT @@skip_replication; -CALL bar(0); -SELECT @@skip_replication; -CALL bar(1); -SELECT @@skip_replication; -DROP FUNCTION foo; -DROP PROCEDURE bar; -DROP FUNCTION baz; - - -# Test that master-side filtering happens on the master side, and that -# slave-side filtering happens on the slave. - -# First test that events do not reach the slave when master-side filtering -# is configured. Do this by replicating first with only the IO thread running -# and master-side filtering; then change to no filtering and start the SQL -# thread. This should still skip the events, as master-side filtering -# means the events never reached the slave. 
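One way to see the difference the comment above describes, sketched here as an aside and not part of the deleted file: with FILTER_ON_MASTER the marked event never reaches the slave at all, so it is absent from the relay log, whereas with FILTER_ON_SLAVE the event does land in the relay log and is only dropped later by the SQL thread. Inspecting the relay log on the slave makes that visible:

--connection slave
# Under FILTER_ON_MASTER the skipped event should be missing here;
# under FILTER_ON_SLAVE it should be present but never applied.
SHOW RELAYLOG EVENTS;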
-connection master; -SET skip_replication= 0; -TRUNCATE t1; -sync_slave_with_master; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -START SLAVE IO_THREAD; -connection master; -SET skip_replication= 1; -INSERT INTO t1(a) VALUES (1); -SET skip_replication= 0; -INSERT INTO t1(a) VALUES (2); ---source include/save_master_pos.inc -connection slave; ---source include/sync_io_with_master.inc -STOP SLAVE IO_THREAD; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; -connection master; -sync_slave_with_master; -connection slave; -# Now only the second insert of (2) should be visible, as the first was -# filtered on the master, so even though the SQL thread ran without skipping -# events, it will never see the event in the first place. -SELECT * FROM t1; - -# Now tests that when slave-side filtering is configured, events _do_ reach -# the slave. -connection master; -SET skip_replication= 0; -TRUNCATE t1; -sync_slave_with_master; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -START SLAVE IO_THREAD; -connection master; -SET skip_replication= 1; -INSERT INTO t1(a) VALUES (1); -SET skip_replication= 0; -INSERT INTO t1(a) VALUES (2); ---source include/save_master_pos.inc -connection slave; ---source include/sync_io_with_master.inc -STOP SLAVE IO_THREAD; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; -connection master; -sync_slave_with_master; -connection slave; -# Now both inserts should be visible. Since filtering was configured to be -# slave-side, the event is in the relay log, and when the SQL thread ran we -# had disabled filtering again. -SELECT * FROM t1 ORDER BY a; - - -# Clean up. -connection master; -SET skip_replication=0; -DROP TABLE t1,t2; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_special_charset.inc b/mysql-test/suite/rpl/include/rpl_special_charset.inc deleted file mode 100644 index 641aa483d32b7..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_special_charset.inc +++ /dev/null @@ -1,32 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -################################################################################ -# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS -# Problem: IO thread fails to connect to master if servers are configured with -# special character sets like utf16, utf32, ucs2. -# -# Analysis: MySQL server does not support few special character sets like -# utf16,utf32 and ucs2 as "client's character set"(eg: utf16,utf32, ucs2). -# When IO thread is trying to connect to Master, it sets server's character -# set as client's character set. When Slave server is started with these -# special character sets, IO thread (a connection to Master) fails because -# of the above said reason. -# -# Fix: If server's character set is not supported as client's character set, -# then set default's client character set(latin1) as client's character set. 
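For context on the fix described above (an illustration only, not part of the deleted test): utf16, utf32 and ucs2 are rejected outright as client character sets, which is exactly why the IO thread has to fall back to latin1 when the server default is one of them. The rejection is easy to reproduce from any client session; the exact error text may differ slightly between versions:

SET NAMES utf16;
-- expected to fail with ER_WRONG_VALUE_FOR_VAR (1231), roughly:
-- Variable 'character_set_client' can't be set to the value of 'utf16'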
-############################################################################### ---source include/master-slave.inc -call mtr.add_suppression("'utf16' can not be used as client character set"); -CREATE TABLE t1(i VARCHAR(20)); -INSERT INTO t1 VALUES (0xFFFF); ---sync_slave_with_master ---let diff_tables=master:t1, slave:t1 ---source include/diff_tables.inc -# Cleanup ---connection master -DROP TABLE t1; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_sporadic_master.inc b/mysql-test/suite/rpl/include/rpl_sporadic_master.inc deleted file mode 100644 index ad4c44cbf7409..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_sporadic_master.inc +++ /dev/null @@ -1,32 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# test to see if replication can continue when master sporadically fails on -# COM_BINLOG_DUMP and additionally limits the number of events per dump - -source include/master-slave.inc; - -create table t2(n int); -create table t1(n int not null auto_increment primary key); -insert into t1 values (NULL),(NULL); -truncate table t1; -# We have to use 4 in the following to make this test work with all table types -insert into t1 values (4),(NULL); -sync_slave_with_master; ---source include/stop_slave.inc ---source include/start_slave.inc -connection master; -insert into t1 values (NULL),(NULL); -flush logs; -truncate table t1; -insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL); -sync_slave_with_master; -select * from t1 ORDER BY n; -connection master; -drop table t1,t2; -sync_slave_with_master; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_ssl.inc b/mysql-test/suite/rpl/include/rpl_ssl.inc deleted file mode 100644 index 59a2af9f137c0..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_ssl.inc +++ /dev/null @@ -1,116 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -source include/have_ssl_communication.inc; -source include/master-slave.inc; -source include/no_valgrind_without_big.inc; - -# create a user for replication that requires ssl encryption -connection master; -create user replssl@localhost; -grant replication slave on *.* to replssl@localhost require ssl; -create table t1 (t int auto_increment, KEY(t)); - -sync_slave_with_master; - -# Set slave to use SSL for connection to master -stop slave; ---replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR -eval change master to - master_user='replssl', - master_password='', - master_ssl=1, - master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem', - master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem', - master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem'; -start slave; - -# Switch to master and insert one record, then sync it to slave -connection master; -insert into t1 values(1); -sync_slave_with_master; - -# The record should now be on slave -select * from t1; - -# The slave is synced and waiting/reading from master -# SHOW SLAVE STATUS will show "Waiting for master to send event" -let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Crl, Master_SSL_Crlpath, Master_SSL_Cert, Master_SSL_Key; -source include/show_slave_status.inc; -source include/check_slave_is_running.inc; - -# Stop the slave, as reported in bug#21871 it would hang -STOP SLAVE; - -select * from t1; - -# Do the same thing a number of times -disable_query_log; -disable_result_log; -# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows -# After discussions with Engineering, I'm disabling this part of the test to avoid it causing -# red trees. -disable_parsing; -let $i= 100; -while ($i) -{ - start slave; - connection master; - insert into t1 values (NULL); - select * from t1; # Some variance - connection slave; - select * from t1; # Some variance - stop slave; - dec $i; -} -enable_parsing; -START SLAVE; -enable_query_log; -enable_result_log; -connection master; -# INSERT one more record to make sure -# the sync has something to do -insert into t1 values (NULL); -let $master_count= `select count(*) from t1`; - -sync_slave_with_master; ---source include/wait_for_slave_to_start.inc -source include/show_slave_status.inc; -source include/check_slave_is_running.inc; - -let $slave_count= `select count(*) from t1`; - -if ($slave_count != $master_count) -{ - echo master and slave differed in number of rows; - echo master: $master_count; - echo slave: $slave_count; - - connection master; - select count(*) t1; - select * from t1; - connection slave; - select count(*) t1; - select * from t1; - query_vertical show slave status; -} - -connection master; -drop user replssl@localhost; -drop table t1; -sync_slave_with_master; - ---source include/stop_slave.inc -CHANGE MASTER TO - master_user = 'root', - master_ssl = 0, - master_ssl_ca = '', - master_ssl_cert = '', - master_ssl_key = ''; - ---echo End of 5.0 tests ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_stm_relay_ign_space.inc b/mysql-test/suite/rpl/include/rpl_stm_relay_ign_space.inc deleted file mode 100644 index 654a5d47cb910..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_stm_relay_ign_space.inc +++ /dev/null @@ -1,107 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -# -# BUG#12400313 / BUG#64503 test case -# -# -# Description -# ----------- -# -# This test case starts the slave server with: -# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096 -# -# Then it issues some queries that will cause the slave to reach -# relay-log-space-limit. We lock the table so that the SQL thread is -# not able to purge the log and then we issue some more statements. -# -# The purpose is to show that the IO thread will honor the limits -# while the SQL thread is not able to purge the relay logs, which did -# not happen before this patch. In addition we assert that while -# ignoring the limit (SQL thread needs to rotate before purging), the -# IO thread does not do it in an uncontrolled manner. - ---source include/have_binlog_format_statement.inc ---source include/have_innodb.inc ---source include/master-slave.inc - ---disable_query_log -CREATE TABLE t1 (c1 TEXT) engine=InnoDB; - -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); - ---sync_slave_with_master - -# wait for the SQL thread to sleep ---let $show_statement= SHOW PROCESSLIST ---let $field= State ---let $condition= = 'Slave has read all relay log; waiting for more updates' ---source include/wait_show_condition.inc - -# now the io thread has set rli->ignore_space_limit -# lets lock the table so that once the SQL thread awakes -# it blocks there and does not set rli->ignore_space_limit -# back to zero -LOCK TABLE t1 WRITE; - -# now issue more statements that will overflow the -# rli->log_space_limit (in this case ~10K) ---connection master - -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); - ---connection slave - -# ASSERT that the IO thread waits for the SQL thread to release some -# space before continuing ---let $show_statement= SHOW PROCESSLIST ---let $field= State ---let $condition= LIKE 'Waiting for %' -# before the patch (IO would have transfered everything) -#--let $condition= = 'Waiting for master to send event' -# after the patch (now it waits for space to be freed) -#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space' ---source include/wait_show_condition.inc - -# without the patch we can uncomment the following two lines and -# watch the IO thread synchronize with the master, thus writing -# relay logs way over the space limit -#--connection master -#--source include/sync_slave_io_with_master.inc - -## ASSERT that the IO thread has honored the limit+few bytes required to be able to purge ---let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1) ---let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1) ---let $assert_text= Assert that relay log space is close to the limit ---let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15 ---source include/assert.inc - -# unlock the table and let SQL thread continue applying events -UNLOCK TABLES; - ---connection master ---sync_slave_with_master ---let $diff_tables=master:test.t1,slave:test.t1 ---source include/diff_tables.inc - ---connection master -DROP TABLE t1; ---enable_query_log ---sync_slave_with_master - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_switch_stm_row_mixed.inc b/mysql-test/suite/rpl/include/rpl_switch_stm_row_mixed.inc deleted file mode 100644 index d814a25771168..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_switch_stm_row_mixed.inc +++ /dev/null @@ -1,633 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# -# rpl_switch_stm_row_mixed tests covers -# -# - Master is switching explicitly between STATEMENT, ROW, and MIXED -# binlog format showing when it is possible and when not. 
-# - Master switching from MIXED to RBR implicitly listing all use -# cases, e.g a query invokes SYS_GUID(), thereafter to serve as the -# definition of MIXED binlog format -# - correctness of execution - - --- source include/have_binlog_format_mixed_or_row.inc --- source include/master-slave.inc - -# Since this test generates row-based events in the binary log, the -# slave SQL thread cannot be in STATEMENT mode to execute this test, -# so we only execute it for MIXED and ROW as default value of -# BINLOG_FORMAT. - -connection slave; - -connection master; ---disable_warnings -drop database if exists mysqltest1; -create database mysqltest1; ---enable_warnings -use mysqltest1; - -# Save binlog format -set @my_binlog_format= @@global.binlog_format; - -# play with switching -set session binlog_format=mixed; -show session variables like "binlog_format%"; -set session binlog_format=statement; -show session variables like "binlog_format%"; -set session binlog_format=row; -show session variables like "binlog_format%"; - -set global binlog_format=DEFAULT; -show global variables like "binlog_format%"; -set global binlog_format=MIXED; -show global variables like "binlog_format%"; -set global binlog_format=STATEMENT; -show global variables like "binlog_format%"; -set global binlog_format=ROW; -show global variables like "binlog_format%"; -show session variables like "binlog_format%"; -select @@global.binlog_format, @@session.binlog_format; - -CREATE TABLE t1 (a varchar(100)); - -prepare stmt1 from 'insert into t1 select concat(SYS_GUID(),?)'; -set @string="emergency_1_"; -insert into t1 values("work_2_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(SYS_GUID(),"work_3_")); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values(concat("for_4_",SYS_GUID())); -insert into t1 select "yesterday_5_"; - -# verify that temp tables prevent a switch to SBR -create temporary table tmp(a char(100)); -insert into tmp values("see_6_"); ---error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR -set binlog_format=statement; -insert into t1 select * from tmp; -drop temporary table tmp; - -# Now we go to SBR -set binlog_format=statement; -show global variables like "binlog_format%"; -show session variables like "binlog_format%"; -select @@global.binlog_format, @@session.binlog_format; -set global binlog_format=statement; -show global variables like "binlog_format%"; -show session variables like "binlog_format%"; -select @@global.binlog_format, @@session.binlog_format; - -prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency_7_"; -insert into t1 values("work_8_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work_9_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values("for_10_"); -insert into t1 select "yesterday_11_"; - -# test statement (is not default after wl#3368) -set binlog_format=statement; -select @@global.binlog_format, @@session.binlog_format; -set global binlog_format=statement; -select @@global.binlog_format, @@session.binlog_format; - -prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency_12_"; -insert into t1 values("work_13_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work_14_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 
values("for_15_"); -insert into t1 select "yesterday_16_"; - -# and now the mixed mode - -set global binlog_format=mixed; -select @@global.binlog_format, @@session.binlog_format; -set binlog_format=default; -select @@global.binlog_format, @@session.binlog_format; - -prepare stmt1 from 'insert into t1 select concat(SYS_GUID(),?)'; -set @string="emergency_17_"; -insert into t1 values("work_18_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(SYS_GUID(),"work_19_")); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values(concat("for_20_",SYS_GUID())); -insert into t1 select "yesterday_21_"; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(SYS_GUID(),"work_22_")); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values(concat("for_23_",SYS_GUID())); -insert into t1 select "yesterday_24_"; - -# Test of CREATE TABLE SELECT - -create table t2 ENGINE=MyISAM select rpad(SYS_GUID(),100,' '); -create table t3 select 1 union select SYS_GUID(); ---disable_warnings -SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR -create table t4 select * from t1 where 3 in (select 1 union select 2 union select SYS_GUID() union select 3); ---enable_warnings -SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR -create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); -# what if SYS_GUID() is first: ---disable_warnings -insert ignore into t5 select SYS_GUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4); ---enable_warnings - -# inside a stored procedure - -delimiter |; -create procedure foo() -begin -insert into t1 values("work_25_"); -insert into t1 values(concat("for_26_",SYS_GUID())); -insert into t1 select "yesterday_27_"; -end| -create procedure foo2() -begin -insert into t1 values(concat("emergency_28_",SYS_GUID())); -insert into t1 values("work_29_"); -insert into t1 values(concat("for_30_",SYS_GUID())); -set session binlog_format=row; # accepted for stored procs -insert into t1 values("more work_31_"); -set session binlog_format=mixed; -end| -create function foo3() returns bigint unsigned -begin - set session binlog_format=row; # rejected for stored funcs - insert into t1 values("alarm"); - return 100; -end| -create procedure foo4(x varchar(100)) -begin -insert into t1 values(concat("work_250_",x)); -insert into t1 select "yesterday_270_"; -end| -delimiter ;| -call foo(); -call foo2(); -call foo4("hello"); -call foo4(SYS_GUID()); -call foo4("world"); - -# test that can't SET in a stored function ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT -select foo3(); -select * from t1 where a="alarm"; - -# Tests of stored functions/triggers/views for BUG#20930 "Mixed -# binlogging mode does not work with stored functions, triggers, -# views" - -# Function which calls procedure -drop function foo3; -delimiter |; -create function foo3() returns bigint unsigned -begin - insert into t1 values("foo3_32_"); - call foo(); - return 100; -end| -delimiter ;| -insert into t2 select foo3(); - -prepare stmt1 from 'insert into t2 select foo3()'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# Test if stored function calls stored function which calls procedure -# which requires row-based. 
- -delimiter |; -create function foo4() returns bigint unsigned -begin - insert into t2 select foo3(); - return 100; -end| -delimiter ;| -select foo4(); - -prepare stmt1 from 'select foo4()'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# A simple stored function -delimiter |; -create function foo5() returns bigint unsigned -begin - insert into t2 select SYS_GUID(); - return 100; -end| -delimiter ;| -select foo5(); - -prepare stmt1 from 'select foo5()'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# A simple stored function where SYS_GUID() is in the argument -delimiter |; -create function foo6(x varchar(100)) returns bigint unsigned -begin - insert into t2 select x; - return 100; -end| -delimiter ;| -select foo6("foo6_1_"); -select foo6(concat("foo6_2_",SYS_GUID())); - -prepare stmt1 from 'select foo6(concat("foo6_3_",SYS_GUID()))'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - - -# Test of views using SYS_GUID() - -create view v1 as select SYS_GUID(); -create table t11 (data varchar(255)); -insert into t11 select * from v1; -# Test of querying INFORMATION_SCHEMA which parses the view's body, -# to verify that it binlogs statement-based (is not polluted by -# the parsing of the view's body). -insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11'); -prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')"; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# Test of triggers with SYS_GUID() -delimiter |; -create trigger t11_bi before insert on t11 for each row -begin - set NEW.data = concat(NEW.data,SYS_GUID()); -end| -delimiter ;| -insert into t11 values("try_560_"); - -# Test that INSERT DELAYED works in mixed mode (BUG#20649) -insert delayed into t2 values("delay_1_"); -insert delayed into t2 values(concat("delay_2_",SYS_GUID())); -insert delayed into t2 values("delay_6_"); - -# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not -# replicate fine in statement-based ; we test that in mixed mode it -# works). -insert delayed into t2 values(rand()); -set @a=2.345; -insert delayed into t2 values(@a); - -# With INSERT DELAYED, rows are written to the binlog after they are -# written to the table. Therefore, it is not enough to wait until the -# rows make it to t2 on the master (the rows may not be in the binlog -# at that time, and may still not be in the binlog when -# sync_slave_with_master is later called). Instead, we wait until the -# rows make it to t2 on the slave. We first call -# sync_slave_with_master, so that we are sure that t2 has been created -# on the slave. -sync_slave_with_master; -let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2; ---source include/wait_condition.inc -connection master; - -# If you want to do manual testing of the mixed mode regarding UDFs (not -# testable automatically as quite platform- and compiler-dependent), -# you just need to set the variable below to 1, and to -# "make udf_example.so" in sql/, and to copy sql/udf_example.so to -# MYSQL_TEST_DIR/lib/mysql. 
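If you do enable the manual UDF block below, one quick sanity check (my own suggestion, not something the original test performs) is to confirm the function really got registered before relying on it:

CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so';
SELECT name, dl FROM mysql.func;   # metaphon should be listed with its library
DROP FUNCTION metaphon;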
-let $you_want_to_test_UDF=0; -if ($you_want_to_test_UDF) -{ - CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so'; - prepare stmt1 from 'insert into t1 select metaphon(?)'; - set @string="emergency_133_"; - insert into t1 values("work_134_"); - execute stmt1 using @string; - deallocate prepare stmt1; - prepare stmt1 from 'insert into t1 select ?'; - insert into t1 values(metaphon("work_135_")); - execute stmt1 using @string; - deallocate prepare stmt1; - insert into t1 values(metaphon("for_136_")); - insert into t1 select "yesterday_137_"; - create table t6 select metaphon("for_138_"); - create table t7 select 1 union select metaphon("for_139_"); - create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3); - create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); -} - -create table t20 select * from t1; # save for comparing later -create table t21 select * from t2; -create table t22 select * from t3; -drop table t1,t2,t3; - -# This tests the fix to -# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog -# We verify that under the mixed binlog mode, a stored function -# modifying at least two tables having an auto_increment column, -# is binlogged row-based. Indeed in statement-based binlogging, -# only the auto_increment value generated for the first table -# is recorded in the binlog, the value generated for the 2nd table -# lacking. - -create table t1 (a int primary key auto_increment, b varchar(100)); -create table t2 (a int primary key auto_increment, b varchar(100)); -create table t3 (b varchar(100)); -delimiter |; -create function f (x varchar(100)) returns int deterministic -begin - insert into t1 values(null,x); - insert into t2 values(null,x); - return 1; -end| -delimiter ;| -select f("try_41_"); -# Two operations which compensate each other except that their net -# effect is that they advance the auto_increment counter of t2 on slave: -sync_slave_with_master; -use mysqltest1; -insert into t2 values(2,null),(3,null),(4,null); -delete from t2 where a>=2; - -connection master; -# this is the call which didn't replicate well -select f("try_42_"); -sync_slave_with_master; - -# now use prepared statement and test again, just to see that the RBB -# mode isn't set at PREPARE but at EXECUTE. 
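To see that the format decision really is taken at EXECUTE time and not at PREPARE time, one can compare the binary log before and after each step. This is only an illustrative sketch (RESET MASTER would of course disturb the replication test itself, so it is not meant to be pasted in here):

RESET MASTER;
PREPARE ps FROM 'SELECT f(?)';
SHOW BINLOG EVENTS;              # nothing is logged for the PREPARE itself
SET @x = "demo_";
EXECUTE ps USING @x;             # the row events for t1 and t2 are written only now
SHOW BINLOG EVENTS;
DEALLOCATE PREPARE ps;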
- -insert into t2 values(3,null),(4,null); -delete from t2 where a>=3; - -connection master; -prepare stmt1 from 'select f(?)'; -set @string="try_43_"; -insert into t1 values(null,"try_44_"); # should be SBB -execute stmt1 using @string; # should be RBB -deallocate prepare stmt1; -sync_slave_with_master; - -# verify that if only one table has auto_inc, it does not trigger RBB -# (we'll check in binlog further below) - -connection master; -create table t12 select * from t1; # save for comparing later -drop table t1; -create table t1 (a int, b varchar(100), key(a)); -select f("try_45_"); - -# restore table's key -create table t13 select * from t1; -drop table t1; -create table t1 (a int primary key auto_increment, b varchar(100)); - -# now test if it's two functions, each of them inserts in one table - -drop function f; -# we need a unique key to have sorting of rows by mysqldump -create table t14 (unique (a)) select * from t2; -truncate table t2; -delimiter |; -create function f1 (x varchar(100)) returns int deterministic -begin - insert into t1 values(null,x); - return 1; -end| -create function f2 (x varchar(100)) returns int deterministic -begin - insert into t2 values(null,x); - return 1; -end| -delimiter ;| -select f1("try_46_"),f2("try_47_"); - -sync_slave_with_master; -insert into t2 values(2,null),(3,null),(4,null); -delete from t2 where a>=2; - -connection master; -# Test with SELECT and INSERT -select f1("try_48_"),f2("try_49_"); -insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_"))); -sync_slave_with_master; - -# verify that if f2 does only read on an auto_inc table, this does not -# switch to RBB -connection master; -drop function f2; -delimiter |; -create function f2 (x varchar(100)) returns int deterministic -begin - declare y int; - insert into t1 values(null,x); - set y = (select count(*) from t2); - return y; -end| -delimiter ;| -select f1("try_53_"),f2("try_54_"); -sync_slave_with_master; - -# And now, a normal statement with a trigger (no stored functions) - -connection master; -drop function f2; -delimiter |; -create trigger t1_bi before insert on t1 for each row -begin - insert into t2 values(null,"try_55_"); -end| -delimiter ;| -insert into t1 values(null,"try_56_"); -# and now remove one auto_increment and verify SBB -alter table t1 modify a int, drop primary key; -insert into t1 values(null,"try_57_"); -sync_slave_with_master; - -# Test for BUG#20499 "mixed mode with temporary table breaks binlog" -# Slave used to have only 2 rows instead of 3. 
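The rule being checked in the next block: once a temporary table has been filled through row-logged statements, any statement that reads from it must also be row-logged, because the temporary table never materialises on the slave. A condensed sketch with hypothetical table names (demo_dst and demo_tmp are not part of the test):

CREATE TABLE demo_dst (g VARCHAR(64));
CREATE TEMPORARY TABLE demo_tmp SELECT SYS_GUID() AS g;  # row-logged in MIXED
INSERT INTO demo_dst SELECT g FROM demo_tmp;             # must be row-logged too
DROP TEMPORARY TABLE demo_tmp;
INSERT INTO demo_dst VALUES ('plain');                   # statement-logged again
DROP TABLE demo_dst;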
-connection master; -CREATE TEMPORARY TABLE t15 SELECT SYS_GUID(); -create table t16 like t15; -INSERT INTO t16 SELECT * FROM t15; -# we'll verify that this one is done RBB -insert into t16 values("try_65_"); -drop table t15; -# we'll verify that this one is done SBB -insert into t16 values("try_66_"); -sync_slave_with_master; - -# and now compare: - -connection master; - -# first check that data on master is sensible -select count(*) from t1; -select count(*) from t2; -select count(*) from t3; -select count(*) from t4; -select count(*) from t5; -select count(*) from t11; -select count(*) from t20; -select count(*) from t21; -select count(*) from t22; -select count(*) from t12; -select count(*) from t13; -select count(*) from t14; -select count(*) from t16; -if ($you_want_to_test_UDF) -{ - select count(*) from t6; - select count(*) from t7; - select count(*) from t8; - select count(*) from t9; -} - -sync_slave_with_master; - -# -# Bug#20863 If binlog format is changed between update and unlock of -# tables, wrong binlog -# - -connection master; -DROP TABLE IF EXISTS t11; -SET SESSION BINLOG_FORMAT=STATEMENT; -CREATE TABLE t11 (song VARCHAR(255)); -LOCK TABLES t11 WRITE; -SET SESSION BINLOG_FORMAT=ROW; -INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict'); -SET SESSION BINLOG_FORMAT=STATEMENT; -INSERT INTO t11 VALUES('Careful With That Axe, Eugene'); -UNLOCK TABLES; - ---query_vertical SELECT * FROM t11 -sync_slave_with_master; -USE mysqltest1; ---query_vertical SELECT * FROM t11 - -connection master; -DROP TABLE IF EXISTS t12; -SET SESSION BINLOG_FORMAT=MIXED; -CREATE TABLE t12 (data LONG); -LOCK TABLES t12 WRITE; -INSERT INTO t12 VALUES(SYS_GUID()); -UNLOCK TABLES; -sync_slave_with_master; - -# -# BUG#28086: SBR of USER() becomes corrupted on slave -# - -connection master; - -# Just to get something that is non-trivial, albeit still simple, we -# stuff the result of USER() and CURRENT_USER() into a variable. ---delimiter $$ -CREATE FUNCTION my_user() - RETURNS CHAR(64) -BEGIN - DECLARE user CHAR(64); - SELECT USER() INTO user; - RETURN user; -END $$ ---delimiter ; - ---delimiter $$ -CREATE FUNCTION my_current_user() - RETURNS CHAR(64) -BEGIN - DECLARE user CHAR(64); - SELECT CURRENT_USER() INTO user; - RETURN user; -END $$ ---delimiter ; - -DROP TABLE IF EXISTS t13; -CREATE TABLE t13 (data CHAR(64)); -INSERT INTO t13 VALUES (USER()); -INSERT INTO t13 VALUES (my_user()); -INSERT INTO t13 VALUES (CURRENT_USER()); -INSERT INTO t13 VALUES (my_current_user()); - -sync_slave_with_master; - -# as we're using SYS_GUID we don't SELECT but use "diff" like in rpl_row_UUID ---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql ---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql - -# Let's compare. Note: If they match test will pass, if they do not match -# the test will show that the diff statement failed and not reject file -# will be created. 
You will need to go to the mysql-test dir and diff -# the files your self to see what is not matching - -diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; - -connection master; - -# Now test that mysqlbinlog works fine on a binlog generated by the -# mixed mode - -# BUG#11312 "DELIMITER is not written to the binary log that causes -# syntax error" makes that mysqlbinlog will fail if we pass it the -# text of queries; this forces us to use --base64-output here. - -# BUG#20929 "BINLOG command causes invalid free plus assertion -# failure" makes mysqld segfault when receiving --base64-output - -# So I can't enable this piece of test -# SIGH - -if ($enable_when_11312_or_20929_fixed) -{ ---exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql -drop database mysqltest1; ---exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql ---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql -# the old mysqldump output on slave is the same as what it was on -# master before restoring on master. -diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; -} - -drop database mysqltest1; -sync_slave_with_master; - -connection master; -# Restore binlog format setting -set global binlog_format =@my_binlog_format; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_sync_test.inc b/mysql-test/suite/rpl/include/rpl_sync_test.inc deleted file mode 100644 index 1e2ec2ca83b85..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_sync_test.inc +++ /dev/null @@ -1,159 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -######################################################################################## -# This test verifies the options --sync-relay-log-info and --relay-log-recovery by -# crashing the slave in two different situations: -# (case-1) - Corrupt the relay log with changes which were not processed by -# the SQL Thread and crashes it. -# (case-2) - Corrupt the master.info with wrong coordinates and crashes it. -# -# Case 1: -# 1 - Stops the SQL Thread -# 2 - Inserts new records into the master. -# 3 - Corrupts the relay-log.bin* which most likely has such changes. -# 4 - Crashes the slave -# 5 - Verifies if the slave is sync with the master which means that the information -# loss was circumvented by the recovery process. -# -# Case 2: -# 1 - Stops the SQL/IO Threads -# 2 - Inserts new records into the master. -# 3 - Corrupts the master.info with wrong coordinates. -# 4 - Crashes the slave -# 5 - Verifies if the slave is sync with the master which means that the information -# loss was circumvented by the recovery process. 
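Both crash/recovery cases below only work if the slave was started with relay log recovery enabled; the options named in the header are normally supplied through the test's .opt or .cnf files, which are not part of this diff. The following is therefore only an assumed sketch of that configuration:

[mysqld.2]
relay-log-recovery=1
sync-relay-log-info=1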
-######################################################################################## - -######################################################################################## -# Configuring the environment -######################################################################################## ---echo =====Configuring the enviroment=======; ---source include/not_embedded.inc ---source include/not_valgrind.inc ---source include/have_debug.inc ---source include/have_innodb.inc ---source include/not_crashrep.inc ---source include/master-slave.inc - -call mtr.add_suppression('Attempting backtrace'); -call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001"); -# Use innodb so we do not get "table should be repaired" issues. -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -flush tables; -CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb; - -insert into t1(a) values(1); -insert into t1(a) values(2); -insert into t1(a) values(3); - -######################################################################################## -# Case 1: Corrupt a relay-log.bin* -######################################################################################## ---echo =====Inserting data on the master but without the SQL Thread being running=======; -sync_slave_with_master; - -connection slave; -let $MYSQLD_SLAVE_DATADIR= `select @@datadir`; ---replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR ---copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup ---source include/stop_slave_sql.inc - -connection master; -insert into t1(a) values(4); -insert into t1(a) values(5); -insert into t1(a) values(6); - ---echo =====Removing relay log files and crashing/recoverying the slave=======; -connection slave; ---source include/stop_slave_io.inc - -let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1); - ---let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file -perl; -$file= $ENV{'FILE_TO_CORRUPT'}; -open(FILE, ">$file") || die "Unable to open $file."; -truncate(FILE,0); -print FILE "failure"; -close ($file); -EOF - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect -SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; ---error 2013 -FLUSH LOGS; - ---let $rpl_server_number= 2 ---source include/rpl_reconnect.inc - ---echo =====Dumping and comparing tables=======; ---source include/start_slave.inc - -connection master; -sync_slave_with_master; - -let $diff_tables=master:t1,slave:t1; -source include/diff_tables.inc; - -######################################################################################## -# Case 2: Corrupt a master.info -######################################################################################## ---echo =====Corrupting the master.info=======; -connection slave; ---source include/stop_slave.inc - -connection master; -FLUSH LOGS; - -insert into t1(a) values(7); -insert into t1(a) values(8); -insert into t1(a) values(9); - -connection slave; -let MYSQLD_SLAVE_DATADIR=`select @@datadir`; - ---perl -use strict; -use warnings; -my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup"; -my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info"; -open(FILE, "<", $src) or die; -my @content= ; -close FILE; -open(FILE, ">", $dst) or die; -binmode FILE; -print FILE @content; -close FILE; -EOF - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect -SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; ---error 2013 -FLUSH LOGS; - ---let $rpl_server_number= 2 ---source include/rpl_reconnect.inc - ---echo =====Dumping 
and comparing tables=======; ---source include/start_slave.inc - -connection master; -sync_slave_with_master; - -let $diff_tables=master:t1,slave:t1; -source include/diff_tables.inc; - -######################################################################################## -# Clean up -######################################################################################## ---echo =====Clean up=======; -connection master; -drop table t1; - ---remove_file $MYSQLD_SLAVE_DATADIR/master.backup ---source include/rpl_end.inc - diff --git a/mysql-test/suite/rpl/include/rpl_temporal_format_default_to_default.inc b/mysql-test/suite/rpl/include/rpl_temporal_format_default_to_default.inc deleted file mode 100644 index 6728ff55d6f5e..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_temporal_format_default_to_default.inc +++ /dev/null @@ -1,82 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - ---source include/master-slave.inc - -if ($force_master_mysql56_temporal_format) -{ - connection master; - eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format; -} - -if ($force_slave_mysql56_temporal_format) -{ - connection slave; - eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format; -} - -connection master; -SELECT @@global.mysql56_temporal_format AS on_master; -connection slave; -SELECT @@global.mysql56_temporal_format AS on_slave; -connection master; - -CREATE TABLE t1 -( - c0 TIME(0), - c1 TIME(1), - c2 TIME(2), - c3 TIME(3), - c4 TIME(4), - c5 TIME(5), - c6 TIME(6) -); -CREATE TABLE t2 -( - c0 TIMESTAMP(0), - c1 TIMESTAMP(1), - c2 TIMESTAMP(2), - c3 TIMESTAMP(3), - c4 TIMESTAMP(4), - c5 TIMESTAMP(5), - c6 TIMESTAMP(6) -); - -CREATE TABLE t3 -( - c0 DATETIME(0), - c1 DATETIME(1), - c2 DATETIME(2), - c3 DATETIME(3), - c4 DATETIME(4), - c5 DATETIME(5), - c6 DATETIME(6) -); -INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111'); -INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); -INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); -SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES -WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; -sync_slave_with_master; - -connection slave; ---query_vertical SELECT * FROM t1; ---query_vertical SELECT * FROM t2; ---query_vertical SELECT * FROM t3; -SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES -WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; - -connection master; -DROP TABLE t1; -DROP TABLE t2; -DROP TABLE t3; - -connection slave; -SET @@global.mysql56_temporal_format=DEFAULT; -connection master; -SET @@global.mysql56_temporal_format=DEFAULT; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_typeconv.inc b/mysql-test/suite/rpl/include/rpl_typeconv.inc deleted file mode 100644 index 9e5662588827e..0000000000000 --- a/mysql-test/suite/rpl/include/rpl_typeconv.inc +++ /dev/null @@ -1,78 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption suite). 
-# Please check all dependent tests after modifying it -# - ---source include/have_binlog_format_row.inc ---source include/master-slave.inc - -connection slave; -set @saved_slave_type_conversions = @@global.slave_type_conversions; -CREATE TABLE type_conversions ( - TestNo INT AUTO_INCREMENT PRIMARY KEY, - Source TEXT, - Target TEXT, - Flags TEXT, - On_Master LONGTEXT, - On_Slave LONGTEXT, - Expected LONGTEXT, - Compare INT, - Error TEXT); - -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; -SELECT @@global.slave_type_conversions; ---error ER_WRONG_VALUE_FOR_VAR -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT'; -SELECT @@global.slave_type_conversions; - -# Checking strict interpretation of type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; -source suite/rpl/include/type_conversions.test; - -# Checking lossy integer type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; -source suite/rpl/include/type_conversions.test; - -# Checking non-lossy integer type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; -source suite/rpl/include/type_conversions.test; - -# Checking all type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; -source suite/rpl/include/type_conversions.test; - -connection slave; ---echo **** Result of conversions **** -disable_query_log; -SELECT RPAD(Source, 15, ' ') AS Source_Type, - RPAD(Target, 15, ' ') AS Target_Type, - RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags, - IF(Compare IS NULL AND Error IS NOT NULL, '', - IF(Compare, '', - CONCAT("'", On_Slave, "' != '", Expected, "'"))) - AS Value_On_Slave - FROM type_conversions; -enable_query_log; -DROP TABLE type_conversions; - -call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. 
cannot be converted from type.* error.* 1677"); - -connection master; -DROP TABLE t1; -sync_slave_with_master; - -set global slave_type_conversions = @saved_slave_type_conversions; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_loaddatalocal.result b/mysql-test/suite/rpl/r/rpl_loaddata_local.result similarity index 100% rename from mysql-test/suite/rpl/r/rpl_loaddatalocal.result rename to mysql-test/suite/rpl/r/rpl_loaddata_local.result diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic_error_stop.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_error_stop.result new file mode 100644 index 0000000000000..48672651c3608 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_error_stop.result @@ -0,0 +1,81 @@ +include/rpl_init.inc [topology=1->2] +call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit"); +call mtr.add_suppression("Slave: Duplicate entry '99'"); +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1,1); +INSERT INTO t1 VALUES(2,1); +INSERT INTO t1 VALUES(3,1); +INSERT INTO t1 VALUES(4,1); +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET @old_debug_dbug = @@global.debug_dbug; +SET @@global.debug_dbug = "d,hold_worker2_favor_worker3"; +SET GLOBAL slave_parallel_threads=4; +CHANGE MASTER TO master_use_gtid=slave_pos; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='optimistic'; +connection server_1; +SET @@gtid_seq_no = 2001; +BEGIN; +UPDATE t1 SET b = 11 WHERE a = 4; +UPDATE t1 SET b = 11 WHERE a = 3; +UPDATE t1 SET a = 99 WHERE a = 1; +COMMIT; +UPDATE t1 SET b = 2 WHERE a = 2; +UPDATE t1 SET b = 3 WHERE a = 3; +DROP TABLE IF EXISTS phantom_1; +Warnings: +Note 1051 Unknown table 'test.phantom_1' +include/save_master_gtid.inc +connect slave_local_0, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +UPDATE t1 set b = 11 where a = 4; +connect slave_local_1, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +INSERT INTO t1 VALUES (99, 11); +connect slave_local_2, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +UPDATE t1 SET b = 12 WHERE a = 2; +connect slave_local_3, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +UPDATE t1 SET b = 13 WHERE a = 3; +connection server_2; +include/start_slave.inc +# W4 is waiting to start its DROP +connection slave_local_3; +rollback; +connection slave_local_0; +rollback; +SELECT count(*) = 0 as "W3 undid its commit state" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%"; +W3 undid its commit state +1 +connection slave_local_2; +rollback; +connection slave_local_1; +commit; +SELECT COUNT(*) = 1 as "W4 remains with the same status" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit%"; +W4 remains with the same status +1 +# Slave_SQL_Running YES = Yes +# while W2 is held back ... 
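The SIGNAL that follows releases a sync point armed inside worker W2 by the hold_worker2_favor_worker3 debug keyword. In general the DEBUG_SYNC facility pairs a waiter and a signaller across connections, roughly like this (generic sketch, not taken from this test, and requiring a debug build):

# connection A (the one that must pause):
SET DEBUG_SYNC = 'now WAIT_FOR cont_worker2';
# connection B (the controller):
SET DEBUG_SYNC = 'now SIGNAL cont_worker2';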
+SET DEBUG_SYNC = 'now SIGNAL cont_worker2'; +include/wait_for_slave_sql_error.inc [errno=1062] +DELETE FROM t1 WHERE a=99; +include/start_slave.inc +include/sync_with_master_gtid.inc +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET @@global.debug_dbug = @old_debug_dbug; +SET debug_sync = RESET; +include/start_slave.inc +connection server_1; +DROP TABLE t1; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result b/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result index a0ea06afa89da..c303abc672c0d 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result @@ -7,8 +7,6 @@ SET @@global.rpl_semi_sync_master_timeout = 60000; SET @@global.rpl_semi_sync_master_wait_no_slave = 1; # It's okay to see "Killed" but we should not see "Timeout" in the log. call mtr.add_suppression("Killed waiting for reply of binlog"); -call mtr.add_suppression("Run function 'after_commit' in plugin 'rpl_semi_sync_master' failed"); -call mtr.add_suppression("Run function 'after_sync' in plugin 'rpl_semi_sync_master' failed"); # # Test wait point = AFTER_COMMIT # diff --git a/mysql-test/suite/rpl/t/rpl_binlog_errors.test b/mysql-test/suite/rpl/t/rpl_binlog_errors.test index 30faaf7961308..ab95c9b4ee353 100644 --- a/mysql-test/suite/rpl/t/rpl_binlog_errors.test +++ b/mysql-test/suite/rpl/t/rpl_binlog_errors.test @@ -1 +1,443 @@ ---source include/rpl_binlog_errors.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# +# Usage: +# --let $binlog_limit= X[,Y] # optional +# +# Semantics of the value is the same as in include/show_binlog_events.inc +# which the script calls as a part of the test flow. +# The goal is to print the event demonstrating the triggered error, +# so normally Y should be 1 (print the exact event only); +# however, depending on test-specific server options, the offset X +# can be different. +# + +# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error +# when generating new name. +# +# WHY +# === +# +# We want to check whether error is reported or not when +# new_file_impl fails (this may happen when rotation is not +# possible because there is some problem finding an +# unique filename). +# +# HOW +# === +# +# Test cases are documented inline. 
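Nearly every assertion in this file follows the same fault-injection pattern: arm a named debug point, run a statement that forces the binary log to rotate, expect the propagated error, then disarm. Condensed from TEST #2 below:

SET @@global.debug_dbug="d,error_unique_log_filename";
--error ER_NO_UNIQUE_LOGFILE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;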
+ +-- source include/have_innodb.inc +-- source include/have_debug.inc +-- source include/master-slave.inc + +-- echo ####################################################################### +-- echo ####################### PART 1: MASTER TESTS ########################## +-- echo ####################################################################### + + +### ACTION: stopping slave as it is not needed for the first part of +### the test + +-- connection slave +-- source include/stop_slave.inc +-- connection master + +call mtr.add_suppression("Can't generate a unique log-filename"); +call mtr.add_suppression("Writing one row to the row-based binary log failed.*"); +call mtr.add_suppression("Error writing file .*"); +call mtr.add_suppression("Could not use master-bin for logging"); + +SET @old_debug= @@global.debug_dbug; + +### ACTION: create a large file (> 4096 bytes) that will be later used +### in LOAD DATA INFILE to check binlog errors in its vacinity +-- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data +-- let $MYSQLD_DATADIR= `select @@datadir` +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file' + +### ACTION: create a small file (< 4096 bytes) that will be later used +### in LOAD DATA INFILE to check for absence of binlog errors +### when file loading this file does not force flushing and +### rotating the binary log +-- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data +-- let $MYSQLD_DATADIR= `select @@datadir` +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2' + +RESET MASTER; + +-- echo ###################### TEST #1 + +### ASSERTION: no problem flushing logs (should show two binlogs) +FLUSH LOGS; +-- echo # assert: must show two binlogs +-- source include/show_binary_logs.inc + +-- echo ###################### TEST #2 + +### ASSERTION: check that FLUSH LOGS actually fails and reports +### failure back to the user if find_uniq_filename fails +### (should show just one binlog) + +RESET MASTER; +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- error ER_NO_UNIQUE_LOGFILE +FLUSH LOGS; +-- echo # assert: must show one binlog +-- source include/show_binary_logs.inc + +### ACTION: clean up and move to next test +SET @@global.debug_dbug=@old_debug; +RESET MASTER; + +-- echo ###################### TEST #3 + +### ACTION: create some tables (t1, t2, t4) and insert some values in +### table t1 +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB; +CREATE TABLE t4 (a VARCHAR(16384)); +INSERT INTO t1 VALUES (1); +RESET MASTER; + +### ASSERTION: we force rotation of the binary log because it exceeds +### the max_binlog_size option (should show two binary +### logs) + +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 + +# shows two binary logs +-- echo # assert: must show two binlog +-- source include/show_binary_logs.inc + +# clean up the table and the binlog to be used in next part of test +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #4 + +### ASSERTION: load the big file into a transactional table and check +### that it reports error. The table will contain the +### changes performed despite the fact that it reported an +### error. 
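TEST #3 above (and the LOAD DATA statements in TEST #4 and later) only trigger a rotation because the server runs with a very small max_binlog_size, so the 8K data file overflows one binary log. The value comes from the test's option file, which is not shown in this diff; the following is an assumption of what it roughly contains:

# rpl_binlog_errors-master.opt (assumed):
--max-binlog-size=4096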
+ +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 + +# show table +-- echo # assert: must show one entry +SELECT count(*) FROM t2; + +# clean up the table and the binlog to be used in next part of test +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #5 + +### ASSERTION: load the small file into a transactional table and +### check that it succeeds + +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2 + +# show table +-- echo # assert: must show one entry +SELECT count(*) FROM t2; + +# clean up the table and the binlog to be used in next part of test +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #6 + +### ASSERTION: check that even if one is using a transactional table +### and explicit transactions (no autocommit) if rotation +### fails we get the error. Transaction is not rolledback +### because rotation happens after the commit. + +SET @@global.debug_dbug="d,error_unique_log_filename"; +SET AUTOCOMMIT=0; +INSERT INTO t2 VALUES ('muse'); +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 +INSERT INTO t2 VALUES ('muse'); +-- error ER_NO_UNIQUE_LOGFILE +COMMIT; + +### ACTION: Show the contents of the table after the test +-- echo # assert: must show three entries +SELECT count(*) FROM t2; + +### ACTION: clean up and move to the next test +SET AUTOCOMMIT= 1; +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #7 + +### ASSERTION: check that on a non-transactional table, if rotation +### fails then an error is reported and an incident event +### is written to the current binary log. + +SET @@global.debug_dbug="d,error_unique_log_filename"; + +# Disable logging Annotate_rows events to preserve events count. +let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; +SET @@binlog_annotate_row_events= 0; + +SELECT count(*) FROM t4; +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 + +-- echo # assert: must show 1 entry +SELECT count(*) FROM t4; + +-- echo ### check that the incident event is written to the current log +SET @@global.debug_dbug=@old_debug; +if (!$binlog_limit) +{ + -- let $binlog_limit= 4,1 +} +-- source include/show_binlog_events.inc + +# clean up and move to next test +DELETE FROM t4; + +--disable_query_log +eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; +--enable_query_log + +RESET MASTER; + +-- echo ###################### TEST #8 + +### ASSERTION: check that statements end up in error but they succeed +### on changing the data. 
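For TEST #7 above, the show_binlog_events call is what proves the incident was recorded; an illustrative check (positions vary per run, so only the event type and info text matter):

--let $binlog_limit= 4,1
--source include/show_binlog_events.inc
# expected Event_type: Incident, Info: #1 (LOST_EVENTS)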
+ +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- echo # must show 0 entries +SELECT count(*) FROM t4; +SELECT count(*) FROM t2; + +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 +-- error ER_NO_UNIQUE_LOGFILE +INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'); + +-- echo # INFO: Count(*) Before Offending DELETEs +-- echo # assert: must show 1 entry +SELECT count(*) FROM t4; +-- echo # assert: must show 4 entries +SELECT count(*) FROM t2; + +-- error ER_NO_UNIQUE_LOGFILE +DELETE FROM t4; +-- error ER_NO_UNIQUE_LOGFILE +DELETE FROM t2; + +-- echo # INFO: Count(*) After Offending DELETEs +-- echo # assert: must show zero entries +SELECT count(*) FROM t4; +SELECT count(*) FROM t2; + +# remove fault injection +SET @@global.debug_dbug=@old_debug; + +-- echo ###################### TEST #9 + +### ASSERTION: check that if we disable binlogging, then statements +### succeed. +SET @@global.debug_dbug="d,error_unique_log_filename"; +SET SQL_LOG_BIN=0; +INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd'); +INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh'); +-- echo # assert: must show four entries +SELECT count(*) FROM t2; +SELECT count(*) FROM t4; +DELETE FROM t2; +DELETE FROM t4; +-- echo # assert: must show zero entries +SELECT count(*) FROM t2; +SELECT count(*) FROM t4; +SET SQL_LOG_BIN=1; +SET @@global.debug_dbug=@old_debug; + +-- echo ###################### TEST #10 + +### ASSERTION: check that error is reported if there is a failure +### while registering the index file and the binary log +### file or failure to write the rotate event. + +call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); +call mtr.add_suppression("Could not use .*"); + +RESET MASTER; +SHOW WARNINGS; + +# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open +SET @@global.debug_dbug="d,fault_injection_registering_index"; +-- replace_regex /\.[\\\/]master/master/ +-- error ER_CANT_OPEN_FILE +FLUSH LOGS; +SET @@global.debug_dbug=@old_debug; + +-- error ER_NO_BINARY_LOGGING +SHOW BINARY LOGS; + +# issue some statements and check that they don't fail +CREATE TABLE t5 (a INT); +INSERT INTO t4 VALUES ('bbbbb'); +INSERT INTO t2 VALUES ('aaaaa'); +DELETE FROM t4; +DELETE FROM t2; +DROP TABLE t5; +flush tables; + +-- echo ###################### TEST #11 + +### ASSERTION: check that error is reported if there is a failure +### while opening the index file and the binary log file or +### failure to write the rotate event. 
+ +# restart the server so that we have binlog again +--let $rpl_server_number= 1 +--source include/rpl_restart_server.inc + +# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file +SET @@global.debug_dbug="d,fault_injection_openning_index"; +-- replace_regex /\.[\\\/]master/master/ +-- error ER_CANT_OPEN_FILE +FLUSH LOGS; +SET @@global.debug_dbug=@old_debug; + +-- error ER_FLUSH_MASTER_BINLOG_CLOSED +RESET MASTER; + +# issue some statements and check that they don't fail +CREATE TABLE t5 (a INT); +INSERT INTO t4 VALUES ('bbbbb'); +INSERT INTO t2 VALUES ('aaaaa'); +DELETE FROM t4; +DELETE FROM t2; +DROP TABLE t5; +flush tables; + +# restart the server so that we have binlog again +--let $rpl_server_number= 1 +--source include/rpl_restart_server.inc + +-- echo ###################### TEST #12 + +### ASSERTION: check that error is reported if there is a failure +### while writing the rotate event when creating a new log +### file. + +# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::MYSQL_BIN_LOG::new_file_impl +SET @@global.debug_dbug="d,fault_injection_new_file_rotate_event"; +-- error ER_ERROR_ON_WRITE +FLUSH LOGS; +SET @@global.debug_dbug=@old_debug; + +-- error ER_FLUSH_MASTER_BINLOG_CLOSED +RESET MASTER; + +# issue some statements and check that they don't fail +CREATE TABLE t5 (a INT); +INSERT INTO t4 VALUES ('bbbbb'); +INSERT INTO t2 VALUES ('aaaaa'); +DELETE FROM t4; +DELETE FROM t2; +DROP TABLE t5; +flush tables; + +# restart the server so that we have binlog again +--let $rpl_server_number= 1 +--source include/rpl_restart_server.inc + +## clean up +DROP TABLE t1, t2, t4; +RESET MASTER; + +# restart slave again +-- connection slave +-- source include/start_slave.inc +-- connection master + +-- echo ####################################################################### +-- echo ####################### PART 2: SLAVE TESTS ########################### +-- echo ####################################################################### + +### setup +--source include/rpl_reset.inc +-- connection slave + +# Slave tests rely on logic of non-gtid mode +--source include/stop_slave.inc +CHANGE MASTER TO MASTER_USE_GTID=NO; +--source include/start_slave.inc + +# slave suppressions + +call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*"); +call mtr.add_suppression("Error writing file .*"); +call mtr.add_suppression("Could not use .*"); +call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); +call mtr.add_suppression("Can't generate a unique log-filename .*"); +-- echo ###################### TEST #13 + +#### ASSERTION: check against unique log filename error +-- let $io_thd_injection_fault_flag= error_unique_log_filename +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +-- echo ###################### TEST #14 + +#### ASSERTION: check against rotate failing +-- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +-- echo ###################### TEST #15 + +#### ASSERTION: check against relay log open failure +-- let $io_thd_injection_fault_flag= fault_injection_registering_index +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +-- echo ###################### TEST #16 + +#### ASSERTION: check against relay log index open 
failure +-- let $io_thd_injection_fault_flag= fault_injection_openning_index +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +### clean up +-- source include/stop_slave_sql.inc +RESET SLAVE; +RESET MASTER; +--remove_file $load_file +--remove_file $load_file2 +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test b/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test index 573c1d111fc49..7c1d0ea25e2dc 100644 --- a/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test +++ b/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test @@ -1 +1,88 @@ ---source include/rpl_cant_read_event_incident.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# +# Bug#11747416 : 32228 A disk full makes binary log corrupt. +# +# +# The test demonstrates reading from binlog error propagation to slave +# and reporting there. +# Conditions for the bug include a crash at time of the last event to +# the binlog was written partly. With the fixes the event is not sent out +# any longer, but rather the dump thread sends out a sound error message. +# +# Crash is not simulated. A binlog with partly written event in its end is installed +# and replication is started from it. +# + +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO MASTER_USE_GTID=NO; +--source include/start_slave.inc + +# Make sure the slave is stopped while we are messing with master. +# Otherwise we get occasional failures as the slave manages to re-connect +# to the newly started master and we get extra events applied, causing +# conflicts. +--source include/stop_slave.inc + +--connection master +call mtr.add_suppression("Error in Log_event::read_log_event()"); +--let $datadir= `SELECT @@datadir` + +--let $rpl_server_number= 1 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master-bin.000001 +--copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001 + +--let $rpl_server_number= 1 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +# evidence of the partial binlog +--error ER_ERROR_WHEN_EXECUTING_COMMAND +show binlog events; + +--connection slave +call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log"); +--let $master_use_gtid_option= No +--source include/reset_slave.inc +start slave; + +# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236 +--let $slave_param=Last_IO_Errno +--let $slave_param_value=1236 +--source include/wait_for_slave_param.inc + +--let $slave_field_result_replace= / at [0-9]*/ at XXX/ +--let $status_items= Last_IO_Errno, Last_IO_Error +--source include/show_slave_status.inc + +# +# Cleanup +# + +--connection master +reset master; + +--connection slave +stop slave; +reset slave; +# Table was created from binlog, it may not be created if SQL thread is running +# slowly and IO thread reaches incident before SQL thread applies it. 
+--disable_warnings +drop table if exists t; +--enable_warnings +reset master; + +--echo End of the tests +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_checksum.test b/mysql-test/suite/rpl/t/rpl_checksum.test index 0edf8fda7f329..fc765744b3faa 100644 --- a/mysql-test/suite/rpl/t/rpl_checksum.test +++ b/mysql-test/suite/rpl/t/rpl_checksum.test @@ -1 +1,342 @@ ---source include/rpl_checksum.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# WL2540 replication events checksum +# Testing configuration parameters + +--source include/have_debug.inc +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO MASTER_USE_GTID=NO; +--source include/start_slave.inc + +--connection master +call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log'); +call mtr.add_suppression('Replication event checksum verification failed'); +# due to C failure simulation +call mtr.add_suppression('Relay log write failure: could not queue event from master'); +call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them'); + +# A. read/write access to the global vars: +# binlog_checksum master_verify_checksum slave_sql_verify_checksum + +connection master; + +set @master_save_binlog_checksum= @@global.binlog_checksum; +set @save_master_verify_checksum = @@global.master_verify_checksum; + +select @@global.binlog_checksum as 'must be CRC32 because of the command line option'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.binlog_checksum as 'no session var'; + +select @@global.master_verify_checksum as 'must be zero because of default'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.master_verify_checksum as 'no session var'; + +connection slave; + +set @slave_save_binlog_checksum= @@global.binlog_checksum; +set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum; + +select @@global.slave_sql_verify_checksum as 'must be one because of default'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.slave_sql_verify_checksum as 'no session var'; + +connection master; + +source include/show_binary_logs.inc; +set @@global.binlog_checksum = NONE; +select @@global.binlog_checksum; +--echo *** must be rotations seen *** +source include/show_binary_logs.inc; + +set @@global.binlog_checksum = default; +select @@global.binlog_checksum; + +# testing lack of side-effects in non-effective update of binlog_checksum: +set @@global.binlog_checksum = CRC32; +select @@global.binlog_checksum; +set @@global.binlog_checksum = CRC32; + +set @@global.master_verify_checksum = 0; +set @@global.master_verify_checksum = default; + +--error ER_WRONG_VALUE_FOR_VAR +set @@global.binlog_checksum = ADLER32; +--error ER_WRONG_VALUE_FOR_VAR +set @@global.master_verify_checksum = 2; # the var is of bool type + +connection slave; + +set @@global.slave_sql_verify_checksum = 0; +set @@global.slave_sql_verify_checksum = default; +--error ER_WRONG_VALUE_FOR_VAR +set @@global.slave_sql_verify_checksum = 2; # the var is of bool type + +# +# B. 
Old Slave to New master conditions +# +# while master does not send a checksum-ed binlog the Old Slave can +# work with the New Master + +connection master; + +set @@global.binlog_checksum = NONE; +create table t1 (a int); + +# testing that binlog rotation preserves opt_binlog_checksum value +flush logs; +flush logs; +-- source include/wait_for_binlog_checkpoint.inc +flush logs; + +sync_slave_with_master; +#connection slave; +# checking that rotation on the slave side leaves slave stable +flush logs; +flush logs; +flush logs; +select count(*) as zero from t1; + +source include/stop_slave.inc; + +connection master; +set @@global.binlog_checksum = CRC32; +-- source include/wait_for_binlog_checkpoint.inc +insert into t1 values (1) /* will not be applied on slave due to simulation */; + +# instruction to the dump thread + +connection slave; +set @saved_dbug = @@global.debug_dbug; +set @@global.debug_dbug='d,simulate_slave_unaware_checksum'; +start slave; +--let $slave_io_errno= 1236 +--let $show_slave_io_error= 1 +source include/wait_for_slave_io_error.inc; + +select count(*) as zero from t1; + +set @@global.debug_dbug = @saved_dbug; + +connection slave; +source include/start_slave.inc; + +# +# C. checksum failure simulations +# + +# C1. Failure by a client thread +connection master; +set @@global.master_verify_checksum = 1; +set @save_dbug = @@session.debug_dbug; +set @@session.debug_dbug='d,simulate_checksum_test_failure'; +--error ER_ERROR_WHEN_EXECUTING_COMMAND +show binlog events; +SET debug_dbug= @save_dbug; +set @@global.master_verify_checksum = default; + +#connection master; +sync_slave_with_master; + +connection slave; +source include/stop_slave.inc; + +connection master; +create table t2 (a int); +let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1); + +connection slave; + +# C2. Failure by IO thread +# instruction to io thread +set @saved_dbug = @@global.debug_dbug; +set @@global.debug_dbug='d,simulate_checksum_test_failure'; +start slave io_thread; +# When the checksum error is detected, the slave sets error code 1913 +# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately +# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). +# So we usually get 1595, but it is occasionally possible to get 1913. +--let $slave_io_errno= 1595,1913 +--let $show_slave_io_error= 0 +source include/wait_for_slave_io_error.inc; +set @@global.debug_dbug = @saved_dbug; + +# to make IO thread re-read it again w/o the failure +start slave io_thread; +let $slave_param= Read_Master_Log_Pos; +let $slave_param_value= $pos_master; +source include/wait_for_slave_param.inc; + +# C3. Failure by SQL thread +# instruction to sql thread; +set @@global.slave_sql_verify_checksum = 1; + +set @@global.debug_dbug='d,simulate_checksum_test_failure'; + +start slave sql_thread; +--let $slave_sql_errno= 1593 +--let $show_slave_sql_error= 1 +source include/wait_for_slave_sql_error.inc; + +# resuming SQL thread to parse out the event w/o the failure + +set @@global.debug_dbug = @saved_dbug; +source include/start_slave.inc; + +connection master; +sync_slave_with_master; + +#connection slave; +select count(*) as 'must be zero' from t2; + +# +# D. 
Reset slave, Change-Master, Binlog & Relay-log rotations with +# random value on binlog_checksum on both master and slave +# +connection slave; +stop slave; +--let $master_use_gtid_option= No +--source include/reset_slave.inc + +# randomize slave server's own checksum policy +set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); +flush logs; + +connection master; +set @@global.binlog_checksum= CRC32; +reset master; +flush logs; +create table t3 (a int, b char(5)); + +connection slave; +source include/start_slave.inc; + +connection master; +sync_slave_with_master; + +#connection slave; +select count(*) as 'must be zero' from t3; +source include/stop_slave.inc; +--replace_result $MASTER_MYPORT MASTER_PORT +eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root'; + +connection master; +flush logs; +reset master; +insert into t3 value (1, @@global.binlog_checksum); + +connection slave; +source include/start_slave.inc; +flush logs; + +connection master; +sync_slave_with_master; + +#connection slave; +select count(*) as 'must be one' from t3; + +connection master; +set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); +insert into t3 value (1, @@global.binlog_checksum); +sync_slave_with_master; + +#connection slave; + +#clean-up + +connection master; +drop table t1, t2, t3; +set @@global.binlog_checksum = @master_save_binlog_checksum; +set @@global.master_verify_checksum = @save_master_verify_checksum; + +# +# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540 +# +# Sanity check that verifies that no assertions are triggered because +# of old FD events (generated by versions prior to server released with +# checksums feature) +# +# There is no need for query log, if something wrong this should trigger +# an assertion + +--disable_query_log + +BINLOG ' +MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA== +'; + +--enable_query_log + +#connection slave; +sync_slave_with_master; + + +--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** + +--connection master + +--source include/wait_for_binlog_checkpoint.inc +CREATE TABLE t4 (a INT PRIMARY KEY); +INSERT INTO t4 VALUES (1); + +SET sql_log_bin=0; +CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET debug_dbug= '+d,binlog_inject_new_name_error'; +--error ER_NO_UNIQUE_LOGFILE +FLUSH LOGS; +SET debug_dbug= @old_dbug; + +INSERT INTO t4 VALUES (2); + +--connection slave +--let $slave_sql_errno= 1590 +--source include/wait_for_slave_sql_error.inc + +# Search the error log for the error message. +# The bug was that 4 garbage bytes were output in the middle of the error +# message; by searching for a pattern that spans that location, we can +# catch the error. +let $log_error_= `SELECT @@GLOBAL.log_error`; +if(!$log_error_) +{ + # MySQL Server on windows is started with --console and thus + # does not know the location of its .err log, use default location + let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; +} +--let SEARCH_FILE= $log_error_ +--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590 +--source include/search_pattern_in_file.inc + +SELECT * FROM t4 ORDER BY a; +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc + +--connection master +--save_master_pos + +--connection slave +--sync_with_master +SELECT * FROM t4 ORDER BY a; + + +--connection slave +set @@global.binlog_checksum = @slave_save_binlog_checksum; +set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; + +--echo End of tests + +--connection master +DROP TABLE t4; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_checksum_cache.test b/mysql-test/suite/rpl/t/rpl_checksum_cache.test index 59b338d25562f..e04f618b81ea1 100644 --- a/mysql-test/suite/rpl/t/rpl_checksum_cache.test +++ b/mysql-test/suite/rpl/t/rpl_checksum_cache.test @@ -1 +1,261 @@ ---source include/rpl_checksum_cache.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +-- source include/have_innodb.inc +-- source include/master-slave.inc + +--disable_warnings +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t2 set data=repeat.*'a', @act_size.*"); +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*"); +--enable_warnings + +connection master; +set @save_binlog_cache_size = @@global.binlog_cache_size; +set @save_binlog_checksum = @@global.binlog_checksum; +set @save_master_verify_checksum = @@global.master_verify_checksum; +set @@global.binlog_cache_size = 4096; +set @@global.binlog_checksum = CRC32; +set @@global.master_verify_checksum = 1; + +# restart slave to force the dump thread to verify events (on master side) +connection slave; +source include/stop_slave.inc; +source include/start_slave.inc; + +connection master; + +# +# Testing a critical part of checksum handling dealing with transaction cache. +# The cache's buffer size is set to be less than the transaction's footprint +# in binlog. +# +# To verify combined buffer-by-buffer read out of the file and fixing crc per event +# there are the following parts: +# +# 1. the event size is much less than the cache's buffer +# 2. the event size is bigger than the cache's buffer +# 3. the event size if approximately the same as the cache's buffer +# 4. all in above + +# +# 1. the event size is much less than the cache's buffer +# + +flush status; +show status like "binlog_cache_use"; +show status like "binlog_cache_disk_use"; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# parameter to ensure the test slightly varies binlog content +# between different invocations +# +let $deviation_size=32; +eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb; + +# Now we are going to create transaction which is long enough so its +# transaction binlog will be flushed to disk... 
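What the previous comment calls being "flushed to disk" is the binlog transaction cache spilling into a temporary file once it outgrows binlog_cache_size (4096 here); that spill is exactly what the Binlog_cache_disk_use checks further down assert. A stand-alone sketch with a hypothetical table (demo_spill is not part of the test), assuming binlog_cache_size is already 4096 as set at the top of this file:

CREATE TABLE demo_spill (a INT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
FLUSH STATUS;
BEGIN;
INSERT INTO demo_spill VALUES (1, REPEAT('a', 8192));   # larger than the 4K cache
COMMIT;
SHOW STATUS LIKE 'binlog_cache_disk_use';               # expected to be non-zero
DROP TABLE demo_spill;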
+ +delimiter |; +create procedure test.p_init (n int, size int) +begin + while n > 0 do + select round(RAND() * size) into @act_size; + set @data = repeat('a', @act_size); + insert into t1 values(n, @data ); + set n= n-1; + end while; +end| + +delimiter ;| + +let $1 = 4000; # PB2 can run it slow to time out on following sync_slave_with_master:s + +begin; +--disable_warnings +# todo: check if it is really so. +#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave. +eval call test.p_init($1, $deviation_size); +--enable_warnings +commit; + +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t1, slave:test.t1; +source include/diff_tables.inc; + +# undoing changes with verifying the above once again +connection master; + +begin; +delete from t1; +commit; + +sync_slave_with_master; + + +# +# 2. the event size is bigger than the cache's buffer +# +connection master; + +flush status; +let $t2_data_size= `select 3 * @@global.binlog_cache_size`; +let $t2_aver_size= `select 2 * @@global.binlog_cache_size`; +let $t2_max_rand= `select 1 * @@global.binlog_cache_size`; + +eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb; +let $1=100; +--disable_query_log +begin; +while ($1) +{ + eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; + set @data = repeat('a', @act_size); + insert into t2 set data = @data; + dec $1; +} +commit; +--enable_query_log +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t2, slave:test.t2; +source include/diff_tables.inc; + +# undoing changes with verifying the above once again +connection master; + +begin; +delete from t2; +commit; + +sync_slave_with_master; + +# +# 3. the event size if approximately the same as the cache's buffer +# + +connection master; + +flush status; +let $t3_data_size= `select 2 * @@global.binlog_cache_size`; +let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`; +let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`; + +eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb; + +let $1= 300; +--disable_query_log +begin; +while ($1) +{ + eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; + insert into t3 set data= repeat('a', @act_size); + dec $1; +} +commit; +--enable_query_log +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t3, slave:test.t3; +source include/diff_tables.inc; + +# undoing changes with verifying the above once again +connection master; + +begin; +delete from t3; +commit; + +sync_slave_with_master; + + +# +# 4. 
all in above +# + +connection master; +flush status; + +delimiter |; +eval create procedure test.p1 (n int) +begin + while n > 0 do + case (select (round(rand()*100) % 3) + 1) + when 1 then + select round(RAND() * $deviation_size) into @act_size; + set @data = repeat('a', @act_size); + insert into t1 values(n, @data); + when 2 then + begin + select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; + insert into t2 set data=repeat('a', @act_size); + end; + when 3 then + begin + select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; + insert into t3 set data= repeat('a', @act_size); + end; + end case; + set n= n-1; + end while; +end| +delimiter ;| + +let $1= 1000; +set autocommit= 0; +begin; +--disable_warnings +eval call test.p1($1); +--enable_warnings +commit; + +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t1, slave:test.t1; +source include/diff_tables.inc; + +let $diff_tables=master:test.t2, slave:test.t2; +source include/diff_tables.inc; + +let $diff_tables=master:test.t3, slave:test.t3; +source include/diff_tables.inc; + + +connection master; + +begin; +delete from t1; +delete from t2; +delete from t3; +commit; + +drop table t1, t2, t3; +set @@global.binlog_cache_size = @save_binlog_cache_size; +set @@global.binlog_checksum = @save_binlog_checksum; +set @@global.master_verify_checksum = @save_master_verify_checksum; +drop procedure test.p_init; +drop procedure test.p1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_corruption.test b/mysql-test/suite/rpl/t/rpl_corruption.test index e51d1c65e9503..cd2a1cc7ef207 100644 --- a/mysql-test/suite/rpl/t/rpl_corruption.test +++ b/mysql-test/suite/rpl/t/rpl_corruption.test @@ -1 +1,181 @@ ---source include/rpl_corruption.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +############################################################ +# Purpose: WL#5064 Testing with corrupted events. +# The test emulates the corruption at the vary stages +# of replication: +# - in binlog file +# - in network +# - in relay log +############################################################ + +# +# The tests intensively utilize @@global.debug. Note, +# Bug#11765758 - 58754, +# @@global.debug is read by the slave threads through dbug-interface. +# Hence, before a client thread set @@global.debug we have to ensure that: +# (a) the slave threads are stopped, or (b) the slave threads are in +# sync and waiting. + +--source include/have_debug.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO MASTER_USE_GTID=NO; +--source include/start_slave.inc +--connection master + +# Block legal errors for MTR +call mtr.add_suppression('Found invalid event in binary log'); +call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master'); +call mtr.add_suppression('event read from binlog did not pass crc check'); +call mtr.add_suppression('Replication event checksum verification failed'); +call mtr.add_suppression('Event crc check failed! 
Most likely there is event corruption'); +call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593'); + +SET @old_master_verify_checksum = @@master_verify_checksum; + +# Creating test table/data and set corruption position for testing +--echo # 1. Creating test table/data and set corruption position for testing +--connection master +--echo * insert/update/delete rows in table t1 * +# Corruption algorithm modifies only the first event and +# then will be reset. To avoid checking always the first event +# from binlog (usually it is FD) we randomly execute different +# statements and set position for corruption inside events. + +CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100)); +--disable_query_log +let $i=`SELECT 3+CEILING(10*RAND())`; +let $j=1; +let $pos=0; +while ($i) { + eval INSERT INTO t1 VALUES ($j, 'a', NULL); + if (`SELECT RAND() > 0.7`) + { + eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j; + } + if (`SELECT RAND() > 0.8`) + { + eval DELETE FROM t1 WHERE a = $j; + } + if (!$pos) { + let $pos= query_get_value(SHOW MASTER STATUS, Position, 1); + --sync_slave_with_master + --source include/stop_slave.inc + --disable_query_log + --connection master + } + dec $i; + inc $j; +} +--enable_query_log + + +# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing +--echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS +SET @saved_dbug = @@global.debug_dbug; +SET @@global.debug_dbug="d,corrupt_read_log_event_char"; +--echo SHOW BINLOG EVENTS; +--disable_query_log +send_eval SHOW BINLOG EVENTS FROM $pos; +--enable_query_log +--error ER_ERROR_WHEN_EXECUTING_COMMAND +reap; + +SET @@global.debug_dbug=@saved_dbug; + +# Emulate corruption on master with crc checking on master +--echo # 3. Master read a corrupted event from binlog and send the error to slave + +# We have a rare but nasty potential race here: if the dump thread on +# the master for the _old_ slave connection has not yet discovered +# that the slave has disconnected, we will inject the corrupt event on +# the wrong connection, and the test will fail +# (+d,corrupt_read_log_event2 corrupts only one event). +# So kill any lingering dump thread (we need to kill; otherwise dump thread +# could manage to send all events down the socket before seeing it close, and +# hang forever waiting for new binlog events to be created). +let $id= `select id from information_schema.processlist where command = "Binlog Dump"`; +if ($id) +{ + --disable_query_log + --error 0,1094 + eval kill $id; + --enable_query_log +} +let $wait_condition= + SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump'; +--source include/wait_condition.inc + +SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; +--connection slave +START SLAVE IO_THREAD; +let $slave_io_errno= 1236; +--let $slave_timeout= 10 +--source include/wait_for_slave_io_error.inc +--connection master +SET @@global.debug_dbug=@saved_dbug; + +# Emulate corruption on master without crc checking on master +--echo # 4. 
Master read a corrupted event from binlog and send it to slave +--connection master +SET GLOBAL master_verify_checksum=0; +SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; +--connection slave +START SLAVE IO_THREAD; +# When the checksum error is detected, the slave sets error code 1743 +# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately +# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). +# So we usually get 1595, but it is occasionally possible to get 1743. +let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE +--source include/wait_for_slave_io_error.inc +--connection master +SET @@global.debug_dbug=@saved_dbug; +SET GLOBAL master_verify_checksum=1; + +# Emulate corruption in network +--echo # 5. Slave. Corruption in network +--connection slave +SET @saved_dbug_slave = @@GLOBAL.debug_dbug; +SET @@global.debug_dbug="d,corrupt_queue_event"; +START SLAVE IO_THREAD; +let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE +--source include/wait_for_slave_io_error.inc +SET @@global.debug_dbug=@saved_dbug_slave; + +# Emulate corruption in relay log +--echo # 6. Slave. Corruption in relay log + +SET @@global.debug_dbug="d,corrupt_read_log_event_char"; + +START SLAVE SQL_THREAD; +let $slave_sql_errno= 1593; +--source include/wait_for_slave_sql_error.inc + +SET @@global.debug_dbug=@saved_dbug_slave; + +# Start normal replication and compare same table on master +# and slave +--echo # 7. Seek diff for tables on master and slave +--connection slave +--source include/start_slave.inc +--connection master +--sync_slave_with_master +let $diff_tables= master:test.t1, slave:test.t1; +--source include/diff_tables.inc + +# Clean up +--echo # 8. Clean up +--connection master +set @@global.debug_dbug = @saved_dbug; +SET GLOBAL master_verify_checksum = @old_master_verify_checksum; +DROP TABLE t1; +--sync_slave_with_master + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_basic.test b/mysql-test/suite/rpl/t/rpl_gtid_basic.test index 004003ea52488..5975c6f03c3e3 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_basic.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_basic.test @@ -1,4 +1,575 @@ ---source include/rpl_gtid_basic.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +--source include/have_innodb.inc +--let $rpl_topology=1->2->3->4 +--source include/rpl_init.inc + +# Set up a 4-deep replication topology, then test various fail-overs +# using GTID. 
+# +# A -> B -> C -> D + +connection server_1; +--source include/wait_for_binlog_checkpoint.inc +--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) +--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) +--echo *** GTID position should be empty here *** +--replace_result $binlog_file $binlog_pos +eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); + +CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; +CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, "m1"); +INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4"); +INSERT INTO t2 VALUES (1, "i1"); +BEGIN; +INSERT INTO t2 VALUES (2, "i2"), (3, "i3"); +INSERT INTO t2 VALUES (4, "i4"); +COMMIT; +save_master_pos; +source include/wait_for_binlog_checkpoint.inc; +--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) +--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) +--let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos` +--echo *** GTID position should be non-empty here *** +--replace_result $binlog_file $binlog_pos $gtid_pos_server_1 +eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); + +connection server_2; +sync_with_master; +source include/wait_for_binlog_checkpoint.inc; +--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) +--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) +--echo *** GTID position should be the same as on server_1 *** +--replace_result $binlog_file $binlog_pos $gtid_pos_server_1 +eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; +save_master_pos; + +connection server_3; +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; +save_master_pos; + +connection server_4; +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; + + +--echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A *** +connection server_4; +--source include/stop_slave.inc + +connection server_1; +INSERT INTO t1 VALUES (5, "m1a"); +INSERT INTO t2 VALUES (5, "i1a"); +save_master_pos; + +connection server_4; +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, + MASTER_USE_GTID=CURRENT_POS; +--source include/start_slave.inc +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; + +--echo *** Now move B to D (C is still replicating from B) *** +connection server_2; +--source include/stop_slave.inc +--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, + MASTER_USE_GTID=CURRENT_POS; +--source include/start_slave.inc + +connection server_4; +UPDATE t2 SET b="j1a" WHERE a=5; +save_master_pos; + +connection server_2; +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; + +--echo *** Now move C to D, after letting it fall a little behind *** +connection server_3; +--source include/stop_slave.inc + +connection server_1; +INSERT INTO t2 VALUES (6, "i6b"); +INSERT INTO t2 VALUES (7, "i7b"); +--source include/save_master_gtid.inc + +connection server_3; +--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, + MASTER_USE_GTID=CURRENT_POS; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t2 ORDER BY a; + +--echo *** Now change everything back to what it was, to make 
rpl_end.inc happy +# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled. +connection server_2; +# We need to sync up server_2 before switching. If it happened to have reached +# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to +# server_1, which is (deliberately) missing that transaction. +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +--replace_result $MASTER_MYPORT MASTER_MYPORT +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT; +--source include/start_slave.inc +--source include/wait_for_slave_to_start.inc + +connection server_3; +--source include/stop_slave.inc +--replace_result $SLAVE_MYPORT SLAVE_MYPORT +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +connection server_4; +--source include/stop_slave.inc +--replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3 +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3; +--source include/start_slave.inc + +connection server_1; +DROP TABLE t1,t2; +--source include/save_master_gtid.inc + +--echo *** A few more checks for BINLOG_GTID_POS function *** +--let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1) +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT BINLOG_GTID_POS(); +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT BINLOG_GTID_POS('a'); +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT BINLOG_GTID_POS('a',1,NULL); +SELECT BINLOG_GTID_POS(1,'a'); +SELECT BINLOG_GTID_POS(NULL,NULL); +SELECT BINLOG_GTID_POS('',1); +SELECT BINLOG_GTID_POS('a',1); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616); + + +--echo *** Some tests of @@GLOBAL.gtid_binlog_state *** +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc + +--connection server_1 +SET @old_state= @@GLOBAL.gtid_binlog_state; + +--error ER_BINLOG_MUST_BE_EMPTY +SET GLOBAL gtid_binlog_state = ''; +RESET MASTER; +SET GLOBAL gtid_binlog_state = ''; +FLUSH LOGS; +--source include/show_binary_logs.inc +SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30'; +--source include/show_binary_logs.inc +--let $binlog_file= master-bin.000001 +--let $binlog_start= 4 +--source include/show_binlog_events.inc +#SELECT @@GLOBAL.gtid_binlog_pos; +#SELECT @@GLOBAL.gtid_binlog_state; +--error ER_BINLOG_MUST_BE_EMPTY +SET GLOBAL gtid_binlog_state = @old_state; +RESET MASTER; +SET GLOBAL gtid_binlog_state = @old_state; + +# Check that slave can reconnect again, despite the RESET MASTER, as we +# restored the state. + +CREATE TABLE t1 (a INT PRIMARY KEY); +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (1); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +# We cannot just use sync_with_master as we've done RESET MASTER, so +# slave old-style position is wrong. +# So sync on gtid position instead. +--source include/sync_with_master_gtid.inc + +SELECT * FROM t1; +# Check that the IO gtid position in SHOW SLAVE STATUS is also correct. 
+--let $status_items= Gtid_IO_Pos +--source include/show_slave_status.inc + +--echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() *** + +--connection server_1 +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + +--connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SELECT @@last_gtid; +SET gtid_seq_no=110; +SELECT @@last_gtid; +BEGIN; +SELECT @@last_gtid; +INSERT INTO t1 VALUES (2); +SELECT @@last_gtid; +COMMIT; +SELECT @@last_gtid; +--let $pos= `SELECT @@gtid_binlog_pos` + +--connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +eval SET @pos= '$pos'; +# Check NULL argument. +SELECT master_gtid_wait(NULL); +# Check empty argument returns immediately. +SELECT master_gtid_wait('', NULL); +# Check this gets counted +SHOW STATUS LIKE 'Master_gtid_wait_count'; +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_time'; +# Let's check that we get a timeout +SELECT master_gtid_wait(@pos, 0.5); +SELECT * FROM t1 ORDER BY a; +# Now actually wait until the slave reaches the position +send SELECT master_gtid_wait(@pos); + +--connection server_2 +--source include/start_slave.inc + +--connection s1 +reap; +SELECT * FROM t1 ORDER BY a; + +# Test waiting on a domain that does not exist yet. +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id= 1; +INSERT INTO t1 VALUES (3); +--let $pos= `SELECT @@gtid_binlog_pos` + +--connection s1 +--replace_result $pos POS +eval SET @pos= '$pos'; +SELECT master_gtid_wait(@pos, 0); +SELECT * FROM t1 WHERE a >= 3; +send SELECT master_gtid_wait(@pos, -1); + +--connection server_2 +--source include/start_slave.inc + +--connection s1 +reap; +SELECT * FROM t1 WHERE a >= 3; +# Waiting for only part of the position. +SELECT master_gtid_wait('1-1-1', 0); + +# Now test a lot of parallel master_gtid_wait() calls, completing in different +# order, and some of which time out or get killed on the way. + +--connection s1 +send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110'); + +--connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +# This will time out. No event 0-1-1000 exists +send SELECT master_gtid_wait('0-1-1000', 0.5); + +--connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +# This one we will kill +--let $kill1_id= `SELECT connection_id()` +send SELECT master_gtid_wait('0-1-2000'); + +--connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-10'); + +--connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-6', 1); + +# This one we will kill also. +--connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +--let $kill2_id= `SELECT connection_id()` +send SELECT master_gtid_wait('2-1-5'); + +--connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-10'); + +--connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110'); + +--connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-2'); + +--connection server_2 +# This one completes immediately. 
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +SELECT master_gtid_wait('1-1-1'); +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); +--replace_result $wait_time MASTER_GTID_WAIT_TIME +eval SET @a= $wait_time; +SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected")) + AS Master_gtid_wait_time_as_expected; + + +--connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('0-1-109'); + +--connection server_2 +# This one should time out. +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +SELECT master_gtid_wait('2-1-2', 0.5); +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); +--replace_result $wait_time MASTER_GTID_WAIT_TIME +eval SET @a= $wait_time; +# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling +# and timer inaccuracies could introduce significant jitter. So allow a +# generous interval. +SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected; + +--replace_result $kill1_id KILL_ID +eval KILL QUERY $kill1_id; +--connection s3 +--error ER_QUERY_INTERRUPTED +reap; + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=2; +INSERT INTO t1 VALUES (4); + +--connection s9 +reap; + +--connection server_2 +--replace_result $kill2_id KILL_ID +eval KILL CONNECTION $kill2_id; + +--connection s6 +--error 2013,ER_CONNECTION_KILLED +reap; + +--connection server_1 +SET gtid_domain_id=1; +SET gtid_seq_no=4; +INSERT INTO t1 VALUES (5); +SET gtid_domain_id=2; +SET gtid_seq_no=5; +INSERT INTO t1 VALUES (6); + +--connection s8 +reap; +--connection s1 +reap; +--connection s2 +reap; +--connection s5 +reap; +--connection s10 +reap; + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=10; +INSERT INTO t1 VALUES (7); + +--connection s4 +reap; +--connection s7 +reap; + + +--echo *** Test gtid_slave_pos when used with GTID *** + +--connection server_2 +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1000; +INSERT INTO t1 VALUES (10); +INSERT INTO t1 VALUES (11); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1010; +INSERT INTO t1 VALUES (12); +INSERT INTO t1 VALUES (13); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 2; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1020; +INSERT INTO t1 VALUES (14); +INSERT INTO t1 VALUES (15); +INSERT INTO t1 VALUES (16); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 3; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1030; +# Disable logging Annotate_rows events to preserve events count. +let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; +SET @@binlog_annotate_row_events= 0; +INSERT INTO t1 VALUES (17); +INSERT INTO t1 VALUES (18); +INSERT INTO t1 VALUES (19); +eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 5; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=3; +SET gtid_seq_no=100; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; +SET gtid_domain_id=2; +SET gtid_seq_no=1040; +INSERT INTO t1 VALUES (20); +--save_master_pos + +--connection server_2 +SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; +SET GLOBAL slave_ddl_exec_mode=STRICT; +SET sql_slave_skip_counter=1; +START SLAVE UNTIL master_gtid_pos="3-1-100"; +--let $master_pos=3-1-100 +--source include/sync_with_master_gtid.inc +--source include/wait_for_slave_to_stop.inc +--error ER_NO_SUCH_TABLE +SELECT * FROM t2; +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +# Start the slave again, it should fail on the DROP TABLE as the table is not there. +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); +SET sql_log_bin=1; +START SLAVE; +--let $slave_sql_errno=1051 +--source include/wait_for_slave_sql_error.inc +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter=2; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +SET GLOBAL slave_ddl_exec_mode= @saved_mode; + + +--echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. *** + +# Create an out-of-order binlog on server 2. +# Let server 3 replicate to an out-of-order point, stop it, restart it, +# and check that it replicates correctly despite the out-of-order. 
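A note on what "out-of-order" means in this context: server_2 logs both its own events (server_id 2) and the events it replicates from server_1 (server_id 1) into the same gtid domain, so sequence numbers in its binlog are not monotonically increasing. One way to observe the resulting state on server_2, with purely illustrative values in the comments, is to compare the per-domain position with the per-(domain, server_id) state:

SELECT @@gtid_binlog_pos;    # e.g. 0-1-34 : last GTID written to the binlog in domain 0
SELECT @@gtid_binlog_state;  # e.g. 0-1-34,0-2-35 : last GTID per server_id; 35 was logged before 34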
+ +--connection server_1 +SET gtid_domain_id= @@GLOBAL.gtid_domain_id; +INSERT INTO t1 VALUES (31); +--save_master_pos + +--connection server_2 +--sync_with_master +SET gtid_domain_id= @@GLOBAL.gtid_domain_id; +INSERT INTO t1 VALUES (32); + +--connection server_1 +INSERT INTO t1 VALUES (33); +--save_master_pos + +--connection server_2 +--sync_with_master +--save_master_pos + +--connection server_3 +--sync_with_master +--source include/stop_slave.inc + +--connection server_1 +INSERT INTO t1 VALUES (34); +--save_master_pos + +--connection server_2 +--sync_with_master +--save_master_pos + +--connection server_3 +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 30 ORDER BY a; +--save_master_pos + +--connection server_4 +--sync_with_master +SELECT * FROM t1 WHERE a >= 30 ORDER BY a; + + +# Clean up. +--connection server_1 +DROP TABLE t1; + + +--source include/rpl_end.inc --echo # --echo # Start of 10.2 tests diff --git a/mysql-test/suite/rpl/t/rpl_incident.test b/mysql-test/suite/rpl/t/rpl_incident.test index 4bb6477ca986d..75d28d6a6c6b0 100644 --- a/mysql-test/suite/rpl/t/rpl_incident.test +++ b/mysql-test/suite/rpl/t/rpl_incident.test @@ -1 +1,61 @@ ---source include/rpl_incident.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +--source include/have_debug.inc +--source include/master-slave.inc + +SET @old_binlog_checksum=@@binlog_checksum; +SET GLOBAL BINLOG_CHECKSUM=none; +connection slave; +SET @old_binlog_checksum=@@binlog_checksum; +SET GLOBAL BINLOG_CHECKSUM=none; +connection master; + +--echo **** On Master **** +CREATE TABLE t1 (a INT); + +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; + +set @saved_dbug = @@global.debug_dbug; +SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*'; + +# This will generate an incident log event and store it in the binary +# log before the replace statement. +REPLACE INTO t1 VALUES (4); +--save_master_pos +SELECT * FROM t1; + +set @@global.debug_dbug = @saved_dbug; + +connection slave; +# Wait until SQL thread stops with error LOST_EVENT on master +call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590"); +let $slave_sql_errno= 1590; +let $show_slave_sql_error= 1; +source include/wait_for_slave_sql_error.inc; + +# The 4 should not be inserted into the table, since the incident log +# event should have stop the slave. +--echo **** On Slave **** +SELECT * FROM t1; + +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; +START SLAVE; +--sync_with_master + +# Now, we should have inserted the row into the table and the slave +# should be running. We should also have rotated to a new binary log. + +SELECT * FROM t1; +source include/check_slave_is_running.inc; + +connection master; +SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; +DROP TABLE t1; +--sync_slave_with_master +SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_init_slave_errors.test b/mysql-test/suite/rpl/t/rpl_init_slave_errors.test index 6f6ab7e8d7c9a..46673ea47649f 100644 --- a/mysql-test/suite/rpl/t/rpl_init_slave_errors.test +++ b/mysql-test/suite/rpl/t/rpl_init_slave_errors.test @@ -1 +1,96 @@ ---source include/rpl_init_slave_errors.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +###################################################################### +# Some errors that cause the slave SQL thread to stop are not shown in +# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error +# is only in the server's error log. +# +# Two failures and their respective reporting are verified: +# +# 1 - Failures during slave thread initialization +# 2 - Failures while processing queries passed through the init_slave +# option. +# +# In order to check the first type of failure, we inject a fault in the +# SQL/IO Threads through SET GLOBAL debug. +# +# To check the second type, we set @@global.init_slave to an invalid +# command thus preventing the initialization of the SQL Thread. +# +# Obs: +# 1 - Note that testing failures while initializing the relay log position +# is hard as the same function is called before the code reaches the point +# that we want to test. +# +# 2 - This test does not target failures that are reported while applying +# events such as duplicate keys, errors while reading the relay-log.bin*, +# etc. Such errors are already checked on other tests. +###################################################################### + +###################################################################### +# Configuring the Environment +###################################################################### +source include/have_debug.inc; +source include/have_log_bin.inc; +source include/master-slave.inc; + +connection slave; + +--disable_warnings +stop slave; +--enable_warnings +reset slave; + +###################################################################### +# Injecting faults in the threads' initialization +###################################################################### +connection slave; + +# Set debug flags on slave to force errors to occur +set @saved_dbug = @@global.debug_dbug; +SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init"; + +start slave; + +# +# slave is going to stop because of emulated failures +# but there won't be any crashes nor asserts hit. 
+# +# 1593 = ER_SLAVE_FATAL_ERROR +--let $slave_sql_errno= 1593 +--let $show_slave_sql_error= 1 +--source include/wait_for_slave_sql_error.inc + +call mtr.add_suppression("Failed during slave.* thread initialization"); + +set @@global.debug_dbug = @saved_dbug; + +###################################################################### +# Injecting faults in the init_slave option +###################################################################### +connection slave; + +reset slave; + +SET GLOBAL init_slave= "garbage"; + +start slave; +# 1064 = ER_PARSE_ERROR +--let $slave_sql_errno= 1064 +--let $show_slave_sql_error= 1 +--source include/wait_for_slave_sql_error.inc + +###################################################################### +# Clean up +###################################################################### +SET GLOBAL init_slave= ""; + +# Clean up Last_SQL_Error +--source include/stop_slave_io.inc +RESET SLAVE; +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_loaddata_local.inc b/mysql-test/suite/rpl/t/rpl_loaddata_local.test similarity index 100% rename from mysql-test/suite/rpl/include/rpl_loaddata_local.inc rename to mysql-test/suite/rpl/t/rpl_loaddata_local.test diff --git a/mysql-test/suite/rpl/t/rpl_loaddatalocal.test b/mysql-test/suite/rpl/t/rpl_loaddatalocal.test deleted file mode 100644 index 712041467ab1c..0000000000000 --- a/mysql-test/suite/rpl/t/rpl_loaddatalocal.test +++ /dev/null @@ -1 +0,0 @@ ---source include/rpl_loaddata_local.inc diff --git a/mysql-test/suite/rpl/t/rpl_loadfile.test b/mysql-test/suite/rpl/t/rpl_loadfile.test index 10fecf1f65354..9cd645306908c 100644 --- a/mysql-test/suite/rpl/t/rpl_loadfile.test +++ b/mysql-test/suite/rpl/t/rpl_loadfile.test @@ -1 +1,120 @@ ---source include/rpl_loadfile.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +############################################################################# +# Original Author: JBM # +# Original Date: Aug/18/2005 # +############################################################################# +# TEST: To test the LOAD_FILE() in rbr # +############################################################################# +# Change Author: JBM +# Change Date: 2006-01-16 +########## + +# Includes +-- source include/have_binlog_format_mixed_or_row.inc +-- source include/master-slave.inc + +-- source suite/rpl/include/rpl_loadfile.test + +# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE +# +# DESCRIPTION +# +# Problem: when using load_file string function and mixed binlogging format +# there was no switch to row based binlogging format. This leads +# to scenarios on which the slave replicates the statement and it +# will try to load the file from local file system, which in most +# likely it will not exist. +# +# Solution: +# Marking this function as unsafe for statement format, makes the +# statement using it to be logged in row based format. As such, data +# replicated from the master, becomes the content of the loaded file. +# Consequently, the slave receives the necessary data to complete +# the load_file instruction correctly. +# +# IMPLEMENTATION +# +# The test is implemented as follows: +# +# On Master, +# i) write to file the desired content. 
+# ii) create table and stored procedure with load_file +# iii) stop slave +# iii) execute load_file +# iv) remove file +# +# On Slave, +# v) start slave +# vi) sync it with master so that it gets the updates from binlog (which +# should have bin logged in row format). +# +# If the the binlog format does not change to row, then the assertion +# done in the following step fails. This happens because tables differ +# since the file does not exist anymore, meaning that when slave +# attempts to execute LOAD_FILE statement it inserts NULL on table +# instead of the same contents that the master loaded when it executed +# the procedure (which was executed when file existed). +# +# vii) assert that the contents of master and slave +# table are the same + +--source include/rpl_reset.inc + +connection master; +let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data; + +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval SELECT repeat('x',20) INTO OUTFILE '$file' + +disable_warnings; +DROP TABLE IF EXISTS t1; +enable_warnings; + +CREATE TABLE t1 (t text); +DELIMITER |; +CREATE PROCEDURE p(file varchar(4096)) + BEGIN + INSERT INTO t1 VALUES (LOAD_FILE(file)); + END| +DELIMITER ;| + +# stop slave before issuing the load_file on master +connection slave; +source include/stop_slave.inc; + +connection master; + +# test: check that logging falls back to rbr. +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval CALL p('$file') + +# test: remove the file from the filesystem and assert that slave still +# gets the loaded file +remove_file $file; + +# now that the file is removed it is safe (regarding what we want to test) +# to start slave +connection slave; +source include/start_slave.inc; + +connection master; +sync_slave_with_master; + +# assertion: assert that the slave got the updates even +# if the file was removed before the slave started, +# meaning that contents were indeed transfered +# through binlog (in row format) +let $diff_tables= master:t1, slave:t1; +source include/diff_tables.inc; + +# CLEAN UP +--connection master +DROP TABLE t1; +DROP PROCEDURE p; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_packet.test b/mysql-test/suite/rpl/t/rpl_packet.test index 1bf99c2366b65..cbde486bcbbda 100644 --- a/mysql-test/suite/rpl/t/rpl_packet.test +++ b/mysql-test/suite/rpl/t/rpl_packet.test @@ -1 +1,184 @@ ---source include/rpl_packet.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +# ==== Purpose ==== +# +# Check replication protocol packet size handling +# +# ==== Related bugs ==== +# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave +# BUG#23755: Replicated event larger that max_allowed_packet infinitely re-transmits +# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors +# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET + +# max-out size db name +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); +call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); +let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; +disable_warnings; +eval drop database if exists $db; +enable_warnings; +eval create database $db; + +connection master; +let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`; +let $old_net_buffer_length= `SELECT @@global.net_buffer_length`; +let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`; +SET @@global.max_allowed_packet=1024; +SET @@global.net_buffer_length=1024; + +sync_slave_with_master; +# Restart slave for setting to take effect +source include/stop_slave.inc; +source include/start_slave.inc; + +# Reconnect to master for new setting to take effect +disconnect master; + +# alas, can't use eval here; if db name changed apply the change here +connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________); + +connection master; +select @@net_buffer_length, @@max_allowed_packet; + +create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM; + +INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023'); +sync_slave_with_master; + +eval select count(*) from `$db`.`t1` /* must be 1 */; + +SHOW STATUS LIKE 'Slave_running'; +select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING'; +connection master; +eval drop database $db; +sync_slave_with_master; + +# +# Bug #23755: Replicated event larger that max_allowed_packet infinitely re-transmits +# +# Check that a situation when the size of event on the master is greater than +# max_allowed_packet on the slave does not lead to infinite re-transmits. 
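For context, the packet limit enforced by the replication threads is @@global.slave_max_allowed_packet (saved and later restored by this test), and it is distinct from the max_allowed_packet / net_buffer_length pair that governs ordinary client traffic. A small sketch for inspecting the three knobs this test manipulates:

# slave_max_allowed_packet bounds what the slave I/O and SQL threads accept,
# independently of the client-oriented max_allowed_packet.
SELECT @@global.max_allowed_packet,
       @@global.net_buffer_length,
       @@global.slave_max_allowed_packet;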
+ +connection master; + +# Change the max packet size on master + +SET @@global.max_allowed_packet=4096; +SET @@global.net_buffer_length=4096; + +# Restart slave for new setting to take effect +connection slave; +source include/stop_slave.inc; +source include/start_slave.inc; + +# Reconnect to master for new setting to take effect +disconnect master; +connect (master, localhost, root); +connection master; + +CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM; + +sync_slave_with_master; + +connection master; + +INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048'); + + +# +# Bug#42914: The slave I/O thread must stop after trying to read the above +# event, However there is no Last_IO_Error report. +# + +# The slave I/O thread must stop after trying to read the above event +connection slave; +# 1153 = ER_NET_PACKET_TOO_LARGE +--let $slave_io_errno= 1153 +--let $show_slave_io_error= 1 +--source include/wait_for_slave_io_error.inc + +# TODO: this is needed because of BUG#55790. Remove once that is fixed. +--source include/stop_slave_sql.inc + +# +# Bug#42914: On the master, if a binary log event is larger than +# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG +# is sent to a slave when it requests a dump from the master, thus leading the +# I/O thread to stop. However, there is no Last_IO_Error reported. 
+# + +--let $rpl_only_running_threads= 1 +--source include/rpl_reset.inc +--connection master +DROP TABLE t1; +--sync_slave_with_master + + +connection master; +CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM; +sync_slave_with_master; + +connection master; +INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet)); + +connection slave; +# The slave I/O thread must stop after receiving +# 1153 = ER_NET_PACKET_TOO_LARGE +--let $slave_io_errno= 1153 +--let $show_slave_io_error= 1 +--source include/wait_for_slave_io_error.inc + +# Remove the bad binlog and clear error status on slave. +STOP SLAVE; +RESET SLAVE; +--connection master +RESET MASTER; + + +# +# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET +# +# In BUG#55322, @@session.max_allowed_packet increased each time SHOW +# BINLOG EVENTS was issued. To verify that this bug is fixed, we +# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet +# never changes. We turn off the result log because we don't care +# about the contents of the binlog. + +--disable_result_log +SET @max_allowed_packet_0= @@session.max_allowed_packet; +SHOW BINLOG EVENTS; +SET @max_allowed_packet_1= @@session.max_allowed_packet; +SHOW BINLOG EVENTS; +SET @max_allowed_packet_2= @@session.max_allowed_packet; +--enable_result_log +if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`) +{ + --echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS + --source include/show_rpl_debug_info.inc + SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2; + --die @max_allowed_packet changed after executing SHOW BINLOG EVENTS +} + + +--echo ==== clean up ==== +connection master; +DROP TABLE t1; +eval SET @@global.max_allowed_packet= $old_max_allowed_packet; +eval SET @@global.net_buffer_length= $old_net_buffer_length; +eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet; +# slave is stopped +connection slave; +DROP TABLE t1; + +# Clear Last_IO_Error +RESET SLAVE; + +--source include/rpl_end.inc +# End of tests diff --git a/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test b/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test index 90f09a76546db..493385f1ae304 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test @@ -1 +1,115 @@ ---source include/rpl_parallel_ignored_errors.inc +# ==== Purpose ==== +# +# Test verifies that, in parallel replication, transaction failure notification +# is propagated to all the workers. Workers should abort the execution of +# transaction event groups, whose event positions are higher than the failing +# transaction group. +# +# ==== Implementation ==== +# +# Steps: +# 0 - Create a table t1 on master which has a primary key. Enable parallel +# replication on slave with slave_parallel_mode='optimistic' and +# slave_parallel_threads=3. +# 1 - On slave start a transaction and execute a local INSERT statement +# which will insert value 32. This is done to block the INSERT coming +# from master. +# 2 - On master execute an INSERT statement with value 32, so that it is +# blocked on slave. +# 3 - On slave enable a debug sync point such that it holds the worker thread +# execution as soon as work is scheduled to it. +# 4 - INSERT value 33 on master. It will be held on slave by other worker +# thread due to debug simulation. 
+# 5 - INSERT value 34 on master. +# 6 - On slave, enusre that INSERT 34 has reached a state where it waits for +# its prior transactions to commit. +# 7 - Commit the local INSERT 32 on slave server so that first worker will +# error out. +# 8 - Now send a continue signal to second worker processing 33. It should +# wakeup and propagate the error to INSERT 34. +# 9 - Upon slave stop due to error, check that no rows are found after the +# failed INSERT 32. +# +# ==== References ==== +# +# MDEV-20645: Replication consistency is broken as workers miss the error +# notification from an earlier failed group. +# + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +--enable_connect_log +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET @old_debug= @@GLOBAL.debug_dbug; +SET GLOBAL slave_parallel_mode='optimistic'; +SET GLOBAL slave_parallel_threads= 3; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB; +# MDEV-515 takes X-lock on the table for the first insert. +# So concurrent insert won't happen on the table +INSERT INTO t1 VALUES(1); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc + +--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +BEGIN; +INSERT INTO t1 VALUES (32); + +--connection server_1 +INSERT INTO t1 VALUES (32); + +--connection server_2 +--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)" +--source include/wait_condition.inc +SET GLOBAL debug_dbug="+d,hold_worker_on_schedule"; +SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker"; + +--connection server_1 +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (33); + +--connection server_2 +SET debug_sync='now WAIT_FOR reached_pause'; + +--connection server_1 +INSERT INTO t1 VALUES (34); + +--connection server_2 +--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit" +--source include/wait_condition.inc +--connection con_temp2 +COMMIT; + +# Clean up. 
+--connection server_2 +--source include/stop_slave.inc +--let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32 +--let $assert_text= table t1 should have zero rows where a>32 +--source include/assert.inc +SELECT * FROM t1 WHERE a>32; +DELETE FROM t1 WHERE a=32; + +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL debug_dbug=@old_debug; +SET DEBUG_SYNC= 'RESET'; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; +--disable_connect_log +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_optimistic_error_stop.test b/mysql-test/suite/rpl/t/rpl_parallel_optimistic_error_stop.test new file mode 100644 index 0000000000000..27f38d47bdbfb --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_optimistic_error_stop.test @@ -0,0 +1,180 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit"); +call mtr.add_suppression("Slave: Duplicate entry '99'"); + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1,1); # hit a dup entry on slave +INSERT INTO t1 VALUES(2,1); # races to "win" the last exit +INSERT INTO t1 VALUES(3,1); +INSERT INTO t1 VALUES(4,1); # make W3 race over W1 +--save_master_pos + +--connection server_2 +--sync_with_master +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET @old_debug_dbug = @@global.debug_dbug; +# In a group of W1,W2,W3 of the same batch W2 simulates slowness. +SET @@global.debug_dbug = "d,hold_worker2_favor_worker3"; +SET GLOBAL slave_parallel_threads=4; +CHANGE MASTER TO master_use_gtid=slave_pos; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='optimistic'; + +# MDEV-30780 optimistic parallel slave hangs after hit an error +# Test workers hang scenario to prove it's no more neither +# out-of-order access to the active gco list. +# +# The test provides how to reproduce on the OLD version, false by default. +# That branch approximates the original hang with an assert that +# confirms the OLD version indeed could access already reclaimed gco. +--let $old_version_regression=0 + + +--connection server_1 + +# Let W1,W2,W3,W4 parallel workers that are going to execute +# the following transaction. +# W1 holds on with the 1st statement +# then crashes W3 with the 2nd into retry, +# finally hits with the 3rd a dup entry, on slave. +SET @@gtid_seq_no = 2001; +BEGIN; + UPDATE t1 SET b = 11 WHERE a = 4; + UPDATE t1 SET b = 11 WHERE a = 3; + UPDATE t1 SET a = 99 WHERE a = 1; +COMMIT; +# In the buggy version W2 races to "win" the exit last (of W1..3) +# and by that to access last a gco struct, garbage-collected. +UPDATE t1 SET b = 2 WHERE a = 2; +# W3 garbage-collects the gco struct in the buggy version. +UPDATE t1 SET b = 3 WHERE a = 3; +# W4 resides in following "singleton" batch to a W2 replacement +# in the buggy version to allow W3 reclaim the batch's gco. 
+DROP TABLE IF EXISTS phantom_1; + +--source include/save_master_gtid.inc + +--connect (slave_local_0, 127.0.0.1, root,, test, $SLAVE_MYPORT,) +begin; + UPDATE t1 set b = 11 where a = 4; +--connect (slave_local_1, 127.0.0.1, root,, test, $SLAVE_MYPORT,) +begin; + INSERT INTO t1 VALUES (99, 11); + +--connect (slave_local_2, 127.0.0.1, root,, test, $SLAVE_MYPORT,) +begin; + UPDATE t1 SET b = 12 WHERE a = 2; + +--connect (slave_local_3, 127.0.0.1, root,, test, $SLAVE_MYPORT,) +begin; + UPDATE t1 SET b = 13 WHERE a = 3; + +--connection server_2 +--source include/start_slave.inc + +--echo # W4 is waiting to start its DROP + +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit%" +--source include/wait_condition.inc + +--connection slave_local_3 +# make W3 to set E.cc <- 1 + rollback; +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%" +--source include/wait_condition.inc + +--connection slave_local_0 +# make W3 into retry and delay it to let W1 hit a dupicate error first, +# see 'commit' by slave_local_1. + rollback; +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "debug sync point: now" +--source include/wait_condition.inc +SELECT count(*) = 0 as "W3 undid its commit state" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%"; + + +--connection slave_local_2 + rollback; +# wait for W2 to start committing E.cc <- 2 +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit" +--source include/wait_condition.inc + +--connection slave_local_1 + +# W1 errors out +# A. to alert W3 +# B. W3 will *not* wake up W4 in the fixed version, having to wait for W2 demise. +# C. W2 will notify W3 that releases W4 as it would do in normal cases. +commit; + +if (!$old_version_regression) +{ +# A. In the fixed version show-processlist W4 is still in the ordered waiting +SELECT COUNT(*) = 1 as "W4 remains with the same status" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit%"; +--let $status= query_get_value("show slave status", Slave_SQL_Running, 1) +--echo # Slave_SQL_Running YES = $status + +# B. In the fixed version W3 is waiting for W2,... +--let $wait_condition= SELECT count(*) = 1 as "W4 is waiting" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%" +--source include/wait_condition.inc +--echo # while W2 is held back ... +--let $wait_condition= SELECT count(*) = 1 as "W2 simulates slowness" FROM information_schema.processlist WHERE state LIKE "debug sync point: now" +--source include/wait_condition.inc + +# C. # ...until NOW. +SET DEBUG_SYNC = 'now SIGNAL cont_worker2'; + +} + +# To reproduce the hang on the OLD version ... +if ($old_version_regression) +{ + # replace the actual fixes block with checking W3,W4 have actually committed, + # followed by signaling to W2 like on behalf of W4 which would end up in the hang. 
+ --let $wait_condition= SELECT COUNT(*) = 0 as "W4 has moved on" FROM information_schema.processlist WHERE state like "Waiting for prior transaction to start commit" + --source include/wait_condition.inc + --let $wait_condition= SELECT count(*) = 0 as "W3 does not wait on W2" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%" +--source include/wait_condition.inc + + --let $wait_condition= SELECT count(*) = 1 as "W2 simulates slowness" FROM information_schema.processlist WHERE state LIKE "debug sync point: now" + --source include/wait_condition.inc + + # Like above, but signaling is done after W4 is done to violate the commit order + # that must fire a debug assert. + SET DEBUG_SYNC = 'now SIGNAL cont_worker2'; +} + +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +# Restore the slave data and resume with replication +DELETE FROM t1 WHERE a=99; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +# +# Clean up. +# +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET @@global.debug_dbug = @old_debug_dbug; +SET debug_sync = RESET; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test b/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test index 8c8892d53707f..cddc9286bd28d 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test @@ -1 +1,38 @@ ---source include/rpl_parallel_show_binlog_events_purge_logs.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER +# +# The function mysql_show_binlog_events has a local stack variable +# 'LOG_INFO linfo;', which is assigned to thd->current_linfo, however +# this variable goes out of scope and is destroyed before clean +# thd->current_linfo. +# +# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS to make sure +# that with the fix local variable linfo is valid along all +# mysql_show_binlog_events function scope. +# +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection slave +SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end'; +--send SHOW BINLOG EVENTS + +--connection slave1 +SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events'; +FLUSH LOGS; +SET DEBUG_SYNC= 'now SIGNAL end'; + +--connection slave +--disable_result_log +--reap +--enable_result_log +SET DEBUG_SYNC= 'RESET'; + +--connection master +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_relayrotate.test b/mysql-test/suite/rpl/t/rpl_relayrotate.test index 720739e14c079..4de554d314359 100644 --- a/mysql-test/suite/rpl/t/rpl_relayrotate.test +++ b/mysql-test/suite/rpl/t/rpl_relayrotate.test @@ -1 +1,18 @@ ---source include/rpl_relayrotate.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +####################################################### +# Wrapper for rpl_relayrotate.test to allow multi # +# Engines to reuse test code. By JBM 2006-02-15 # +####################################################### +-- source include/have_innodb.inc +# Slow test, don't run during staging part +-- source include/not_staging.inc +-- source include/master-slave.inc + +let $engine_type=innodb; +-- source suite/rpl/include/rpl_relayrotate.test +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync.test b/mysql-test/suite/rpl/t/rpl_semi_sync.test index 5c17bcb2344d1..720ec05935056 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync.test @@ -1 +1,532 @@ ---source include/rpl_semi_sync.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +source include/not_embedded.inc; +source include/have_innodb.inc; +source include/master-slave.inc; + +let $engine_type= InnoDB; + +# Suppress warnings that might be generated during the test +connection master; +call mtr.add_suppression("Timeout waiting for reply of binlog"); +call mtr.add_suppression("Read semi-sync reply"); +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); +call mtr.add_suppression("mysqld: Got an error reading communication packets"); +connection slave; +# While 'Current_Pos' exists as an option for Using_Gtd, keeping these +# events in the binlog will update gtid_binlog_pos, and the later calls to +# set `@@global.gtid_slave_pos= ""` will provide warning messages with +# inconsistent GTID values because the seq_nos are non-deterministic with +# the masters events coming in concurrently +set sql_log_bin=0; +call mtr.add_suppression("Master server does not support semi-sync"); +call mtr.add_suppression("Semi-sync slave .* reply"); +call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group"); +set sql_log_bin=1; +connection master; + +# wait for dying connections (if any) to disappear +let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed'; +--source include/wait_condition.inc + +# After fix of BUG#45848, semi-sync slave should not create any extra +# connections on master, save the count of connections before start +# semi-sync slave for comparison below. 
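+# (Illustrative note, not part of the original test: this Threads_connected
+#  reading is compared with a second one taken after the semi-sync slave has
+#  connected, and the difference between the two is expected to be 0.)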
+let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); + +--echo # +--echo # Uninstall semi-sync plugins on master and slave +--echo # +connection slave; +source include/stop_slave.inc; +--source include/reset_slave.inc +set global rpl_semi_sync_master_enabled= 0; +set global rpl_semi_sync_slave_enabled= 0; + +connection master; +reset master; +set global rpl_semi_sync_master_enabled= 0; +set global rpl_semi_sync_slave_enabled= 0; + +--echo # +--echo # Main test of semi-sync replication start here +--echo # + +connection master; + +set global rpl_semi_sync_master_timeout= 60000; # 60s + +echo [ default state of semi-sync on master should be OFF ]; +show variables like 'rpl_semi_sync_master_enabled'; + +echo [ enable semi-sync on master ]; +set global rpl_semi_sync_master_enabled = 1; +show variables like 'rpl_semi_sync_master_enabled'; + +echo [ status of semi-sync on master should be ON even without any semi-sync slaves ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed +--echo # BUG#45673 Semisynch reports correct operation even if no slave is connected +--echo # + +# BUG#45672 When semi-sync is enabled on master, it would allocate +# transaction node even without semi-sync slave connected, and would +# finally result in transaction node allocation error. +# +# Semi-sync master will pre-allocate 'max_connections' transaction +# nodes, so here we do more than that much transactions to check if it +# will fail or not. +# select @@global.max_connections + 1; +let $i= `select @@global.max_connections + 1`; +disable_query_log; +eval create table t1 (a int) engine=$engine_type; +while ($i) +{ + eval insert into t1 values ($i); + dec $i; +} +drop table t1; +enable_query_log; + +# BUG#45673 +echo [ status of semi-sync on master should be OFF ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +# reset master to make sure the following test will start with a clean environment +reset master; + +connection slave; + +echo [ default state of semi-sync on slave should be OFF ]; +show variables like 'rpl_semi_sync_slave_enabled'; + +echo [ enable semi-sync on slave ]; +set global rpl_semi_sync_slave_enabled = 1; +show variables like 'rpl_semi_sync_slave_enabled'; +source include/start_slave.inc; + +connection master; + +# NOTE: Rpl_semi_sync_master_client will only be updated when +# semi-sync slave has started binlog dump request +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; + +echo [ initial master state after the semi-sync slave connected ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +replace_result $engine_type ENGINE_TYPE; +eval create table t1(a int) engine = $engine_type; + +echo [ master state after CREATE TABLE statement ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +# After fix of BUG#45848, semi-sync slave should not create any extra +# connections on master. 
+let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); +replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE; +eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0'; + +echo [ insert records to table ]; +insert t1 values (10); +insert t1 values (9); +insert t1 values (8); +insert t1 values (7); +insert t1 values (6); +insert t1 values (5); +insert t1 values (4); +insert t1 values (3); +insert t1 values (2); +insert t1 values (1); + +echo [ master status after inserts ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +sync_slave_with_master; + +echo [ slave status after replicated inserts ]; +show status like 'Rpl_semi_sync_slave_status'; + +select count(distinct a) from t1; +select min(a) from t1; +select max(a) from t1; + +--echo +--echo # BUG#50157 +--echo # semi-sync replication crashes when replicating a transaction which +--echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ; + +connection master; +SET SESSION AUTOCOMMIT= 0; +CREATE TABLE t2(c1 INT) ENGINE=innodb; +sync_slave_with_master; + +connection master; +BEGIN; +--echo +--echo # Even though it is in a transaction, this statement is binlogged into binlog +--echo # file immediately. +--disable_warnings +CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1; +--enable_warnings +--echo +--echo # These statements will not be binlogged until the transaction is committed +INSERT INTO t2 VALUES(11); +INSERT INTO t2 VALUES(22); +COMMIT; + +DROP TABLE t2, t3; +SET SESSION AUTOCOMMIT= 1; +sync_slave_with_master; + + +--echo # +--echo # Test semi-sync master will switch OFF after one transaction +--echo # timeout waiting for slave reply. +--echo # +connection slave; +source include/stop_slave.inc; + +connection master; +--source include/kill_binlog_dump_threads.inc +set global rpl_semi_sync_master_timeout= 5000; + +# The first semi-sync check should be on because after slave stop, +# there are no transactions on the master. +echo [ master status should be ON ]; + +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; + +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 0; +source include/wait_for_status_var.inc; + +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +echo [ semi-sync replication of these transactions will fail ]; +insert into t1 values (500); + +# Wait for the semi-sync replication of this transaction to timeout +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= OFF; +source include/wait_for_status_var.inc; + +# The second semi-sync check should be off because one transaction +# times out during waiting. +echo [ master status should be OFF ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +# Semi-sync status on master is now OFF, so all these transactions +# will be replicated asynchronously. 
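+# (Illustrative note, not part of the original test: the master is expected to
+#  stay in asynchronous mode, i.e. Rpl_semi_sync_master_status remains OFF,
+#  until the slave reconnects and catches up in the next section.)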
+delete from t1 where a=10; +delete from t1 where a=9; +delete from t1 where a=8; +delete from t1 where a=7; +delete from t1 where a=6; +delete from t1 where a=5; +delete from t1 where a=4; +delete from t1 where a=3; +delete from t1 where a=2; +delete from t1 where a=1; + +insert into t1 values (100); + +echo [ master status should be OFF ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # Test semi-sync status on master will be ON again when slave catches up +--echo # + +# Save the master position for later use. +save_master_pos; + +connection slave; + +echo [ slave status should be OFF ]; +show status like 'Rpl_semi_sync_slave_status'; +source include/start_slave.inc; +sync_with_master; + +echo [ slave status should be ON ]; +show status like 'Rpl_semi_sync_slave_status'; + +select count(distinct a) from t1; +select min(a) from t1; +select max(a) from t1; + +connection master; + +# The master semi-sync status should be on again after slave catches up. +echo [ master status should be ON again after slave catches up ]; + +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; +show status like 'Rpl_semi_sync_master_clients'; + +--echo # +--echo # Test disable/enable master semi-sync on the fly. +--echo # + +drop table t1; +sync_slave_with_master; + +source include/stop_slave.inc; + +--echo # +--echo # Flush status +--echo # +connection master; +echo [ Semi-sync master status variables before FLUSH STATUS ]; +SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; +SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; +# Do not write the FLUSH STATUS to binlog, to make sure we'll get a +# clean status after this. +FLUSH NO_WRITE_TO_BINLOG STATUS; +echo [ Semi-sync master status variables after FLUSH STATUS ]; +SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; +SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; + +connection master; + +source include/show_master_logs.inc; +show variables like 'rpl_semi_sync_master_enabled'; + +echo [ disable semi-sync on the fly ]; +set global rpl_semi_sync_master_enabled=0; +show variables like 'rpl_semi_sync_master_enabled'; +show status like 'Rpl_semi_sync_master_status'; + +echo [ enable semi-sync on the fly ]; +set global rpl_semi_sync_master_enabled=1; +show variables like 'rpl_semi_sync_master_enabled'; +show status like 'Rpl_semi_sync_master_status'; + +--echo # +--echo # Test RESET MASTER/SLAVE +--echo # + +connection slave; + +source include/start_slave.inc; + +connection master; + +replace_result $engine_type ENGINE_TYPE; +eval create table t1 (a int) engine = $engine_type; +drop table t1; + +sync_slave_with_master; + +echo [ test reset master ]; +connection master; + +reset master; + +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +connection slave; + +source include/stop_slave.inc; +--source include/reset_slave.inc + +# Kill the dump thread on master for previous slave connection and +--source include/kill_binlog_dump_threads.inc + +connection slave; +source include/start_slave.inc; + +connection master; + +# Wait for dump thread to start, Rpl_semi_sync_master_clients will be +# 1 after dump thread started. 
+let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; + +replace_result $engine_type ENGINE_TYPE; +eval create table t1 (a int) engine = $engine_type; +insert into t1 values (1); +insert into t1 values (2), (3); + +sync_slave_with_master; + +select * from t1; + +connection master; + +echo [ master semi-sync status should be ON ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # Start semi-sync replication without SUPER privilege +--echo # +connection slave; +source include/stop_slave.inc; +--source include/reset_slave.inc +connection master; +reset master; + +# Kill the dump thread on master for previous slave connection and wait for it to exit +--source include/kill_binlog_dump_threads.inc + +# Do not binlog the following statement because it will generate +# different events for ROW and STATEMENT format +set sql_log_bin=0; +grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; +flush privileges; +set sql_log_bin=1; +connection slave; +grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; +flush privileges; +change master to master_user='rpl',master_password='rpl_password'; +source include/start_slave.inc; +show status like 'Rpl_semi_sync_slave_status'; +connection master; + +# Wait for the semi-sync binlog dump thread to start +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; +echo [ master semi-sync should be ON ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; +insert into t1 values (4); +insert into t1 values (5); +echo [ master semi-sync should be ON ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # Test semi-sync slave connect to non-semi-sync master +--echo # + +# Disable semi-sync on master +connection slave; +source include/stop_slave.inc; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; + +connection master; + +# Kill the dump thread on master for previous slave connection and wait for it to exit +--source include/kill_binlog_dump_threads.inc + +echo [ Semi-sync status on master should be ON ]; +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 0; +source include/wait_for_status_var.inc; +show status like 'Rpl_semi_sync_master_status'; +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; +set global rpl_semi_sync_master_enabled= 0; + +connection slave; +SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; +source include/start_slave.inc; +connection master; +insert into t1 values (8); +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; +echo [ master semi-sync clients should be 1, status should be OFF ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +sync_slave_with_master; +show status like 'Rpl_semi_sync_slave_status'; + +# Uninstall semi-sync plugin on master +connection slave; +source include/stop_slave.inc; +connection master; +set global rpl_semi_sync_master_enabled= 0; + 
+connection slave; +SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; +source include/start_slave.inc; + +connection master; +insert into t1 values (10); +sync_slave_with_master; + +--echo # +--echo # Test non-semi-sync slave connect to semi-sync master +--echo # + +connection master; +set global rpl_semi_sync_master_timeout= 5000; # 5s +set global rpl_semi_sync_master_enabled= 1; + +connection slave; +source include/stop_slave.inc; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; + +echo [ uninstall semi-sync slave plugin ]; +set global rpl_semi_sync_slave_enabled= 0; + +echo [ reinstall semi-sync slave plugin and disable semi-sync ]; +SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; +source include/start_slave.inc; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; + +--echo # +--echo # Clean up +--echo # + +connection slave; +source include/stop_slave.inc; +set global rpl_semi_sync_slave_enabled= 0; + +connection master; +set global rpl_semi_sync_master_enabled= 0; + +connection slave; +change master to master_user='root',master_password=''; +source include/start_slave.inc; + +connection master; +drop table t1; +sync_slave_with_master; + +connection master; +drop user rpl@127.0.0.1; +flush privileges; +set global rpl_semi_sync_master_timeout= default; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test b/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test index dcff4030fdbb5..5eae91a55f25b 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test @@ -23,8 +23,6 @@ SET @@global.rpl_semi_sync_master_wait_no_slave = 1; --echo # It's okay to see "Killed" but we should not see "Timeout" in the log. call mtr.add_suppression("Killed waiting for reply of binlog"); -call mtr.add_suppression("Run function 'after_commit' in plugin 'rpl_semi_sync_master' failed"); -call mtr.add_suppression("Run function 'after_sync' in plugin 'rpl_semi_sync_master' failed"); --echo # --echo # Test wait point = AFTER_COMMIT diff --git a/mysql-test/suite/rpl/t/rpl_skip_replication.test b/mysql-test/suite/rpl/t/rpl_skip_replication.test index 66fdbb8915a34..d1044c4bf51ba 100644 --- a/mysql-test/suite/rpl/t/rpl_skip_replication.test +++ b/mysql-test/suite/rpl/t/rpl_skip_replication.test @@ -1 +1,408 @@ ---source include/rpl_skip_replication.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it. +# +# Usage: +# +# --let $use_remote_mysqlbinlog= 1 # optional +# --source suite/rpl/include/rpl_skip_replication.inc +# +# The script uses MYSQLBINLOG to verify certain results. +# By default, it uses binary logs directly. If it is undesirable, +# this behavior can be overridden by setting $use_remote_binlog +# as shown above. +# The value will be unset after every execution of the script, +# so if it is needed, it should be set explicitly before each call. +# + +--source include/have_innodb.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO MASTER_USE_GTID=NO; +--source include/start_slave.inc +--connection master + +connection slave; +# Test that SUPER is required to change @@replicate_events_marked_for_skip. 
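+# (Illustrative note, not part of the original test: the user created below is
+#  granted a broad set of privileges but deliberately not SUPER, so its
+#  SET GLOBAL attempt is expected to fail with ER_SPECIFIC_ACCESS_DENIED_ERROR.)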
+CREATE USER 'nonsuperuser'@'127.0.0.1'; +GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE, + SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1'; +connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,); +connection nonpriv; +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +disconnect nonpriv; +connection slave; +DROP USER'nonsuperuser'@'127.0.0.1'; + +SELECT @@global.replicate_events_marked_for_skip; +--error ER_SLAVE_MUST_STOP +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +SELECT @@global.replicate_events_marked_for_skip; +STOP SLAVE; +--error ER_GLOBAL_VARIABLE +SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER; +SELECT @@global.replicate_events_marked_for_skip; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +SELECT @@global.replicate_events_marked_for_skip; +START SLAVE; + +connection master; +SELECT @@skip_replication; +--error ER_LOCAL_VARIABLE +SET GLOBAL skip_replication=1; +SELECT @@skip_replication; + +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb; +INSERT INTO t1(a) VALUES (1); +INSERT INTO t2(a) VALUES (1); + + +# Test that master-side filtering works. +SET skip_replication=1; + +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +INSERT INTO t1(a) VALUES (2); +INSERT INTO t2(a) VALUES (2); + +# Inject a rotate event in the binlog stream sent to slave (otherwise we will +# fail sync_slave_with_master as the last event on the master is not present +# on the slave). +FLUSH NO_WRITE_TO_BINLOG LOGS; + +sync_slave_with_master; +connection slave; +SHOW TABLES; +SELECT * FROM t1; +SELECT * FROM t2; + +connection master; +DROP TABLE t3; + +FLUSH NO_WRITE_TO_BINLOG LOGS; +sync_slave_with_master; + + +# Test that slave-side filtering works. +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +START SLAVE; + +connection master; +SET skip_replication=1; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +INSERT INTO t1(a) VALUES (3); +INSERT INTO t2(a) VALUES (3); + +# Inject a rotate event in the binlog stream sent to slave (otherwise we will +# fail sync_slave_with_master as the last event on the master is not present +# on the slave). +FLUSH NO_WRITE_TO_BINLOG LOGS; + +sync_slave_with_master; +connection slave; +SHOW TABLES; +SELECT * FROM t1; +SELECT * FROM t2; + +connection master; +DROP TABLE t3; + +FLUSH NO_WRITE_TO_BINLOG LOGS; +sync_slave_with_master; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; + + +# Test that events with @@skip_replication=1 are not filtered when filtering is +# not set on slave. +connection master; +SET skip_replication=1; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +INSERT INTO t3(a) VALUES(2); +sync_slave_with_master; +connection slave; +SELECT * FROM t3; +connection master; +DROP TABLE t3; + +# +# Test that the slave will preserve the @@skip_replication flag in its +# own binlog. +# + +TRUNCATE t1; +sync_slave_with_master; +connection slave; +RESET MASTER; + +connection master; +SET skip_replication=0; +INSERT INTO t1 VALUES (1,0); +SET skip_replication=1; +INSERT INTO t1 VALUES (2,0); +SET skip_replication=0; +INSERT INTO t1 VALUES (3,0); + +sync_slave_with_master; +connection slave; +# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have +# applied all events. 
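+# (Illustrative note, not part of the original test: all three rows (1,0),
+#  (2,0) and (3,0) are expected here, including the row written while
+#  @@skip_replication was 1.)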
+SELECT * FROM t1 ORDER by a; + +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +let $SLAVE_DATADIR= `select @@datadir`; + +connection master; +TRUNCATE t1; + +# Now apply the slave binlog to the master, to check that both the slave +# and mysqlbinlog will preserve the @@skip_replication flag. + +--let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001 +if ($use_remote_mysqlbinlog) +{ + --let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001 + --let $use_remote_mysqlbinlog= 0 +} +--exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog +--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog + +# The master should have all three events. +SELECT * FROM t1 ORDER by a; + +# The slave should be missing event 2, which is marked with the +# @@skip_replication flag. + +connection slave; +START SLAVE; + +connection master; +sync_slave_with_master; + +connection slave; +SELECT * FROM t1 ORDER by a; + +# +# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication +# events. +# + +connection master; +TRUNCATE t1; + +sync_slave_with_master; +connection slave; +STOP SLAVE; +# We will skip two INSERTs (in addition to any skipped due to +# @@skip_replication). Since from 5.5 every statement is wrapped in +# BEGIN ... END, we need to skip 6 events for this. +SET GLOBAL sql_slave_skip_counter=6; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +START SLAVE; + +connection master; +# Need to fix @@binlog_format to get consistent event count. +SET @old_binlog_format= @@binlog_format; +SET binlog_format= statement; +SET skip_replication=0; +INSERT INTO t1 VALUES (1,5); +SET skip_replication=1; +INSERT INTO t1 VALUES (2,5); +SET skip_replication=0; +INSERT INTO t1 VALUES (3,5); +INSERT INTO t1 VALUES (4,5); +SET binlog_format= @old_binlog_format; + +sync_slave_with_master; +connection slave; + +# The slave should have skipped the first three inserts (number 1 and 3 due +# to @@sql_slave_skip_counter=2, number 2 due to +# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4 +# should be left. +SELECT * FROM t1; + + +# +# Check that BINLOG statement preserves the @@skip_replication flag. +# +connection slave; +# Need row @@binlog_format for BINLOG statements containing row events. +--source include/stop_slave.inc +SET @old_slave_binlog_format= @@global.binlog_format; +SET GLOBAL binlog_format= row; +--source include/start_slave.inc + +connection master; +TRUNCATE t1; + +SET @old_binlog_format= @@binlog_format; +SET binlog_format= row; +# Format description log event. 
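+# (Illustrative note, not part of the original test: the base64 payloads below
+#  are raw binlog events; the format description event has to be supplied first
+#  so that the server can decode the two row events that follow it.)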
+BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAA371saA=='; +# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1 +BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC +wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA='; +# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0 +BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC +wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA='; +SET binlog_format= @old_binlog_format; + +SELECT * FROM t1 ORDER BY a; +sync_slave_with_master; +connection slave; +# Slave should have only the second insert, the first should be ignored due to +# the @@skip_replication flag. +SELECT * FROM t1 ORDER by a; + +--source include/stop_slave.inc +SET GLOBAL binlog_format= @old_slave_binlog_format; +--source include/start_slave.inc + + +# Test that it is not possible to change @@skip_replication inside a +# transaction or statement, thereby replicating only parts of statements +# or transactions. +connection master; +SET skip_replication=0; + +BEGIN; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=0; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=1; +ROLLBACK; +SET skip_replication=1; +BEGIN; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=0; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=1; +COMMIT; +SET autocommit=0; +INSERT INTO t2(a) VALUES(100); +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=1; +ROLLBACK; +SET autocommit=1; + +SET skip_replication=1; +--delimiter | +CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END| +CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END| +CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END| +--delimiter ; +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SELECT foo(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SELECT baz(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET @a= foo(1); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET @a= baz(1); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +UPDATE t2 SET b=foo(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +UPDATE t2 SET b=baz(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +INSERT INTO t1 VALUES (101, foo(1)); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +INSERT INTO t1 VALUES (101, baz(0)); +SELECT @@skip_replication; +CALL bar(0); +SELECT @@skip_replication; +CALL bar(1); +SELECT @@skip_replication; +DROP FUNCTION foo; +DROP PROCEDURE bar; +DROP FUNCTION baz; + + +# Test that master-side filtering happens on the master side, and that +# slave-side filtering happens on the slave. + +# First test that events do not reach the slave when master-side filtering +# is configured. Do this by replicating first with only the IO thread running +# and master-side filtering; then change to no filtering and start the SQL +# thread. This should still skip the events, as master-side filtering +# means the events never reached the slave. 
+connection master; +SET skip_replication= 0; +TRUNCATE t1; +sync_slave_with_master; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +START SLAVE IO_THREAD; +connection master; +SET skip_replication= 1; +INSERT INTO t1(a) VALUES (1); +SET skip_replication= 0; +INSERT INTO t1(a) VALUES (2); +--source include/save_master_pos.inc +connection slave; +--source include/sync_io_with_master.inc +STOP SLAVE IO_THREAD; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; +connection master; +sync_slave_with_master; +connection slave; +# Now only the second insert of (2) should be visible, as the first was +# filtered on the master, so even though the SQL thread ran without skipping +# events, it will never see the event in the first place. +SELECT * FROM t1; + +# Now tests that when slave-side filtering is configured, events _do_ reach +# the slave. +connection master; +SET skip_replication= 0; +TRUNCATE t1; +sync_slave_with_master; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +START SLAVE IO_THREAD; +connection master; +SET skip_replication= 1; +INSERT INTO t1(a) VALUES (1); +SET skip_replication= 0; +INSERT INTO t1(a) VALUES (2); +--source include/save_master_pos.inc +connection slave; +--source include/sync_io_with_master.inc +STOP SLAVE IO_THREAD; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; +connection master; +sync_slave_with_master; +connection slave; +# Now both inserts should be visible. Since filtering was configured to be +# slave-side, the event is in the relay log, and when the SQL thread ran we +# had disabled filtering again. +SELECT * FROM t1 ORDER BY a; + + +# Clean up. +connection master; +SET skip_replication=0; +DROP TABLE t1,t2; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_special_charset.test b/mysql-test/suite/rpl/t/rpl_special_charset.test index fa19a17b1e4f2..641aa483d32b7 100644 --- a/mysql-test/suite/rpl/t/rpl_special_charset.test +++ b/mysql-test/suite/rpl/t/rpl_special_charset.test @@ -1 +1,32 @@ ---source include/rpl_special_charset.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +################################################################################ +# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS +# Problem: IO thread fails to connect to master if servers are configured with +# special character sets like utf16, utf32, ucs2. +# +# Analysis: MySQL server does not support few special character sets like +# utf16,utf32 and ucs2 as "client's character set"(eg: utf16,utf32, ucs2). +# When IO thread is trying to connect to Master, it sets server's character +# set as client's character set. When Slave server is started with these +# special character sets, IO thread (a connection to Master) fails because +# of the above said reason. +# +# Fix: If server's character set is not supported as client's character set, +# then set default's client character set(latin1) as client's character set. 
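+#
+# (Illustrative note, not part of the original comment: such character sets
+# cannot act as a client character set at all, e.g. a plain client issuing
+# SET NAMES utf16 is rejected, which is why the IO thread needs the latin1
+# fallback described above.)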
+############################################################################### +--source include/master-slave.inc +call mtr.add_suppression("'utf16' can not be used as client character set"); +CREATE TABLE t1(i VARCHAR(20)); +INSERT INTO t1 VALUES (0xFFFF); +--sync_slave_with_master +--let diff_tables=master:t1, slave:t1 +--source include/diff_tables.inc +# Cleanup +--connection master +DROP TABLE t1; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_sporadic_master.test b/mysql-test/suite/rpl/t/rpl_sporadic_master.test index 397756af3965d..ad4c44cbf7409 100644 --- a/mysql-test/suite/rpl/t/rpl_sporadic_master.test +++ b/mysql-test/suite/rpl/t/rpl_sporadic_master.test @@ -1 +1,32 @@ ---source include/rpl_sporadic_master.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# test to see if replication can continue when master sporadically fails on +# COM_BINLOG_DUMP and additionally limits the number of events per dump + +source include/master-slave.inc; + +create table t2(n int); +create table t1(n int not null auto_increment primary key); +insert into t1 values (NULL),(NULL); +truncate table t1; +# We have to use 4 in the following to make this test work with all table types +insert into t1 values (4),(NULL); +sync_slave_with_master; +--source include/stop_slave.inc +--source include/start_slave.inc +connection master; +insert into t1 values (NULL),(NULL); +flush logs; +truncate table t1; +insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL); +sync_slave_with_master; +select * from t1 ORDER BY n; +connection master; +drop table t1,t2; +sync_slave_with_master; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_ssl.test b/mysql-test/suite/rpl/t/rpl_ssl.test index c4a534b929495..59a2af9f137c0 100644 --- a/mysql-test/suite/rpl/t/rpl_ssl.test +++ b/mysql-test/suite/rpl/t/rpl_ssl.test @@ -1 +1,116 @@ ---source include/rpl_ssl.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +source include/have_ssl_communication.inc; +source include/master-slave.inc; +source include/no_valgrind_without_big.inc; + +# create a user for replication that requires ssl encryption +connection master; +create user replssl@localhost; +grant replication slave on *.* to replssl@localhost require ssl; +create table t1 (t int auto_increment, KEY(t)); + +sync_slave_with_master; + +# Set slave to use SSL for connection to master +stop slave; +--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR +eval change master to + master_user='replssl', + master_password='', + master_ssl=1, + master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem', + master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem', + master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem'; +start slave; + +# Switch to master and insert one record, then sync it to slave +connection master; +insert into t1 values(1); +sync_slave_with_master; + +# The record should now be on slave +select * from t1; + +# The slave is synced and waiting/reading from master +# SHOW SLAVE STATUS will show "Waiting for master to send event" +let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Crl, Master_SSL_Crlpath, Master_SSL_Cert, Master_SSL_Key; +source include/show_slave_status.inc; +source include/check_slave_is_running.inc; + +# Stop the slave, as reported in bug#21871 it would hang +STOP SLAVE; + +select * from t1; + +# Do the same thing a number of times +disable_query_log; +disable_result_log; +# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows +# After discussions with Engineering, I'm disabling this part of the test to avoid it causing +# red trees. +disable_parsing; +let $i= 100; +while ($i) +{ + start slave; + connection master; + insert into t1 values (NULL); + select * from t1; # Some variance + connection slave; + select * from t1; # Some variance + stop slave; + dec $i; +} +enable_parsing; +START SLAVE; +enable_query_log; +enable_result_log; +connection master; +# INSERT one more record to make sure +# the sync has something to do +insert into t1 values (NULL); +let $master_count= `select count(*) from t1`; + +sync_slave_with_master; +--source include/wait_for_slave_to_start.inc +source include/show_slave_status.inc; +source include/check_slave_is_running.inc; + +let $slave_count= `select count(*) from t1`; + +if ($slave_count != $master_count) +{ + echo master and slave differed in number of rows; + echo master: $master_count; + echo slave: $slave_count; + + connection master; + select count(*) t1; + select * from t1; + connection slave; + select count(*) t1; + select * from t1; + query_vertical show slave status; +} + +connection master; +drop user replssl@localhost; +drop table t1; +sync_slave_with_master; + +--source include/stop_slave.inc +CHANGE MASTER TO + master_user = 'root', + master_ssl = 0, + master_ssl_ca = '', + master_ssl_cert = '', + master_ssl_key = ''; + +--echo End of 5.0 tests +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test b/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test index b4e5335871268..654a5d47cb910 100644 --- a/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test +++ b/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test @@ -1 +1,107 @@ ---source include/rpl_stm_relay_ign_space.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +# +# BUG#12400313 / BUG#64503 test case +# +# +# Description +# ----------- +# +# This test case starts the slave server with: +# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096 +# +# Then it issues some queries that will cause the slave to reach +# relay-log-space-limit. We lock the table so that the SQL thread is +# not able to purge the log and then we issue some more statements. +# +# The purpose is to show that the IO thread will honor the limits +# while the SQL thread is not able to purge the relay logs, which did +# not happen before this patch. In addition we assert that while +# ignoring the limit (SQL thread needs to rotate before purging), the +# IO thread does not do it in an uncontrolled manner. + +--source include/have_binlog_format_statement.inc +--source include/have_innodb.inc +--source include/master-slave.inc + +--disable_query_log +CREATE TABLE t1 (c1 TEXT) engine=InnoDB; + +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); + +--sync_slave_with_master + +# wait for the SQL thread to sleep +--let $show_statement= SHOW PROCESSLIST +--let $field= State +--let $condition= = 'Slave has read all relay log; waiting for more updates' +--source include/wait_show_condition.inc + +# now the io thread has set rli->ignore_space_limit +# lets lock the table so that once the SQL thread awakes +# it blocks there and does not set rli->ignore_space_limit +# back to zero +LOCK TABLE t1 WRITE; + +# now issue more statements that will overflow the +# rli->log_space_limit (in this case ~10K) +--connection master + +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); + +--connection slave + +# ASSERT that the IO thread waits for the SQL thread to release some +# space before continuing +--let $show_statement= SHOW PROCESSLIST +--let $field= State +--let $condition= LIKE 'Waiting for %' +# before the patch (IO would have transfered everything) +#--let $condition= = 'Waiting for master to send event' +# after the patch (now it waits for space to be freed) +#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space' +--source include/wait_show_condition.inc + +# without the patch we can uncomment the following two lines and +# watch the IO thread synchronize with the master, thus writing +# relay logs way over the space limit +#--connection master +#--source include/sync_slave_io_with_master.inc + +## ASSERT that the IO thread has honored the limit+few bytes required to be able to purge +--let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1) +--let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1) +--let $assert_text= Assert that relay log space is close to the limit +--let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15 +--source include/assert.inc + +# unlock the table and let SQL thread continue applying events +UNLOCK TABLES; + +--connection master +--sync_slave_with_master +--let $diff_tables=master:test.t1,slave:test.t1 +--source include/diff_tables.inc + +--connection master +DROP TABLE t1; +--enable_query_log +--sync_slave_with_master + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test index 2625508515b90..d814a25771168 100644 --- a/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test @@ -1 +1,633 @@ ---source include/rpl_switch_stm_row_mixed.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# +# rpl_switch_stm_row_mixed tests covers +# +# - Master is switching explicitly between STATEMENT, ROW, and MIXED +# binlog format showing when it is possible and when not. 
+# - Master switching from MIXED to RBR implicitly listing all use +# cases, e.g a query invokes SYS_GUID(), thereafter to serve as the +# definition of MIXED binlog format +# - correctness of execution + + +-- source include/have_binlog_format_mixed_or_row.inc +-- source include/master-slave.inc + +# Since this test generates row-based events in the binary log, the +# slave SQL thread cannot be in STATEMENT mode to execute this test, +# so we only execute it for MIXED and ROW as default value of +# BINLOG_FORMAT. + +connection slave; + +connection master; +--disable_warnings +drop database if exists mysqltest1; +create database mysqltest1; +--enable_warnings +use mysqltest1; + +# Save binlog format +set @my_binlog_format= @@global.binlog_format; + +# play with switching +set session binlog_format=mixed; +show session variables like "binlog_format%"; +set session binlog_format=statement; +show session variables like "binlog_format%"; +set session binlog_format=row; +show session variables like "binlog_format%"; + +set global binlog_format=DEFAULT; +show global variables like "binlog_format%"; +set global binlog_format=MIXED; +show global variables like "binlog_format%"; +set global binlog_format=STATEMENT; +show global variables like "binlog_format%"; +set global binlog_format=ROW; +show global variables like "binlog_format%"; +show session variables like "binlog_format%"; +select @@global.binlog_format, @@session.binlog_format; + +CREATE TABLE t1 (a varchar(100)); + +prepare stmt1 from 'insert into t1 select concat(SYS_GUID(),?)'; +set @string="emergency_1_"; +insert into t1 values("work_2_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values(concat(SYS_GUID(),"work_3_")); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values(concat("for_4_",SYS_GUID())); +insert into t1 select "yesterday_5_"; + +# verify that temp tables prevent a switch to SBR +create temporary table tmp(a char(100)); +insert into tmp values("see_6_"); +--error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR +set binlog_format=statement; +insert into t1 select * from tmp; +drop temporary table tmp; + +# Now we go to SBR +set binlog_format=statement; +show global variables like "binlog_format%"; +show session variables like "binlog_format%"; +select @@global.binlog_format, @@session.binlog_format; +set global binlog_format=statement; +show global variables like "binlog_format%"; +show session variables like "binlog_format%"; +select @@global.binlog_format, @@session.binlog_format; + +prepare stmt1 from 'insert into t1 select ?'; +set @string="emergency_7_"; +insert into t1 values("work_8_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values("work_9_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values("for_10_"); +insert into t1 select "yesterday_11_"; + +# test statement (is not default after wl#3368) +set binlog_format=statement; +select @@global.binlog_format, @@session.binlog_format; +set global binlog_format=statement; +select @@global.binlog_format, @@session.binlog_format; + +prepare stmt1 from 'insert into t1 select ?'; +set @string="emergency_12_"; +insert into t1 values("work_13_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values("work_14_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 
values("for_15_"); +insert into t1 select "yesterday_16_"; + +# and now the mixed mode + +set global binlog_format=mixed; +select @@global.binlog_format, @@session.binlog_format; +set binlog_format=default; +select @@global.binlog_format, @@session.binlog_format; + +prepare stmt1 from 'insert into t1 select concat(SYS_GUID(),?)'; +set @string="emergency_17_"; +insert into t1 values("work_18_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values(concat(SYS_GUID(),"work_19_")); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values(concat("for_20_",SYS_GUID())); +insert into t1 select "yesterday_21_"; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values(concat(SYS_GUID(),"work_22_")); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values(concat("for_23_",SYS_GUID())); +insert into t1 select "yesterday_24_"; + +# Test of CREATE TABLE SELECT + +create table t2 ENGINE=MyISAM select rpad(SYS_GUID(),100,' '); +create table t3 select 1 union select SYS_GUID(); +--disable_warnings +SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR +create table t4 select * from t1 where 3 in (select 1 union select 2 union select SYS_GUID() union select 3); +--enable_warnings +SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR +create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); +# what if SYS_GUID() is first: +--disable_warnings +insert ignore into t5 select SYS_GUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4); +--enable_warnings + +# inside a stored procedure + +delimiter |; +create procedure foo() +begin +insert into t1 values("work_25_"); +insert into t1 values(concat("for_26_",SYS_GUID())); +insert into t1 select "yesterday_27_"; +end| +create procedure foo2() +begin +insert into t1 values(concat("emergency_28_",SYS_GUID())); +insert into t1 values("work_29_"); +insert into t1 values(concat("for_30_",SYS_GUID())); +set session binlog_format=row; # accepted for stored procs +insert into t1 values("more work_31_"); +set session binlog_format=mixed; +end| +create function foo3() returns bigint unsigned +begin + set session binlog_format=row; # rejected for stored funcs + insert into t1 values("alarm"); + return 100; +end| +create procedure foo4(x varchar(100)) +begin +insert into t1 values(concat("work_250_",x)); +insert into t1 select "yesterday_270_"; +end| +delimiter ;| +call foo(); +call foo2(); +call foo4("hello"); +call foo4(SYS_GUID()); +call foo4("world"); + +# test that can't SET in a stored function +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT +select foo3(); +select * from t1 where a="alarm"; + +# Tests of stored functions/triggers/views for BUG#20930 "Mixed +# binlogging mode does not work with stored functions, triggers, +# views" + +# Function which calls procedure +drop function foo3; +delimiter |; +create function foo3() returns bigint unsigned +begin + insert into t1 values("foo3_32_"); + call foo(); + return 100; +end| +delimiter ;| +insert into t2 select foo3(); + +prepare stmt1 from 'insert into t2 select foo3()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# Test if stored function calls stored function which calls procedure +# which requires row-based. 
+ +delimiter |; +create function foo4() returns bigint unsigned +begin + insert into t2 select foo3(); + return 100; +end| +delimiter ;| +select foo4(); + +prepare stmt1 from 'select foo4()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# A simple stored function +delimiter |; +create function foo5() returns bigint unsigned +begin + insert into t2 select SYS_GUID(); + return 100; +end| +delimiter ;| +select foo5(); + +prepare stmt1 from 'select foo5()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# A simple stored function where SYS_GUID() is in the argument +delimiter |; +create function foo6(x varchar(100)) returns bigint unsigned +begin + insert into t2 select x; + return 100; +end| +delimiter ;| +select foo6("foo6_1_"); +select foo6(concat("foo6_2_",SYS_GUID())); + +prepare stmt1 from 'select foo6(concat("foo6_3_",SYS_GUID()))'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + + +# Test of views using SYS_GUID() + +create view v1 as select SYS_GUID(); +create table t11 (data varchar(255)); +insert into t11 select * from v1; +# Test of querying INFORMATION_SCHEMA which parses the view's body, +# to verify that it binlogs statement-based (is not polluted by +# the parsing of the view's body). +insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11'); +prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')"; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# Test of triggers with SYS_GUID() +delimiter |; +create trigger t11_bi before insert on t11 for each row +begin + set NEW.data = concat(NEW.data,SYS_GUID()); +end| +delimiter ;| +insert into t11 values("try_560_"); + +# Test that INSERT DELAYED works in mixed mode (BUG#20649) +insert delayed into t2 values("delay_1_"); +insert delayed into t2 values(concat("delay_2_",SYS_GUID())); +insert delayed into t2 values("delay_6_"); + +# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not +# replicate fine in statement-based ; we test that in mixed mode it +# works). +insert delayed into t2 values(rand()); +set @a=2.345; +insert delayed into t2 values(@a); + +# With INSERT DELAYED, rows are written to the binlog after they are +# written to the table. Therefore, it is not enough to wait until the +# rows make it to t2 on the master (the rows may not be in the binlog +# at that time, and may still not be in the binlog when +# sync_slave_with_master is later called). Instead, we wait until the +# rows make it to t2 on the slave. We first call +# sync_slave_with_master, so that we are sure that t2 has been created +# on the slave. +sync_slave_with_master; +let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2; +--source include/wait_condition.inc +connection master; + +# If you want to do manual testing of the mixed mode regarding UDFs (not +# testable automatically as quite platform- and compiler-dependent), +# you just need to set the variable below to 1, and to +# "make udf_example.so" in sql/, and to copy sql/udf_example.so to +# MYSQL_TEST_DIR/lib/mysql. 
+let $you_want_to_test_UDF=0; +if ($you_want_to_test_UDF) +{ + CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so'; + prepare stmt1 from 'insert into t1 select metaphon(?)'; + set @string="emergency_133_"; + insert into t1 values("work_134_"); + execute stmt1 using @string; + deallocate prepare stmt1; + prepare stmt1 from 'insert into t1 select ?'; + insert into t1 values(metaphon("work_135_")); + execute stmt1 using @string; + deallocate prepare stmt1; + insert into t1 values(metaphon("for_136_")); + insert into t1 select "yesterday_137_"; + create table t6 select metaphon("for_138_"); + create table t7 select 1 union select metaphon("for_139_"); + create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3); + create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); +} + +create table t20 select * from t1; # save for comparing later +create table t21 select * from t2; +create table t22 select * from t3; +drop table t1,t2,t3; + +# This tests the fix to +# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog +# We verify that under the mixed binlog mode, a stored function +# modifying at least two tables having an auto_increment column, +# is binlogged row-based. Indeed in statement-based binlogging, +# only the auto_increment value generated for the first table +# is recorded in the binlog, the value generated for the 2nd table +# lacking. + +create table t1 (a int primary key auto_increment, b varchar(100)); +create table t2 (a int primary key auto_increment, b varchar(100)); +create table t3 (b varchar(100)); +delimiter |; +create function f (x varchar(100)) returns int deterministic +begin + insert into t1 values(null,x); + insert into t2 values(null,x); + return 1; +end| +delimiter ;| +select f("try_41_"); +# Two operations which compensate each other except that their net +# effect is that they advance the auto_increment counter of t2 on slave: +sync_slave_with_master; +use mysqltest1; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; + +connection master; +# this is the call which didn't replicate well +select f("try_42_"); +sync_slave_with_master; + +# now use prepared statement and test again, just to see that the RBB +# mode isn't set at PREPARE but at EXECUTE. 
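Editorial sketch of the rule being tested here (not part of the patch; the table name t is hypothetical): nothing is decided at PREPARE time, each EXECUTE is classified on its own, and only what runs during that EXECUTE counts. A nondeterministic value computed beforehand into a user variable does not by itself force row logging, because the variable's value is written to the binlog separately:

PREPARE s FROM 'INSERT INTO t SELECT ?';
SET @a = 'plain text';
EXECUTE s USING @a;        # nothing unsafe executes: statement format
SET @a = CONCAT('x-', SYS_GUID());
EXECUTE s USING @a;        # still statement format: SYS_GUID() ran while setting @a,
                           # and @a is replicated as a User_var event
DEALLOCATE PREPARE s;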
+ +insert into t2 values(3,null),(4,null); +delete from t2 where a>=3; + +connection master; +prepare stmt1 from 'select f(?)'; +set @string="try_43_"; +insert into t1 values(null,"try_44_"); # should be SBB +execute stmt1 using @string; # should be RBB +deallocate prepare stmt1; +sync_slave_with_master; + +# verify that if only one table has auto_inc, it does not trigger RBB +# (we'll check in binlog further below) + +connection master; +create table t12 select * from t1; # save for comparing later +drop table t1; +create table t1 (a int, b varchar(100), key(a)); +select f("try_45_"); + +# restore table's key +create table t13 select * from t1; +drop table t1; +create table t1 (a int primary key auto_increment, b varchar(100)); + +# now test if it's two functions, each of them inserts in one table + +drop function f; +# we need a unique key to have sorting of rows by mysqldump +create table t14 (unique (a)) select * from t2; +truncate table t2; +delimiter |; +create function f1 (x varchar(100)) returns int deterministic +begin + insert into t1 values(null,x); + return 1; +end| +create function f2 (x varchar(100)) returns int deterministic +begin + insert into t2 values(null,x); + return 1; +end| +delimiter ;| +select f1("try_46_"),f2("try_47_"); + +sync_slave_with_master; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; + +connection master; +# Test with SELECT and INSERT +select f1("try_48_"),f2("try_49_"); +insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_"))); +sync_slave_with_master; + +# verify that if f2 does only read on an auto_inc table, this does not +# switch to RBB +connection master; +drop function f2; +delimiter |; +create function f2 (x varchar(100)) returns int deterministic +begin + declare y int; + insert into t1 values(null,x); + set y = (select count(*) from t2); + return y; +end| +delimiter ;| +select f1("try_53_"),f2("try_54_"); +sync_slave_with_master; + +# And now, a normal statement with a trigger (no stored functions) + +connection master; +drop function f2; +delimiter |; +create trigger t1_bi before insert on t1 for each row +begin + insert into t2 values(null,"try_55_"); +end| +delimiter ;| +insert into t1 values(null,"try_56_"); +# and now remove one auto_increment and verify SBB +alter table t1 modify a int, drop primary key; +insert into t1 values(null,"try_57_"); +sync_slave_with_master; + +# Test for BUG#20499 "mixed mode with temporary table breaks binlog" +# Slave used to have only 2 rows instead of 3. 
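The mechanism behind BUG#20499, as the statements below exercise it (editorial sketch, not part of the patch; table names are hypothetical): a temporary table created through a row-logged statement never exists on the slave, so any statement that copies data out of it must also be logged as rows, otherwise the slave has nothing to select from:

CREATE TEMPORARY TABLE tmp SELECT SYS_GUID();   # unsafe, logged row-based; tmp is absent on the slave
INSERT INTO real_tbl SELECT * FROM tmp;         # must therefore be row-logged as well
DROP TEMPORARY TABLE tmp;
INSERT INTO real_tbl VALUES ('literal');        # safe again: plain statement format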
+connection master; +CREATE TEMPORARY TABLE t15 SELECT SYS_GUID(); +create table t16 like t15; +INSERT INTO t16 SELECT * FROM t15; +# we'll verify that this one is done RBB +insert into t16 values("try_65_"); +drop table t15; +# we'll verify that this one is done SBB +insert into t16 values("try_66_"); +sync_slave_with_master; + +# and now compare: + +connection master; + +# first check that data on master is sensible +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t11; +select count(*) from t20; +select count(*) from t21; +select count(*) from t22; +select count(*) from t12; +select count(*) from t13; +select count(*) from t14; +select count(*) from t16; +if ($you_want_to_test_UDF) +{ + select count(*) from t6; + select count(*) from t7; + select count(*) from t8; + select count(*) from t9; +} + +sync_slave_with_master; + +# +# Bug#20863 If binlog format is changed between update and unlock of +# tables, wrong binlog +# + +connection master; +DROP TABLE IF EXISTS t11; +SET SESSION BINLOG_FORMAT=STATEMENT; +CREATE TABLE t11 (song VARCHAR(255)); +LOCK TABLES t11 WRITE; +SET SESSION BINLOG_FORMAT=ROW; +INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict'); +SET SESSION BINLOG_FORMAT=STATEMENT; +INSERT INTO t11 VALUES('Careful With That Axe, Eugene'); +UNLOCK TABLES; + +--query_vertical SELECT * FROM t11 +sync_slave_with_master; +USE mysqltest1; +--query_vertical SELECT * FROM t11 + +connection master; +DROP TABLE IF EXISTS t12; +SET SESSION BINLOG_FORMAT=MIXED; +CREATE TABLE t12 (data LONG); +LOCK TABLES t12 WRITE; +INSERT INTO t12 VALUES(SYS_GUID()); +UNLOCK TABLES; +sync_slave_with_master; + +# +# BUG#28086: SBR of USER() becomes corrupted on slave +# + +connection master; + +# Just to get something that is non-trivial, albeit still simple, we +# stuff the result of USER() and CURRENT_USER() into a variable. +--delimiter $$ +CREATE FUNCTION my_user() + RETURNS CHAR(64) +BEGIN + DECLARE user CHAR(64); + SELECT USER() INTO user; + RETURN user; +END $$ +--delimiter ; + +--delimiter $$ +CREATE FUNCTION my_current_user() + RETURNS CHAR(64) +BEGIN + DECLARE user CHAR(64); + SELECT CURRENT_USER() INTO user; + RETURN user; +END $$ +--delimiter ; + +DROP TABLE IF EXISTS t13; +CREATE TABLE t13 (data CHAR(64)); +INSERT INTO t13 VALUES (USER()); +INSERT INTO t13 VALUES (my_user()); +INSERT INTO t13 VALUES (CURRENT_USER()); +INSERT INTO t13 VALUES (my_current_user()); + +sync_slave_with_master; + +# as we're using SYS_GUID we don't SELECT but use "diff" like in rpl_row_UUID +--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql +--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql + +# Let's compare. Note: If they match test will pass, if they do not match +# the test will show that the diff statement failed and not reject file +# will be created. 
You will need to go to the mysql-test dir and diff +# the files your self to see what is not matching + +diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; + +connection master; + +# Now test that mysqlbinlog works fine on a binlog generated by the +# mixed mode + +# BUG#11312 "DELIMITER is not written to the binary log that causes +# syntax error" makes that mysqlbinlog will fail if we pass it the +# text of queries; this forces us to use --base64-output here. + +# BUG#20929 "BINLOG command causes invalid free plus assertion +# failure" makes mysqld segfault when receiving --base64-output + +# So I can't enable this piece of test +# SIGH + +if ($enable_when_11312_or_20929_fixed) +{ +--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql +drop database mysqltest1; +--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql +--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql +# the old mysqldump output on slave is the same as what it was on +# master before restoring on master. +diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; +} + +drop database mysqltest1; +sync_slave_with_master; + +connection master; +# Restore binlog format setting +set global binlog_format =@my_binlog_format; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_sync.test b/mysql-test/suite/rpl/t/rpl_sync.test index bdb0d8ec4ccd8..1e2ec2ca83b85 100644 --- a/mysql-test/suite/rpl/t/rpl_sync.test +++ b/mysql-test/suite/rpl/t/rpl_sync.test @@ -1,2 +1,159 @@ ---source include/rpl_sync_test.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +######################################################################################## +# This test verifies the options --sync-relay-log-info and --relay-log-recovery by +# crashing the slave in two different situations: +# (case-1) - Corrupt the relay log with changes which were not processed by +# the SQL Thread and crashes it. +# (case-2) - Corrupt the master.info with wrong coordinates and crashes it. +# +# Case 1: +# 1 - Stops the SQL Thread +# 2 - Inserts new records into the master. +# 3 - Corrupts the relay-log.bin* which most likely has such changes. +# 4 - Crashes the slave +# 5 - Verifies if the slave is sync with the master which means that the information +# loss was circumvented by the recovery process. +# +# Case 2: +# 1 - Stops the SQL/IO Threads +# 2 - Inserts new records into the master. +# 3 - Corrupts the master.info with wrong coordinates. +# 4 - Crashes the slave +# 5 - Verifies if the slave is sync with the master which means that the information +# loss was circumvented by the recovery process. 
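For orientation (editorial note, not part of the patch): --relay-log-recovery makes the restarted slave discard its existing relay logs and re-request events from the last position the SQL thread is recorded as having executed, which is why both a corrupted relay log and a master.info with stale coordinates are survivable; --sync-relay-log-info is what keeps that recorded position trustworthy across a crash. A rough way to observe the effect on the slave, assuming a running replica:

SELECT @@GLOBAL.relay_log_recovery, @@GLOBAL.sync_relay_log_info;
SHOW SLAVE STATUS;   # after recovery, Relay_Log_File/Relay_Log_Pos point at a fresh
                     # relay log while Exec_Master_Log_Pos continues from the last
                     # applied event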
+######################################################################################## + +######################################################################################## +# Configuring the environment +######################################################################################## +--echo =====Configuring the enviroment=======; +--source include/not_embedded.inc +--source include/not_valgrind.inc +--source include/have_debug.inc +--source include/have_innodb.inc +--source include/not_crashrep.inc +--source include/master-slave.inc + +call mtr.add_suppression('Attempting backtrace'); +call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001"); +# Use innodb so we do not get "table should be repaired" issues. +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +flush tables; +CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb; + +insert into t1(a) values(1); +insert into t1(a) values(2); +insert into t1(a) values(3); + +######################################################################################## +# Case 1: Corrupt a relay-log.bin* +######################################################################################## +--echo =====Inserting data on the master but without the SQL Thread being running=======; +sync_slave_with_master; + +connection slave; +let $MYSQLD_SLAVE_DATADIR= `select @@datadir`; +--replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR +--copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup +--source include/stop_slave_sql.inc + +connection master; +insert into t1(a) values(4); +insert into t1(a) values(5); +insert into t1(a) values(6); + +--echo =====Removing relay log files and crashing/recoverying the slave=======; +connection slave; +--source include/stop_slave_io.inc + +let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1); + +--let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file +perl; +$file= $ENV{'FILE_TO_CORRUPT'}; +open(FILE, ">$file") || die "Unable to open $file."; +truncate(FILE,0); +print FILE "failure"; +close ($file); +EOF + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; +--error 2013 +FLUSH LOGS; + +--let $rpl_server_number= 2 +--source include/rpl_reconnect.inc + +--echo =====Dumping and comparing tables=======; +--source include/start_slave.inc + +connection master; +sync_slave_with_master; + +let $diff_tables=master:t1,slave:t1; +source include/diff_tables.inc; + +######################################################################################## +# Case 2: Corrupt a master.info +######################################################################################## +--echo =====Corrupting the master.info=======; +connection slave; +--source include/stop_slave.inc + +connection master; +FLUSH LOGS; + +insert into t1(a) values(7); +insert into t1(a) values(8); +insert into t1(a) values(9); + +connection slave; +let MYSQLD_SLAVE_DATADIR=`select @@datadir`; + +--perl +use strict; +use warnings; +my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup"; +my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info"; +open(FILE, "<", $src) or die; +my @content= ; +close FILE; +open(FILE, ">", $dst) or die; +binmode FILE; +print FILE @content; +close FILE; +EOF + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; +--error 2013 +FLUSH LOGS; + +--let $rpl_server_number= 2 +--source include/rpl_reconnect.inc + +--echo =====Dumping 
and comparing tables=======; +--source include/start_slave.inc + +connection master; +sync_slave_with_master; + +let $diff_tables=master:t1,slave:t1; +source include/diff_tables.inc; + +######################################################################################## +# Clean up +######################################################################################## +--echo =====Clean up=======; +connection master; +drop table t1; + +--remove_file $MYSQLD_SLAVE_DATADIR/master.backup +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test b/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test index d976ae3757bab..6728ff55d6f5e 100644 --- a/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test +++ b/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test @@ -1 +1,82 @@ ---source include/rpl_temporal_format_default_to_default.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +--source include/master-slave.inc + +if ($force_master_mysql56_temporal_format) +{ + connection master; + eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format; +} + +if ($force_slave_mysql56_temporal_format) +{ + connection slave; + eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format; +} + +connection master; +SELECT @@global.mysql56_temporal_format AS on_master; +connection slave; +SELECT @@global.mysql56_temporal_format AS on_slave; +connection master; + +CREATE TABLE t1 +( + c0 TIME(0), + c1 TIME(1), + c2 TIME(2), + c3 TIME(3), + c4 TIME(4), + c5 TIME(5), + c6 TIME(6) +); +CREATE TABLE t2 +( + c0 TIMESTAMP(0), + c1 TIMESTAMP(1), + c2 TIMESTAMP(2), + c3 TIMESTAMP(3), + c4 TIMESTAMP(4), + c5 TIMESTAMP(5), + c6 TIMESTAMP(6) +); + +CREATE TABLE t3 +( + c0 DATETIME(0), + c1 DATETIME(1), + c2 DATETIME(2), + c3 DATETIME(3), + c4 DATETIME(4), + c5 DATETIME(5), + c6 DATETIME(6) +); +INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111'); +INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); +INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); +SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; +sync_slave_with_master; + +connection slave; +--query_vertical SELECT * FROM t1; +--query_vertical SELECT * FROM t2; +--query_vertical SELECT * FROM t3; +SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; + +connection master; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; + +connection slave; +SET @@global.mysql56_temporal_format=DEFAULT; +connection master; +SET @@global.mysql56_temporal_format=DEFAULT; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_typeconv.test b/mysql-test/suite/rpl/t/rpl_typeconv.test index c251708625827..9e5662588827e 100644 --- a/mysql-test/suite/rpl/t/rpl_typeconv.test +++ b/mysql-test/suite/rpl/t/rpl_typeconv.test @@ -1 +1,78 @@ ---source include/rpl_typeconv.inc +# 
+# This include file is used by more than one test suite +# (currently rpl and binlog_encryption suite). +# Please check all dependent tests after modifying it +# + +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +connection slave; +set @saved_slave_type_conversions = @@global.slave_type_conversions; +CREATE TABLE type_conversions ( + TestNo INT AUTO_INCREMENT PRIMARY KEY, + Source TEXT, + Target TEXT, + Flags TEXT, + On_Master LONGTEXT, + On_Slave LONGTEXT, + Expected LONGTEXT, + Compare INT, + Error TEXT); + +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; +SELECT @@global.slave_type_conversions; +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT'; +SELECT @@global.slave_type_conversions; + +# Checking strict interpretation of type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; +source suite/rpl/include/type_conversions.test; + +# Checking lossy integer type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; +source suite/rpl/include/type_conversions.test; + +# Checking non-lossy integer type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; +source suite/rpl/include/type_conversions.test; + +# Checking all type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; +source suite/rpl/include/type_conversions.test; + +connection slave; +--echo **** Result of conversions **** +disable_query_log; +SELECT RPAD(Source, 15, ' ') AS Source_Type, + RPAD(Target, 15, ' ') AS Target_Type, + RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags, + IF(Compare IS NULL AND Error IS NOT NULL, '', + IF(Compare, '', + CONCAT("'", On_Slave, "' != '", Expected, "'"))) + AS Value_On_Slave + FROM type_conversions; +enable_query_log; +DROP TABLE type_conversions; + +call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. 
cannot be converted from type.* error.* 1677"); + +connection master; +DROP TABLE t1; +sync_slave_with_master; + +set global slave_type_conversions = @saved_slave_type_conversions; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/sys_vars/r/aria_log_dir_path_basic.result b/mysql-test/suite/sys_vars/r/aria_log_dir_path_basic.result new file mode 100644 index 0000000000000..8563b024a75d7 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/aria_log_dir_path_basic.result @@ -0,0 +1,36 @@ +SELECT COUNT(@@GLOBAL.aria_log_dir_path); +COUNT(@@GLOBAL.aria_log_dir_path) +1 +SET @@GLOBAL.aria_log_dir_path=1; +ERROR HY000: Variable 'aria_log_dir_path' is a read only variable +SELECT COUNT(@@GLOBAL.aria_log_dir_path); +COUNT(@@GLOBAL.aria_log_dir_path) +1 +SELECT @@GLOBAL.aria_log_dir_path = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='aria_log_dir_path'; +@@GLOBAL.aria_log_dir_path = VARIABLE_VALUE +1 +SELECT COUNT(@@GLOBAL.aria_log_dir_path); +COUNT(@@GLOBAL.aria_log_dir_path) +1 +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='aria_log_dir_path'; +COUNT(VARIABLE_VALUE) +1 +SELECT @@aria_log_dir_path = @@GLOBAL.aria_log_dir_path; +@@aria_log_dir_path = @@GLOBAL.aria_log_dir_path +1 +SELECT COUNT(@@aria_log_dir_path); +COUNT(@@aria_log_dir_path) +1 +SELECT COUNT(@@local.aria_log_dir_path); +ERROR HY000: Variable 'aria_log_dir_path' is a GLOBAL variable +SELECT COUNT(@@SESSION.aria_log_dir_path); +ERROR HY000: Variable 'aria_log_dir_path' is a GLOBAL variable +SELECT COUNT(@@GLOBAL.aria_log_dir_path); +COUNT(@@GLOBAL.aria_log_dir_path) +1 +SELECT aria_log_dir_path = @@SESSION.aria_log_dir_path; +ERROR 42S22: Unknown column 'aria_log_dir_path' in 'field list' diff --git a/mysql-test/suite/sys_vars/r/sysvars_aria.result b/mysql-test/suite/sys_vars/r/sysvars_aria.result index 8cec099d28c39..181c0de74df53 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_aria.result +++ b/mysql-test/suite/sys_vars/r/sysvars_aria.result @@ -85,6 +85,18 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME ARIA_LOG_DIR_PATH +SESSION_VALUE NULL +DEFAULT_VALUE DATADIR +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE VARCHAR +VARIABLE_COMMENT Path to the directory where to store transactional log +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME ARIA_LOG_FILE_SIZE SESSION_VALUE NULL DEFAULT_VALUE 1073741824 diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 428036958138b..c4a26e58cb411 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -149,7 +149,7 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL -READ_ONLY NO +READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_ABORT SESSION_VALUE NULL diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 6d0d147a9db67..50f0fe4a5b072 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -112,6 +112,16 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME ARIA_LOG_DIR_PATH +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE VARCHAR +VARIABLE_COMMENT 
Path to the directory where to store transactional log +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME ARIA_LOG_FILE_SIZE VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index 1b4e49f55be96..07d547060c5d1 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -112,6 +112,16 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME ARIA_LOG_DIR_PATH +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE VARCHAR +VARIABLE_COMMENT Path to the directory where to store transactional log +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME ARIA_LOG_FILE_SIZE VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED diff --git a/mysql-test/suite/sys_vars/t/all_vars.test b/mysql-test/suite/sys_vars/t/all_vars.test index 06edc04d25c2e..41c89a4bed9cd 100644 --- a/mysql-test/suite/sys_vars/t/all_vars.test +++ b/mysql-test/suite/sys_vars/t/all_vars.test @@ -12,8 +12,6 @@ eval INSTALL PLUGIN federated SONAME "$HA_FEDERATEDX_SO"; eval INSTALL PLUGIN oqgraph SONAME "$HA_OQGRAPH_SO"; eval INSTALL PLUGIN sphinx SONAME "$HA_SPHINX_SO"; eval INSTALL PLUGIN innodb SONAME "$HA_INNODB_SO"; -eval INSTALL PLUGIN rpl_semi_sync_master SONAME "$SEMISYNC_MASTER_SO"; -eval INSTALL PLUGIN rpl_semi_sync_slave SONAME "$SEMISYNC_SLAVE_SO"; --enable_abort_on_error --enable_result_log --enable_query_log diff --git a/mysql-test/suite/sys_vars/t/aria_log_dir_path_basic.test b/mysql-test/suite/sys_vars/t/aria_log_dir_path_basic.test new file mode 100644 index 0000000000000..f09705e6f0d3a --- /dev/null +++ b/mysql-test/suite/sys_vars/t/aria_log_dir_path_basic.test @@ -0,0 +1,35 @@ +--source include/have_maria.inc + +SELECT COUNT(@@GLOBAL.aria_log_dir_path); +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@GLOBAL.aria_log_dir_path=1; + +SELECT COUNT(@@GLOBAL.aria_log_dir_path); + + +SELECT @@GLOBAL.aria_log_dir_path = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='aria_log_dir_path'; + +SELECT COUNT(@@GLOBAL.aria_log_dir_path); + +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='aria_log_dir_path'; + + +SELECT @@aria_log_dir_path = @@GLOBAL.aria_log_dir_path; + + +SELECT COUNT(@@aria_log_dir_path); + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.aria_log_dir_path); + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.aria_log_dir_path); + +SELECT COUNT(@@GLOBAL.aria_log_dir_path); + +--Error ER_BAD_FIELD_ERROR +SELECT aria_log_dir_path = @@SESSION.aria_log_dir_path; diff --git a/mysql-test/suite/sys_vars/t/sysvars_aria.test b/mysql-test/suite/sys_vars/t/sysvars_aria.test index b01e39eebf543..b042f1010f4b6 100644 --- a/mysql-test/suite/sys_vars/t/sysvars_aria.test +++ b/mysql-test/suite/sys_vars/t/sysvars_aria.test @@ -2,7 +2,11 @@ --source include/have_aria_used_for_temp_tables.inc --source include/word_size.inc +let datadir=`select @@datadir`; + --vertical_results + +--replace_result $datadir DATADIR select VARIABLE_NAME, SESSION_VALUE, DEFAULT_VALUE, VARIABLE_SCOPE, VARIABLE_TYPE, VARIABLE_COMMENT, NUMERIC_MIN_VALUE, NUMERIC_MAX_VALUE, NUMERIC_BLOCK_SIZE, 
ENUM_VALUE_LIST, READ_ONLY, COMMAND_LINE_ARGUMENT from information_schema.system_variables where variable_name like 'aria%' order by variable_name; diff --git a/mysql-test/suite/versioning/r/delete_history.result b/mysql-test/suite/versioning/r/delete_history.result index 3681b25f59287..9365145e699ca 100644 --- a/mysql-test/suite/versioning/r/delete_history.result +++ b/mysql-test/suite/versioning/r/delete_history.result @@ -168,6 +168,8 @@ x explain extended delete history from t1 before system_time '2039-01-01 23:00'; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` FOR SYSTEM_TIME BEFORE TIMESTAMP '2039-01-01 23:00' where `test`.`t1`.`row_end` < '2039-01-01 23:00' and is_history(`test`.`t1`.`row_end`) create or replace procedure p() delete history from t1 before system_time '2039-01-01 23:00'; call p; select * from t1; diff --git a/mysys/my_addr_resolve.c b/mysys/my_addr_resolve.c index 2d4385f46843e..376e73686e24c 100644 --- a/mysys/my_addr_resolve.c +++ b/mysys/my_addr_resolve.c @@ -239,10 +239,10 @@ static int addr_resolve(void *ptr, my_addr_loc *loc) } - /* 500 ms should be plenty of time for addr2line to issue a response. */ + /* 5000 ms should be plenty of time for addr2line to issue a response. */ /* Read in a loop till all the output from addr2line is complete. */ while (parsed == total_bytes_read && - (ret= poll(&poll_fds, 1, 500))) + (ret= poll(&poll_fds, 1, 5000))) { /* error during poll */ if (ret < 0) @@ -286,7 +286,8 @@ static int addr_resolve(void *ptr, my_addr_loc *loc) loc->line= atoi(output + line_number_start); /* Addr2line was unable to extract any meaningful information. */ - if (strcmp(loc->file, "??") == 0 && loc->func[0] == '?') + if ((strcmp(loc->file, "??") == 0 || strcmp(loc->file, "") == 0) && + (loc->func[0] == '?' 
|| loc->line == 0)) return 6; loc->file= strip_path(loc->file); diff --git a/mysys/my_atomic_writes.c b/mysys/my_atomic_writes.c index 42ee4e690cdfd..90d0f1d026188 100644 --- a/mysys/my_atomic_writes.c +++ b/mysys/my_atomic_writes.c @@ -118,7 +118,7 @@ static my_bool test_if_shannon_card_exists() char path[32]; struct stat stat_buff; - sprintf(path, "/dev/df%c", dev_part); + snprintf(path, sizeof(path), "/dev/df%c", dev_part); #ifdef TEST_SHANNON if (stat(path, &stat_buff) < 0) { @@ -127,8 +127,10 @@ static my_bool test_if_shannon_card_exists() } #endif shannon_devices[shannon_found_devices].st_dev= stat_buff.st_rdev; - sprintf(shannon_devices[shannon_found_devices].dev_name, "/dev/sct%c", - dev_part); + snprintf(shannon_devices[shannon_found_devices].dev_name, + sizeof(shannon_devices[shannon_found_devices].dev_name), + "/dev/sct%c", + dev_part); #ifdef TEST_SHANNON printf("%s(): i=%d, stat_buff.st_dev=0x%lx, stat_buff.st_rdev=0x%lx, st_rdev=0x%lx, dev_name=%s\n", @@ -151,13 +153,15 @@ static my_bool test_if_shannon_card_exists() for (dev_no= 1 ; dev_no < 9 ; dev_no++) { - sprintf(path, "/dev/df%c%d", dev_part, dev_no); + snprintf(path, sizeof(path), "/dev/df%c%d", dev_part, dev_no); if (stat(path, &stat_buff) < 0) break; shannon_devices[shannon_found_devices].st_dev= stat_buff.st_rdev; - sprintf(shannon_devices[shannon_found_devices].dev_name, "/dev/sct%c%d", - dev_part, dev_no); + snprintf(shannon_devices[shannon_found_devices].dev_name, + sizeof(shannon_devices[shannon_found_devices].dev_name), + "/dev/sct%c%d", + dev_part, dev_no); #ifdef TEST_SHANNON printf("%s(): i=%d, st_dev=0x%lx, st_rdev=0x%lx, dev_name=%s\n", diff --git a/mysys/my_likely.c b/mysys/my_likely.c index 2ae587be18d56..d52074f01e442 100644 --- a/mysys/my_likely.c +++ b/mysys/my_likely.c @@ -76,7 +76,7 @@ void end_my_likely(FILE *out) if (!(likely_file= out)) { char name[80]; - sprintf(name, "/tmp/unlikely-%lu.out", (ulong) getpid()); + snprintf(name, sizeof(name), "/tmp/unlikely-%lu.out", (ulong) getpid()); if ((likely_file= my_fopen(name, O_TRUNC | O_WRONLY, MYF(MY_WME)))) do_close= 1; else diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c index cde34e5bdb974..fd8a99c21967d 100644 --- a/mysys/my_thr_init.c +++ b/mysys/my_thr_init.c @@ -421,7 +421,7 @@ const char *my_thread_name(void) if (!tmp->name[0]) { my_thread_id id= my_thread_dbug_id(); - sprintf(name_buff,"T@%lu", (ulong) id); + snprintf(name_buff, sizeof(name_buff), "T@%lu", (ulong) id); strmake_buf(tmp->name, name_buff); } return tmp->name; diff --git a/plugin/auth_pam/testing/pam_mariadb_mtr.c b/plugin/auth_pam/testing/pam_mariadb_mtr.c index f61a8da768269..108aeb941ac88 100644 --- a/plugin/auth_pam/testing/pam_mariadb_mtr.c +++ b/plugin/auth_pam/testing/pam_mariadb_mtr.c @@ -45,6 +45,7 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags __attribute__((unused)), else { free(resp); + resp= NULL; msg[0].msg_style = PAM_PROMPT_ECHO_ON; msg[0].msg = (char*)"PIN:"; pam_err = (*conv->conv)(1, msgp, &resp, conv->appdata_ptr); diff --git a/plugin/feedback/feedback.cc b/plugin/feedback/feedback.cc index cb254d69ec3f2..ba4850f4cba05 100644 --- a/plugin/feedback/feedback.cc +++ b/plugin/feedback/feedback.cc @@ -366,7 +366,7 @@ static MYSQL_SYSVAR_STR(user_info, user_info, NULL, NULL, ""); static MYSQL_SYSVAR_STR(url, url, PLUGIN_VAR_READONLY | PLUGIN_VAR_RQCMDARG, "Space separated URLs to send the feedback report to.", NULL, NULL, - DEFAULT_PROTO "mariadb.org/feedback_plugin/post"); + DEFAULT_PROTO "feedback.mariadb.org/rest/v1/post"); static 
MYSQL_SYSVAR_ULONG(send_timeout, send_timeout, PLUGIN_VAR_RQCMDARG, "Timeout (in seconds) for the sending the report.", NULL, NULL, 60, 1, 60*60*24, 1); diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index 0381927d70f85..a8cf77a967101 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -288,7 +288,6 @@ SET GLOBAL general_log = 'OFF'; ALTER TABLE general_log MODIFY event_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, MODIFY user_host MEDIUMTEXT NOT NULL, - MODIFY thread_id INTEGER NOT NULL, MODIFY server_id INTEGER UNSIGNED NOT NULL, MODIFY command_type VARCHAR(64) NOT NULL, MODIFY argument MEDIUMTEXT NOT NULL, @@ -850,3 +849,8 @@ IF 1 = (SELECT count(*) FROM information_schema.VIEWS WHERE TABLE_CATALOG = 'def END IF// DELIMITER ; + +# MDEV-22683 - upgrade Host and Owner of servers +ALTER TABLE servers + MODIFY Host varchar(2048) NOT NULL DEFAULT '', + MODIFY Owner varchar(512) NOT NULL DEFAULT ''; diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 7e26af83701ce..6ec975f5a593f 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -95,9 +95,9 @@ sst_ver=1 declare -a RC -BACKUP_BIN=$(commandex 'mariabackup') +BACKUP_BIN=$(commandex 'mariadb-backup') if [ -z "$BACKUP_BIN" ]; then - wsrep_log_error 'mariabackup binary not found in path' + wsrep_log_error 'mariadb-backup binary not found in path' exit 42 fi @@ -685,7 +685,7 @@ cleanup_at_exit() if [ -n "$BACKUP_PID" ]; then if check_pid "$BACKUP_PID" 1; then wsrep_log_error \ - "mariabackup process is still running. Killing..." + "mariadb-backup process is still running. Killing..." cleanup_pid $CHECK_PID "$BACKUP_PID" fi fi @@ -761,7 +761,7 @@ check_extra() if [ "$thread_handling" = 'pool-of-threads' ]; then local eport=$(parse_cnf '--mysqld' 'extra-port') if [ -n "$eport" ]; then - # mariabackup works only locally. + # mariadb-backup works only locally. # Hence, setting host to 127.0.0.1 unconditionally: wsrep_log_info "SST through extra_port $eport" INNOEXTRA="$INNOEXTRA --host=127.0.0.1 --port=$eport" @@ -930,7 +930,7 @@ cd "$OLD_PWD" if [ $ssyslog -eq 1 ]; then if [ -n "$(commandex logger)" ]; then - wsrep_log_info "Logging all stderr of SST/mariabackup to syslog" + wsrep_log_info "Logging all stderr of SST/mariadb-backup to syslog" exec 2> >(logger -p daemon.err -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE) @@ -1053,11 +1053,11 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then xtmpdir=$(TMPDIR="$tmpdir"; mktemp '-d') fi - wsrep_log_info "Using '$xtmpdir' as mariabackup temporary directory" + wsrep_log_info "Using '$xtmpdir' as mariadb-backup temporary directory" tmpopts=" --tmpdir='$xtmpdir'" itmpdir="$(mktemp -d)" - wsrep_log_info "Using '$itmpdir' as mariabackup working directory" + wsrep_log_info "Using '$itmpdir' as mariadb-abackup working directory" usrst=0 if [ -n "$WSREP_SST_OPT_USER" ]; then @@ -1148,7 +1148,7 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then fi # if compression is enabled for backup files, then add the - # appropriate options to the mariabackup command line: + # appropriate options to the mariadb-backup command line: if [ "$compress" != 'none' ]; then iopts="--compress${compress:+=$compress}${iopts:+ }$iopts" if [ -n "$compress_threads" ]; then @@ -1170,7 +1170,7 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then set -e if [ ${RC[0]} -ne 0 ]; then - wsrep_log_error "mariabackup finished with error: ${RC[0]}." 
\ + wsrep_log_error "mariadb-backup finished with error: ${RC[0]}." \ "Check syslog or '$INNOBACKUPLOG' for details" exit 22 elif [ ${RC[$(( ${#RC[@]}-1 ))]} -eq 1 ]; then @@ -1178,7 +1178,7 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then exit 22 fi - # mariabackup implicitly writes PID to fixed location in $xtmpdir + # mariadb-backup implicitly writes PID to fixed location in $xtmpdir BACKUP_PID="$xtmpdir/xtrabackup_pid" else # BYPASS FOR IST @@ -1450,14 +1450,14 @@ else # joiner if [ ! -s "$DATA/xtrabackup_checkpoints" ]; then wsrep_log_error "xtrabackup_checkpoints missing," \ - "failed mariabackup/SST on donor" + "failed mariadb-backup/SST on donor" exit 2 fi - # Compact backups are not supported by mariabackup + # Compact backups are not supported by mariadb-backup if grep -qw -F 'compact = 1' "$DATA/xtrabackup_checkpoints"; then wsrep_log_info "Index compaction detected" - wsrel_log_error "Compact backups are not supported by mariabackup" + wsrel_log_error "Compact backups are not supported by mariadb-backup" exit 2 fi @@ -1509,9 +1509,9 @@ else # joiner wsrep_log_info "Preparing the backup at $DATA" setup_commands - timeit 'mariabackup prepare stage' "$INNOAPPLY" + timeit 'mariadb-backup prepare stage' "$INNOAPPLY" if [ $? -ne 0 ]; then - wsrep_log_error "mariabackup apply finished with errors." \ + wsrep_log_error "mariadb-backup apply finished with errors." \ "Check syslog or '$INNOAPPLYLOG' for details." exit 22 fi @@ -1556,7 +1556,7 @@ else # joiner MAGIC_FILE="$TDATA/$INFO_FILE" wsrep_log_info "Moving the backup to $TDATA" - timeit 'mariabackup move stage' "$INNOMOVE" + timeit 'mariadb-backup move stage' "$INNOMOVE" if [ $? -eq 0 ]; then wsrep_log_info "Move successful, removing $DATA" rm -rf "$DATA" diff --git a/sql-common/client.c b/sql-common/client.c index 916abdea1906f..78d14b853a020 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -4097,7 +4097,7 @@ int STDCALL mysql_set_character_set(MYSQL *mysql, const char *cs_name) /* Skip execution of "SET NAMES" for pre-4.1 servers */ if (mysql_get_server_version(mysql) < 40100) return 0; - sprintf(buff, "SET NAMES %s", cs_name); + snprintf(buff, sizeof(buff), "SET NAMES %s", cs_name); if (!mysql_real_query(mysql, buff, (uint) strlen(buff))) { mysql->charset= cs; diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index eac111d32d7a2..f363816fe4914 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -1451,7 +1451,7 @@ uchar *debug_sync_value_ptr(THD *thd) if (opt_debug_sync_timeout) { - static char on[]= "ON - current signals: '"; + static const char on[]= "ON - current signals: '"; // Ensure exclusive access to debug_sync_global.ds_signal mysql_mutex_lock(&debug_sync_global.ds_mutex); diff --git a/sql/field.cc b/sql/field.cc index c4819ebaa7dd1..e7719264ffdd9 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -7624,7 +7624,8 @@ int Field_string::cmp(const uchar *a_ptr, const uchar *b_ptr) const return field_charset()->coll->strnncollsp_nchars(field_charset(), a_ptr, field_length, b_ptr, field_length, - Field_string::char_length()); + Field_string::char_length(), + MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES); } @@ -8004,10 +8005,11 @@ int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr) const int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr, - size_t prefix_len) const + size_t prefix_char_len) const { - /* avoid expensive well_formed_char_length if possible */ - if (prefix_len == table->field[field_index]->field_length) + /* avoid more expensive strnncollsp_nchars() if 
possible */ + if (prefix_char_len * field_charset()->mbmaxlen == + table->field[field_index]->field_length) return Field_varstring::cmp(a_ptr, b_ptr); size_t a_length, b_length; @@ -8027,8 +8029,8 @@ int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr, a_length, b_ptr + length_bytes, b_length, - prefix_len / - field_charset()->mbmaxlen); + prefix_char_len, + 0); } @@ -8815,7 +8817,7 @@ int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr) const int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr, - size_t prefix_len) const + size_t prefix_char_len) const { uchar *blob1,*blob2; memcpy(&blob1, a_ptr+packlength, sizeof(char*)); @@ -8824,8 +8826,8 @@ int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr, return field_charset()->coll->strnncollsp_nchars(field_charset(), blob1, a_len, blob2, b_len, - prefix_len / - field_charset()->mbmaxlen); + prefix_char_len, + 0); } @@ -10010,7 +10012,7 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value) (not the table->record[0] necessarily) */ int Field_bit::cmp_prefix(const uchar *a, const uchar *b, - size_t prefix_len) const + size_t prefix_char_len) const { my_ptrdiff_t a_diff= a - ptr; my_ptrdiff_t b_diff= b - ptr; diff --git a/sql/field.h b/sql/field.h index d4b59a88f59b9..642456b9774b4 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1301,7 +1301,7 @@ class Field: public Value_source Currently it's only used in partitioning. */ virtual int cmp_prefix(const uchar *a, const uchar *b, - size_t prefix_len) const + size_t prefix_char_len) const { return cmp(a, b); } virtual int cmp(const uchar *,const uchar *) const=0; virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U) const @@ -4201,7 +4201,7 @@ class Field_varstring :public Field_longstr { my_decimal *val_decimal(my_decimal *) override; bool send(Protocol *protocol) override; int cmp(const uchar *a,const uchar *b) const override; - int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len) const + int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_char_len) const override; void sort_string(uchar *buff,uint length) override; uint get_key_image(uchar *buff, uint length, @@ -4494,7 +4494,7 @@ class Field_blob :public Field_longstr { String *val_str(String *, String *) override; my_decimal *val_decimal(my_decimal *) override; int cmp(const uchar *a, const uchar *b) const override; - int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len) const + int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_char_len) const override; int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length) const; @@ -4973,7 +4973,7 @@ class Field_bit :public Field { int cmp_binary_offset(uint row_offset) override { return cmp_offset(row_offset); } int cmp_prefix(const uchar *a, const uchar *b, - size_t max_length) const override; + size_t prefix_char_length) const override; int key_cmp(const uchar *a, const uchar *b) const override { return cmp_binary((uchar *) a, (uchar *) b); } int key_cmp(const uchar *str, uint length) const override; diff --git a/sql/filesort.cc b/sql/filesort.cc index bf5520955c9c1..10e4d388cae1a 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -224,7 +224,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str); DEBUG_SYNC(thd, "filesort_start"); - if (!(sort= new SORT_INFO)) + if (!(sort= new SORT_INFO)) // Note that this is not automatically freed! 
return 0; if (subselect && subselect->filesort_buffer.is_allocated()) @@ -439,6 +439,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, error= 0; err: + param.tmp_buffer.free(); if (!subselect || !subselect->is_uncacheable()) { if (!param.using_addon_fields()) @@ -1119,7 +1120,7 @@ void store_length(uchar *to, uint length, uint pack_length) void Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { CHARSET_INFO *cs= item->collation.collation; bool maybe_null= item->maybe_null(); @@ -1127,7 +1128,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, if (maybe_null) *to++= 1; - String *res= item->str_result(¶m->tmp_buffer); + Binary_string *res= item->str_result(tmp_buffer); if (!res) { if (maybe_null) @@ -1189,7 +1190,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { longlong value= item->val_int_result(); make_sort_key_longlong(to, item->maybe_null(), item->null_value, @@ -1200,7 +1201,7 @@ Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { MYSQL_TIME buf; // This is a temporal type. No nanoseconds. Rounding mode is not important. @@ -1222,7 +1223,7 @@ Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_timestamp_common::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { THD *thd= current_thd; uint binlen= my_timestamp_binary_length(item->decimals); @@ -1315,7 +1316,7 @@ Type_handler::make_packed_sort_key_longlong(uchar *to, bool maybe_null, void Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); if (item->maybe_null()) @@ -1335,7 +1336,7 @@ Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_real_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { double value= item->val_result(); if (item->maybe_null()) @@ -2580,7 +2581,7 @@ void Sort_param::try_to_pack_sortkeys() uint Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { CHARSET_INFO *cs= item->collation.collation; bool maybe_null= item->maybe_null(); @@ -2588,7 +2589,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, if (maybe_null) *to++= 1; - Binary_string *res= item->str_result(¶m->tmp_buffer); + Binary_string *res= item->str_result(tmp); if (!res) { if (maybe_null) @@ -2619,7 +2620,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { longlong value= item->val_int_result(); return make_packed_sort_key_longlong(to, item->maybe_null(), @@ -2631,7 +2632,7 @@ 
Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); if (item->maybe_null()) @@ -2653,7 +2654,7 @@ Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { double value= item->val_result(); if (item->maybe_null()) @@ -2674,7 +2675,7 @@ Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { MYSQL_TIME buf; // This is a temporal type. No nanoseconds. Rounding mode is not important. @@ -2696,7 +2697,7 @@ Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_timestamp_common::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { THD *thd= current_thd; uint binlen= my_timestamp_binary_length(item->decimals); @@ -3040,7 +3041,8 @@ static uint make_sortkey(Sort_param *param, uchar *to) { // Item sort_field->item->type_handler()->make_sort_key_part(to, sort_field->item, - sort_field, param); + sort_field, + &param->tmp_buffer); if ((maybe_null= sort_field->item->maybe_null())) to++; } @@ -3093,7 +3095,7 @@ static uint make_packed_sortkey(Sort_param *param, uchar *to) { Item *item= sort_field->item; length= item->type_handler()->make_packed_sort_key_part(to, item, sort_field, - param); + &param->tmp_buffer); if ((maybe_null= sort_field->item->maybe_null())) to++; } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 3752907339124..cfa281da4ef1d 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -1304,7 +1304,18 @@ class ha_partition final :public handler The following code is not safe if you are using different storage engines or different index types per partition. */ - return m_file[0]->index_flags(inx, part, all_parts); + ulong part_flags= m_file[0]->index_flags(inx, part, all_parts); + + /* + The underlying storage engine might support Rowid Filtering. But + ha_partition does not forward the needed SE API calls, so the feature + will not be used. + + Note: It's the same with IndexConditionPushdown, except for its variant + of IndexConditionPushdown+BatchedKeyAccess (that one works). Because of + that, we do not clear HA_DO_INDEX_COND_PUSHDOWN here.
+ */ + return part_flags & ~HA_DO_RANGE_FILTER_PUSHDOWN; } /** diff --git a/sql/handler.cc b/sql/handler.cc index e074b46f52f16..84e5feaef6a8c 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -4664,7 +4664,7 @@ int handler::check_collation_compatibility() { ulong mysql_version= table->s->mysql_version; - if (mysql_version < 50124) + if (mysql_version < Charset::latest_mariadb_version_with_collation_change()) { KEY *key= table->key_info; KEY *key_end= key + table->s->keys; @@ -4678,18 +4678,7 @@ int handler::check_collation_compatibility() continue; Field *field= table->field[key_part->fieldnr - 1]; uint cs_number= field->charset()->number; - if ((mysql_version < 50048 && - (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */ - cs_number == 41 || /* latin7_general_ci - bug #29461 */ - cs_number == 42 || /* latin7_general_cs - bug #29461 */ - cs_number == 20 || /* latin7_estonian_cs - bug #29461 */ - cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */ - cs_number == 22 || /* koi8u_general_ci - bug #29461 */ - cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */ - cs_number == 26)) || /* cp1250_general_ci - bug #29461 */ - (mysql_version < 50124 && - (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */ - cs_number == 35))) /* ucs2_general_ci - bug #27877 */ + if (Charset::collation_changed_order(mysql_version, cs_number)) return HA_ADMIN_NEEDS_UPGRADE; } } @@ -7999,11 +7988,13 @@ static int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_key_length) { int res = 0; + uint to_delete_counter= 0; + INDEX_STATS *index_stats_to_delete[MAX_INDEXES]; DBUG_ENTER("del_global_index_stats_for_table"); mysql_mutex_lock(&LOCK_global_index_stats); - for (uint i= 0; i < global_index_stats.records;) + for (uint i= 0; i < global_index_stats.records; i++) { INDEX_STATS *index_stats = (INDEX_STATS*) my_hash_element(&global_index_stats, i); @@ -8013,19 +8004,13 @@ int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_ke index_stats->index_name_length >= cache_key_length && !memcmp(index_stats->index, cache_key, cache_key_length)) { - res= my_hash_delete(&global_index_stats, (uchar*)index_stats); - /* - In our HASH implementation on deletion one elements - is moved into a place where a deleted element was, - and the last element is moved into the empty space. - Thus we need to re-examine the current element, but - we don't have to restart the search from the beginning. - */ + index_stats_to_delete[to_delete_counter++]= index_stats; } - else - i++; } + for (uint i= 0; i < to_delete_counter; i++) + res= my_hash_delete(&global_index_stats, (uchar*)index_stats_to_delete[i]); + mysql_mutex_unlock(&LOCK_global_index_stats); DBUG_RETURN(res); } diff --git a/sql/item.cc b/sql/item.cc index 298da95905a43..8081df91ce48f 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5772,7 +5772,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) max_arg_level for the function if it's needed. 
*/ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= select->nest_level) { Item::Type ref_type= (*reference)->type(); @@ -5798,7 +5799,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) (Item_ident*) (*reference) : 0), false); if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= select->nest_level) { set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -6147,10 +6149,8 @@ bool Item_field::fix_fields(THD *thd, Item **reference) goto mark_non_agg_field; } - if (!thd->lex->current_select->no_wrap_view_item && + if (select && !thd->lex->current_select->no_wrap_view_item && thd->lex->in_sum_func && - select && - thd->lex == select->parent_lex && thd->lex->in_sum_func->nest_level == select->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8188,7 +8188,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= last_checked_context->select_lex->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8212,7 +8213,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= last_checked_context->select_lex->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8227,7 +8229,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) 1. outer reference (will be fixed later by the fix_inner_refs function); 2. an unnamed reference inside an aggregate function. 
*/ - if (!((*ref)->type() == REF_ITEM && + if (!set_properties_only && + !((*ref)->type() == REF_ITEM && ((Item_ref *)(*ref))->ref_type() == OUTER_REF) && (((*ref)->with_sum_func() && name.str && !(current_sel->get_linkage() != GLOBAL_OPTIONS_TYPE && diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index 82efddb5a5a35..6f6b6a7afe868 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -492,9 +492,14 @@ class Item_func_json_length: public Item_long_func { bool check_arguments() const override { - return args[0]->check_type_can_return_text(func_name_cstring()) || - (arg_count > 1 && - args[1]->check_type_general_purpose_string(func_name_cstring())); + const LEX_CSTRING name= func_name_cstring(); + if (arg_count == 0 || arg_count > 2) + { + my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str); + return true; + } + return args[0]->check_type_can_return_text(name) || + (arg_count > 1 && args[1]->check_type_general_purpose_string(name)); } protected: json_path_with_flags path; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index b38d7086548e1..40d8c791a06b6 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -3970,6 +3970,7 @@ String *Item_func_weight_string::val_str(String *str) weigth_flags); DBUG_ASSERT(frm_length <= tmp_length); + str->set_charset(&my_charset_bin); str->length(frm_length); null_value= 0; return str; @@ -4049,6 +4050,7 @@ String *Item_func_unhex::val_str(String *str) from= res->ptr(); null_value= 0; + str->set_charset(&my_charset_bin); str->length(length); to= (char*) str->ptr(); if (res->length() % 2) diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 72de21ee4832d..ffac6dbb912f6 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -428,7 +428,8 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) sl= sl->master_unit()->outer_select() ) sl->master_unit()->item->with_flags|= item_with_t::SUM_FUNC; } - thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); + if (aggr_sel) + thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); if ((thd->lex->describe & DESCRIBE_EXTENDED) && aggr_sel) { diff --git a/sql/key.cc b/sql/key.cc index b65851cf7c134..4e40a3354ce74 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -615,7 +615,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec) that take the max length into account. 
*/ if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff, - key_part->length))) + key_part->length / + field->charset()->mbmaxlen))) DBUG_RETURN(result * GREATER); next_loop: key_part++; diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index 98325fe74981f..9fa1c08795040 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -5995,7 +5995,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) to avoid query cache being polluted with stale entries, */ # ifdef WITH_WSREP - if (!WSREP(thd) && !wsrep_thd_is_applying(thd)) + /* Query cache is not invalidated on wsrep applier here */ + if (!(WSREP(thd) && wsrep_thd_is_applying(thd))) # endif /* WITH_WSREP */ query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock); #endif /* HAVE_QUERY_CACHE */ @@ -7909,19 +7910,21 @@ Write_rows_log_event::do_exec_row(rpl_group_info *rgi) const char *tmp= thd->get_proc_info(); LEX_CSTRING tmp_db= thd->db; char *message, msg[128]; - const char *table_name= m_table->s->table_name.str; - char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name)); - my_snprintf(msg, sizeof(msg),"Write_rows_log_event::write_row() on table %c%s%c", - quote_char, table_name, quote_char); + const LEX_CSTRING &table_name= m_table->s->table_name; + const char quote_char= + get_quote_char_for_identifier(thd, table_name.str, table_name.length); + my_snprintf(msg, sizeof msg, + "Write_rows_log_event::write_row() on table %c%.*s%c", + quote_char, int(table_name.length), table_name.str, quote_char); thd->reset_db(&m_table->s->db); message= msg; int error; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Write_rows_log_event::write_row(%lld) on table %c%s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, - quote_char); + "Write_rows_log_event::write_row(%lld) on table %c%.*s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, + int(table_name.length), table_name.str, quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8534,10 +8537,12 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) const char *tmp= thd->get_proc_info(); LEX_CSTRING tmp_db= thd->db; char *message, msg[128]; - const char *table_name= m_table->s->table_name.str; - char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name)); - my_snprintf(msg, sizeof(msg),"Delete_rows_log_event::find_row() on table %c%s%c", - quote_char, table_name, quote_char); + const LEX_CSTRING &table_name= m_table->s->table_name; + const char quote_char= + get_quote_char_for_identifier(thd, table_name.str, table_name.length); + my_snprintf(msg, sizeof msg, + "Delete_rows_log_event::find_row() on table %c%.*s%c", + quote_char, int(table_name.length), table_name.str, quote_char); thd->reset_db(&m_table->s->db); message= msg; const bool invoke_triggers= (m_table->triggers && do_invoke_trigger()); @@ -8545,26 +8550,29 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Delete_rows_log_event::find_row(%lld) on table %c%s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + "Delete_rows_log_event::find_row(%lld) on table %c%.*s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, + int(table_name.length), table_name.str, quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ thd_proc_info(thd, message); if (likely(!(error= find_row(rgi)))) - { + { /* Delete the record found, located in record[0] 
*/ - my_snprintf(msg, sizeof(msg),"Delete_rows_log_event::ha_delete_row() on table %c%s%c", - quote_char, table_name, quote_char); + my_snprintf(msg, sizeof msg, + "Delete_rows_log_event::ha_delete_row() on table %c%.*s%c", + quote_char, int(table_name.length), table_name.str, + quote_char); message= msg; #ifdef WSREP_PROC_INFO snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Delete_rows_log_event::ha_delete_row(%lld) on table %c%s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, - quote_char); + "Delete_rows_log_event::ha_delete_row(%lld) on table %c%.*s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, + int(table_name.length), table_name.str, quote_char); message= thd->wsrep_info; #endif thd_proc_info(thd, message); @@ -8697,17 +8705,20 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) DBUG_ASSERT(m_table != NULL); LEX_CSTRING tmp_db= thd->db; char *message, msg[128]; - const char *table_name= m_table->s->table_name.str; - char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name)); - my_snprintf(msg, sizeof(msg),"Update_rows_log_event::find_row() on table %c%s%c", - quote_char, table_name, quote_char); + const LEX_CSTRING &table_name= m_table->s->table_name; + const char quote_char= + get_quote_char_for_identifier(thd, table_name.str, table_name.length); + my_snprintf(msg, sizeof msg, + "Update_rows_log_event::find_row() on table %c%.*s%c", + quote_char, int(table_name.length), table_name.str, quote_char); thd->reset_db(&m_table->s->db); message= msg; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::find_row(%lld) on table %c%s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + "Update_rows_log_event::find_row(%lld) on table %c%.*s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, + int(table_name.length), table_name.str, quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8747,14 +8758,15 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) store_record(m_table,record[1]); m_curr_row= m_curr_row_end; - my_snprintf(msg, sizeof(msg),"Update_rows_log_event::unpack_current_row() on table %c%s%c", - quote_char, table_name, quote_char); + my_snprintf(msg, sizeof msg, + "Update_rows_log_event::unpack_current_row() on table %c%.*s%c", + quote_char, int(table_name.length), table_name.str, quote_char); message= msg; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::unpack_current_row(%lld) on table %c%s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, - quote_char); + "Update_rows_log_event::unpack_current_row(%lld) on table %c%.*s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, + int(table_name.length), table_name.str, quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8777,13 +8789,15 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength); #endif - my_snprintf(msg, sizeof(msg),"Update_rows_log_event::ha_update_row() on table %c%s%c", - quote_char, table_name, quote_char); + my_snprintf(msg, sizeof msg, + "Update_rows_log_event::ha_update_row() on table %c%.*s%c", + quote_char, int(table_name.length), table_name.str, quote_char); message= msg; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::ha_update_row(%lld) on table %c%s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, 
quote_char); + "Update_rows_log_event::ha_update_row(%lld) on table %c%.*s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, + int(table_name.length), table_name.str, quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ diff --git a/sql/mysqld.h b/sql/mysqld.h index efa072272c543..5263e397a1518 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -904,10 +904,12 @@ enum enum_query_type // it evaluates to. Should be used for error messages, so that they // don't reveal values. QT_NO_DATA_EXPANSION= (1 << 9), + // Remove wrappers added for TVC when creating or showing view + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW= (1 << 12), // The temporary tables used by the query might be freed by the time // this print() call is made. - QT_DONT_ACCESS_TMP_TABLES= (1 << 12) + QT_DONT_ACCESS_TMP_TABLES= (1 << 13) }; diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index b39179428746e..c0a08d9d703e1 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -264,6 +264,12 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id, STRING_WITH_LEN("now WAIT_FOR proceed_by_1000")); } }); + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2001) { + DBUG_ASSERT(!rgi->worker_error || entry->stop_on_error_sub_id == sub_id); + debug_sync_set_action(thd, STRING_WITH_LEN("now SIGNAL cont_worker3")); + } + }); #endif if (rgi->killed_for_retry == rpl_group_info::RETRY_KILL_PENDING) @@ -289,6 +295,11 @@ signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err) In case we get an error during commit, inform following transactions that we aborted our commit. */ + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2002) { + debug_sync_set_action(thd, STRING_WITH_LEN("now WAIT_FOR cont_worker2")); + }}); + rgi->unmark_start_commit(); rgi->cleanup_context(thd, true); rgi->rli->abort_slave= true; @@ -823,7 +834,14 @@ retry_event_group(rpl_group_info *rgi, rpl_parallel_thread *rpt, thd->reset_killed(); thd->clear_error(); rgi->killed_for_retry = rpl_group_info::RETRY_KILL_NONE; - +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2003) { + debug_sync_set_action(thd, + STRING_WITH_LEN("now WAIT_FOR cont_worker3")); + } + }); +#endif /* If we retry due to a deadlock kill that occurred during the commit step, we might have already updated (but not committed) an update of table @@ -842,13 +860,10 @@ retry_event_group(rpl_group_info *rgi, rpl_parallel_thread *rpt, for (;;) { mysql_mutex_lock(&entry->LOCK_parallel_entry); - if (entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX || - DBUG_IF("simulate_mdev_12746") || - rgi->gtid_sub_id < entry->stop_on_error_sub_id) - { - register_wait_for_prior_event_group_commit(rgi, entry); - } - else + register_wait_for_prior_event_group_commit(rgi, entry); + if (entry->stop_on_error_sub_id != (uint64) ULONGLONG_MAX && + !DBUG_IF("simulate_mdev_12746") && + rgi->gtid_sub_id >= entry->stop_on_error_sub_id) { /* A failure of a preceding "parent" transaction may not be @@ -2073,6 +2088,9 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, gco->prior_sub_id= prior_sub_id; gco->installed= false; gco->flags= 0; +#ifndef DBUG_OFF + gco->gc_done= false; +#endif return gco; } @@ -2080,6 +2098,10 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, void rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco) { +#ifndef DBUG_OFF + DBUG_ASSERT(!gco->gc_done); + gco->gc_done= true; +#endif if 
(!loc_gco_list) loc_gco_last_ptr_ptr= &gco->next_gco; else diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index 9a8dfc7e3866b..f2bf36aa4a162 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -91,6 +91,9 @@ struct group_commit_orderer { FORCE_SWITCH= 2 }; uint8 flags; +#ifndef DBUG_OFF + bool gc_done; +#endif }; diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 83668449c2d4c..239616865608b 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -2436,8 +2436,13 @@ mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco, uint64 count= ++e->count_committing_event_groups; /* Signal any following GCO whose wait_count has been reached now. */ tmp= gco; + + DBUG_ASSERT(!tmp->gc_done); + while ((tmp= tmp->next_gco)) { + DBUG_ASSERT(!tmp->gc_done); + uint64 wait_count= tmp->wait_count; if (wait_count > count) break; diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index b57fc199826b9..17adeed86e782 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -317,8 +317,8 @@ void Active_tranx::clear_active_tranx_nodes(const char *log_file_name, /******************************************************************************* * - * class: the basic code layer for syncsync master. - * class: the basic code layer for syncsync slave. + * class: the basic code layer for semisync master. + * class: the basic code layer for semisync slave. * * The most important functions during semi-syn replication listed: * @@ -809,8 +809,6 @@ void Repl_semi_sync_master::dump_end(THD* thd) remove_slave(); ack_receiver.remove_slave(thd); - - return; } int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index 9991f26120210..fb3f94d019e73 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -27,6 +27,7 @@ #ifdef _WIN32 #include +#include #define SIGNAL_FMT "exception 0x%x" #else #define SIGNAL_FMT "signal %d" @@ -66,9 +67,9 @@ static inline void output_core_info() (int) len, buff); } #ifdef __FreeBSD__ - if ((fd= my_open("/proc/curproc/rlimit", O_RDONLY, MYF(MY_NO_REGISTER))) >= 0) + if ((fd= open("/proc/curproc/rlimit", O_RDONLY)) >= 0) #else - if ((fd= my_open("/proc/self/limits", O_RDONLY, MYF(MY_NO_REGISTER))) >= 0) + if ((fd= open("/proc/self/limits", O_RDONLY)) >= 0) #endif { my_safe_printf_stderr("Resource Limits:\n"); @@ -76,21 +77,20 @@ static inline void output_core_info() { my_write_stderr(buff, len); } - my_close(fd, MYF(0)); + close(fd); } #ifdef __linux__ - if ((fd= my_open("/proc/sys/kernel/core_pattern", O_RDONLY, - MYF(MY_NO_REGISTER))) >= 0) + if ((fd= open("/proc/sys/kernel/core_pattern", O_RDONLY)) >= 0) { len= read(fd, (uchar*)buff, sizeof(buff)); my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff); - my_close(fd, MYF(0)); + close(fd); } - if ((fd= my_open("/proc/version", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/version", O_RDONLY)) >= 0) { - len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); + len= read(fd, (uchar*)buff, sizeof(buff)); my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); - my_close(fd, MYF(0)); + close(fd); } #endif #elif defined(__APPLE__) || defined(__FreeBSD__) @@ -104,11 +104,14 @@ static inline void output_core_info() { my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); } -#else +#elif defined(HAVE_GETCWD) char buff[80]; - my_getwd(buff, sizeof(buff), 0); - my_safe_printf_stderr("Writing a core file at %s\n", buff); - fflush(stderr); + + if (getcwd(buff, sizeof(buff))) + { + 
my_safe_printf_stderr("Writing a core file at %.*s\n", (int) sizeof(buff), buff); + fflush(stderr); + } #endif } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index d225b018488e5..283c629f42abb 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2568,6 +2568,8 @@ static bool acl_load(THD *thd, const Grant_tables& tables) "possible to remove this privilege using REVOKE.", host.host.hostname, host.db); } + else if (!host.db) + host.db= const_cast(host_not_specified.str); host.access= host_table.get_access(); host.access= fix_rights_for_db(host.access); host.sort= get_magic_sort("hd", host.host.hostname, host.db); @@ -2576,8 +2578,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables) { sql_print_warning("'host' entry '%s|%s' " "ignored in --skip-name-resolve mode.", - safe_str(host.host.hostname), - safe_str(host.db)); + host.host.hostname, host.db); continue; } #ifndef TO_BE_REMOVED @@ -3671,7 +3672,7 @@ privilege_t acl_get(const char *host, const char *ip, ACL_HOST *acl_host=dynamic_element(&acl_hosts,i,ACL_HOST*); if (compare_hostname(&acl_host->host,host,ip)) { - if (!acl_host->db || !wild_compare(db,acl_host->db,db_is_pattern)) + if (!wild_compare(db, acl_host->db, db_is_pattern)) { host_access=acl_host->access; // Fully specified. Take it break; @@ -6699,6 +6700,7 @@ static int update_role_columns(GRANT_TABLE *merged, } } +restart: for (uint i=0 ; i < mh->records ; i++) { GRANT_COLUMN *col = (GRANT_COLUMN *)my_hash_element(mh, i); @@ -6707,6 +6709,7 @@ static int update_role_columns(GRANT_TABLE *merged, { changed= 1; my_hash_delete(mh, (uchar*)col); + goto restart; } } DBUG_ASSERT(rights == merged->cols); diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h index 4136dff1e0c5b..d033e79168ab3 100644 --- a/sql/sql_analyze_stmt.h +++ b/sql/sql_analyze_stmt.h @@ -63,14 +63,19 @@ class Exec_time_tracker if (my_gap_tracker) attach_gap_time_tracker(thd, my_gap_tracker, end); } -public: - Exec_time_tracker() : count(0), cycles(0), my_gap_tracker(NULL) {} /* - The time spent between stop_tracking() call on this object and any - other time measurement will be billed to this tracker. + The time spent after stop_tracking() call on this object and any + subsequent time tracking call will be billed to this tracker. */ Gap_time_tracker *my_gap_tracker; +public: + Exec_time_tracker() : count(0), cycles(0), my_gap_tracker(NULL) {} + + void set_gap_tracker(Gap_time_tracker *gap_tracker) + { + my_gap_tracker= gap_tracker; + } // interface for collecting time void start_tracking(THD *thd) @@ -160,6 +165,25 @@ class Time_and_counter_tracker: public Exec_time_tracker if (unlikely((tracker)->timed)) \ { (tracker)->stop_tracking(thd); } + +/* + Just a counter to increment one value. Wrapped in a class to be uniform + with other counters used by ANALYZE. +*/ + +class Counter_tracker +{ +public: + Counter_tracker() : r_scans(0) {} + ha_rows r_scans; + + inline void on_scan_init() { r_scans++; } + + bool has_scans() const { return (r_scans != 0); } + ha_rows get_loops() const { return r_scans; } +}; + + /* A class for collecting read statistics. @@ -170,20 +194,16 @@ class Time_and_counter_tracker: public Exec_time_tracker It can be used to track reading from files, buffers, etc). 
*/ -class Table_access_tracker +class Table_access_tracker { public: - Table_access_tracker() : - r_scans(0), r_rows(0), /*r_rows_after_table_cond(0),*/ - r_rows_after_where(0) + Table_access_tracker() : r_scans(0), r_rows(0), r_rows_after_where(0) {} - ha_rows r_scans; /* How many scans were ran on this join_tab */ + ha_rows r_scans; /* how many scans were run on this join_tab */ ha_rows r_rows; /* How many rows we've got after that */ ha_rows r_rows_after_where; /* Rows after applying attached part of WHERE */ - bool has_scans() const { return (r_scans != 0); } - ha_rows get_loops() const { return r_scans; } double get_avg_rows() const { return r_scans @@ -202,6 +222,9 @@ class Table_access_tracker inline void on_scan_init() { r_scans++; } inline void on_record_read() { r_rows++; } inline void on_record_after_where() { r_rows_after_where++; } + + bool has_scans() const { return (r_scans != 0); } + ha_rows get_loops() const { return r_scans; } }; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 8db958b25b07f..bc9ab817b86b3 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -6293,7 +6293,10 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, size_t length, if (cached_field_index < table->s->fields && !my_strcasecmp(system_charset_info, table->field[cached_field_index]->field_name.str, name)) + { field= table->field[cached_field_index]; + DEBUG_SYNC(thd, "table_field_cached"); + } else { LEX_CSTRING fname= {name, length}; @@ -6757,6 +6760,13 @@ find_field_in_tables(THD *thd, Item_ident *item, if (last_table) last_table= last_table->next_name_resolution_table; + field_index_t fake_index_for_duplicate_search= NO_CACHED_FIELD_INDEX; + /* + For the field search it will point to the field cache, but for the duplicate + search it will point to fake_index_for_duplicate_search (no cache + present). + */ + field_index_t *current_cache= &(item->cached_field_index); for (; cur_table != last_table ; cur_table= cur_table->next_name_resolution_table) { @@ -6771,7 +6781,7 @@ find_field_in_tables(THD *thd, Item_ident *item, SQLCOM_SHOW_FIELDS) ? false : check_privileges, allow_rowid, - &(item->cached_field_index), + current_cache, register_tree_change, &actual_table); if (cur_field) @@ -6786,7 +6796,7 @@ find_field_in_tables(THD *thd, Item_ident *item, item->name.str, db, table_name, ignored_tables, ref, false, allow_rowid, - &(item->cached_field_index), + current_cache, register_tree_change, &actual_table); if (cur_field) @@ -6803,8 +6813,19 @@ find_field_in_tables(THD *thd, Item_ident *item, Store the original table of the field, which may be different from cur_table in the case of NATURAL/USING join. */ - item->cached_table= (!actual_table->cacheable_table || found) ? - 0 : actual_table; + if (actual_table->cacheable_table /*(1)*/ && !found /*(2)*/) + { + /* + We have just found a field allowed to cache (1) and + it is not a duplicate search (2).
+ */ + item->cached_table= actual_table; + } + else + { + item->cached_table= NULL; + item->cached_field_index= NO_CACHED_FIELD_INDEX; + } DBUG_ASSERT(thd->where); /* @@ -6823,6 +6844,7 @@ find_field_in_tables(THD *thd, Item_ident *item, return (Field*) 0; } found= cur_field; + current_cache= &fake_index_for_duplicate_search; } } @@ -8279,9 +8301,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context, table_list; table_list= table_list->next_local) { - if (table_list->merge_underlying_list) + if (table_list->is_merged_derived() && table_list->merge_underlying_list) { - DBUG_ASSERT(table_list->is_merged_derived()); Query_arena *arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); bool res; diff --git a/sql/sql_class.h b/sql/sql_class.h index 376399d0f479a..8d53454abb95d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1896,7 +1896,7 @@ show_system_thread(enum_thread_type thread) RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_BACKGROUND); RETURN_NAME_AS_STRING(SYSTEM_THREAD_SEMISYNC_MASTER_BACKGROUND); default: - sprintf(buf, "", thread); + snprintf(buf, sizeof(buf), "", thread); return buf; } #undef RETURN_NAME_AS_STRING @@ -7720,7 +7720,7 @@ class Database_qualified_name if (unlikely(!(dst->str= tmp= (char*) alloc_root(mem_root, dst->length + 1)))) return true; - sprintf(tmp, "%.*s%.*s%.*s", + snprintf(tmp, dst->length + 1, "%.*s%.*s%.*s", (int) m_db.length, (m_db.length ? m_db.str : ""), dot, ".", (int) m_name.length, m_name.str); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index d31d0b949ba18..a53c2ef75e38f 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -1096,6 +1096,8 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds, DBUG_RETURN(TRUE); select_lex->fix_prepare_information(thd, conds, &fake_conds); + if (!thd->lex->upd_del_where) + thd->lex->upd_del_where= *conds; DBUG_RETURN(FALSE); } diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 6f0857239dd50..2250310635076 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -351,24 +351,6 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived) DBUG_RETURN(FALSE); } - if (dt_select->uncacheable & UNCACHEABLE_RAND) - { - /* There is random function => fall back to materialization. */ - cause= "Random function in the select"; - if (unlikely(thd->trace_started())) - { - OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived, - derived->is_derived() ? "derived" : "view", - derived->alias.str ? derived->alias.str : "", - derived->get_unit()->first_select()->select_number, - "materialized"); - trace_derived.add("cause", cause); - } - derived->change_refs_to_fields(); - derived->set_materialized_derived(); - DBUG_RETURN(FALSE); - } - if (derived->dt_handler) { derived->change_refs_to_fields(); diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 83ae0132ea13b..67af0a5dab688 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -185,7 +185,7 @@ void Explain_query::notify_tables_are_closed() Send EXPLAIN output to the client. 
*/ -int Explain_query::send_explain(THD *thd) +int Explain_query::send_explain(THD *thd, bool extended) { select_result *result; LEX *lex= thd->lex; @@ -198,8 +198,22 @@ int Explain_query::send_explain(THD *thd) if (thd->lex->explain_json) print_explain_json(result, thd->lex->analyze_stmt, false /*is_show_cmd*/); else + { res= print_explain(result, lex->describe, thd->lex->analyze_stmt); - + if (extended) + { + char buff[1024]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + /* + The warnings system requires input in utf8, @see + mysqld_show_warnings(). + */ + lex->unit.print(&str, QT_EXPLAIN_EXTENDED); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_YES, str.c_ptr_safe()); + } + } if (res) result->abort_result_set(); else @@ -209,6 +223,7 @@ int Explain_query::send_explain(THD *thd) } + /* The main entry point to print EXPLAIN of the entire query */ @@ -1966,12 +1981,35 @@ void Explain_table_access::print_explain_json(Explain_query *query, if (is_analyze) { - //writer->add_member("r_loops").add_ll(jbuf_tracker.get_loops()); + writer->add_member("r_loops").add_ll(jbuf_loops_tracker.get_loops()); + writer->add_member("r_filtered"); if (jbuf_tracker.has_scans()) writer->add_double(jbuf_tracker.get_filtered_after_where()*100.0); else writer->add_null(); + + writer->add_member("r_unpack_time_ms"); + writer->add_double(jbuf_unpack_tracker.get_time_ms()); + + writer->add_member("r_other_time_ms"). + add_double(jbuf_extra_time_tracker.get_time_ms()); + /* + effective_rows is average number of matches we got for an incoming + row. The row is stored in the join buffer and then is read + from there, possibly multiple times. We can't count this number + directly. Infer it as: + total_number_of_row_combinations_considered / r_loops. + */ + writer->add_member("r_effective_rows"); + if (jbuf_loops_tracker.has_scans()) + { + double loops= (double)jbuf_loops_tracker.get_loops(); + double row_combinations= (double)jbuf_tracker.r_rows; + writer->add_double(row_combinations / loops); + } + else + writer->add_null(); } } diff --git a/sql/sql_explain.h b/sql/sql_explain.h index 0e8569aabe549..a0f137bb5619a 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -493,7 +493,7 @@ class Explain_query : public Sql_alloc bool is_analyze); /* Send tabular EXPLAIN to the client */ - int send_explain(THD *thd); + int send_explain(THD *thd, bool extended); /* Return tabular EXPLAIN output as a text string */ bool print_explain_str(THD *thd, String *out_str, bool is_analyze); @@ -747,7 +747,7 @@ class Explain_range_checked_fer : public Sql_alloc class Explain_table_access : public Sql_alloc { public: - Explain_table_access(MEM_ROOT *root) : + Explain_table_access(MEM_ROOT *root, bool timed) : derived_select_number(0), non_merged_sjm_number(0), extra_tags(root), @@ -760,6 +760,7 @@ class Explain_table_access : public Sql_alloc pushed_index_cond(NULL), sjm_nest(NULL), pre_join_sort(NULL), + jbuf_unpack_tracker(timed), rowid_filter(NULL) {} ~Explain_table_access() { delete sjm_nest; } @@ -867,8 +868,22 @@ class Explain_table_access : public Sql_alloc Exec_time_tracker op_tracker; Gap_time_tracker extra_time_tracker; + /* When using join buffer: Track the reads from join buffer */ Table_access_tracker jbuf_tracker; - + + /* When using join buffer: time spent unpacking rows from the join buffer */ + Time_and_counter_tracker jbuf_unpack_tracker; + + /* + When using join buffer: time spent after unpacking rows from the join + buffer. 
This will capture the time spent checking the Join Condition: + the condition that depends on this table and preceding tables. + */ + Gap_time_tracker jbuf_extra_time_tracker; + + /* When using join buffer: Track the number of incoming record combinations */ + Counter_tracker jbuf_loops_tracker; + Explain_rowid_filter *rowid_filter; int print_explain(select_result_sink *output, uint8 explain_flags, diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index c31d341b3f181..55b08750e5e32 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -870,7 +870,8 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, save_insert_query_plan(thd, table_list); if (thd->lex->describe) { - retval= thd->lex->explain->send_explain(thd); + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + retval= thd->lex->explain->send_explain(thd, extended); goto abort; } diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index c7b6a0bf6e4c9..36eae30de1669 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -1614,7 +1614,7 @@ bool JOIN_CACHE::get_record() pos+= referenced_fields*size_of_fld_ofs; if (prev_cache) prev_cache->get_record_by_pos(prev_rec_ptr); - } + } return res; } @@ -2389,7 +2389,9 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last) (join_tab->first_inner && !not_exists_opt_is_applicable) || !skip_next_candidate_for_match(rec_ptr)) { - read_next_candidate_for_match(rec_ptr); + ANALYZE_START_TRACKING(join->thd, join_tab->jbuf_unpack_tracker); + read_next_candidate_for_match(rec_ptr); + ANALYZE_STOP_TRACKING(join->thd, join_tab->jbuf_unpack_tracker); rc= generate_full_extensions(rec_ptr); if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS) goto finish; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index d4028eaaae0ee..852403b7cb33c 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1297,6 +1297,8 @@ void LEX::start(THD *thd_arg) frame_bottom_bound= NULL; win_spec= NULL; + upd_del_where= NULL; + vers_conditions.empty(); period_conditions.empty(); @@ -3030,6 +3032,7 @@ void st_select_lex::init_select() in_funcs.empty(); curr_tvc_name= 0; versioned_tables= 0; + is_tvc_wrapper= false; nest_flags= 0; } @@ -3954,57 +3957,54 @@ LEX::LEX() } +bool LEX::can_be_merged() +{ + return unit.can_be_merged(); +} + + /* - Check whether the merging algorithm can be used on this VIEW + Check whether the merging algorithm can be used for this unit SYNOPSIS - LEX::can_be_merged() + st_select_lex_unit::can_be_merged() DESCRIPTION - We can apply merge algorithm if it is single SELECT view with - subqueries only in WHERE clause (we do not count SELECTs of underlying - views, and second level subqueries) and we have not grpouping, ordering, - HAVING clause, aggregate functions, DISTINCT clause, LIMIT clause and - several underlying tables. + We can apply merge algorithm for a unit if it is single SELECT with + subqueries only in WHERE clauses or in ON conditions or in select list + (we do not count SELECTs of underlying views/derived tables/CTEs and + second level subqueries) and we have no grouping, ordering, HAVING + clause, aggregate functions, DISTINCT clause, LIMIT clause. 
RETURN FALSE - only temporary table algorithm can be used TRUE - merge algorithm can be used */ -bool LEX::can_be_merged() +bool st_select_lex_unit::can_be_merged() { // TODO: do not forget implement case when select_lex.table_list.elements==0 /* find non VIEW subqueries/unions */ - bool selects_allow_merge= (first_select_lex()->next_select() == 0 && - !(first_select_lex()->uncacheable & - UNCACHEABLE_RAND)); - if (selects_allow_merge) - { - for (SELECT_LEX_UNIT *tmp_unit= first_select_lex()->first_inner_unit(); - tmp_unit; - tmp_unit= tmp_unit->next_unit()) - { - if (tmp_unit->first_select()->parent_lex == this && - (tmp_unit->item != 0 && - (tmp_unit->item->place() != IN_WHERE && - tmp_unit->item->place() != IN_ON && - tmp_unit->item->place() != SELECT_LIST))) - { - selects_allow_merge= 0; - break; - } - } - } - - return (selects_allow_merge && - first_select_lex()->group_list.elements == 0 && - first_select_lex()->having == 0 && - first_select_lex()->with_sum_func == 0 && - first_select_lex()->table_list.elements >= 1 && - !(first_select_lex()->options & SELECT_DISTINCT) && - first_select_lex()->limit_params.select_limit == 0); + st_select_lex *fs= first_select(); + + if (fs->next_select() || + (fs->uncacheable & UNCACHEABLE_RAND) || + (fs->options & SELECT_DISTINCT) || + fs->group_list.elements || fs->having || + fs->with_sum_func || + fs->table_list.elements < 1 || + fs->limit_params.select_limit) + return false; + for (SELECT_LEX_UNIT *tmp_unit= fs->first_inner_unit(); + tmp_unit; + tmp_unit= tmp_unit->next_unit()) + if ((tmp_unit->item != 0 && + (tmp_unit->item->place() != IN_WHERE && + tmp_unit->item->place() != IN_ON && + tmp_unit->item->place() != SELECT_LIST))) + return false; + return true; } @@ -4079,9 +4079,7 @@ bool LEX::can_not_use_merged(bool no_update_or_delete) case SQLCOM_UPDATE_MULTI: case SQLCOM_DELETE_MULTI: - if (no_update_or_delete) - return TRUE; - /* Fall through */ + return no_update_or_delete; default: return FALSE; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 2ddd40568f9e9..993e3db25b5af 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1040,6 +1040,8 @@ class st_select_lex_unit: public st_select_lex_node { bool set_lock_to_the_last_select(Lex_select_lock l); + bool can_be_merged(); + friend class st_select_lex; private: @@ -1300,6 +1302,8 @@ class st_select_lex: public st_select_lex_node st_select_lex. 
*/ uint curr_tvc_name; + /* true <=> select has been created a TVC wrapper */ + bool is_tvc_wrapper; uint fields_in_window_functions; uint insert_tables; enum_parsing_place parsing_place; /* where we are parsing expression */ @@ -1470,6 +1474,10 @@ class st_select_lex: public st_select_lex_node } bool setup_ref_array(THD *thd, uint order_group_num); void print(THD *thd, String *str, enum_query_type query_type); + void print_item_list(THD *thd, String *str, enum_query_type query_type); + void print_set_clause(THD *thd, String *str, enum_query_type query_type); + void print_on_duplicate_key_clause(THD *thd, String *str, + enum_query_type query_type); static void print_order(String *str, ORDER *order, enum_query_type query_type); @@ -3563,6 +3571,8 @@ struct LEX: public Query_tables_list Window_frame_bound *frame_bottom_bound; Window_spec *win_spec; + Item *upd_del_where; + /* System Versioning */ vers_select_conds_t vers_conditions; vers_select_conds_t period_conditions; diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index dd19807dd6db2..60e7abc3fa23f 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -29,7 +29,7 @@ enum err_msgs_index { - en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, + en_US= 0, zh_CN, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK, es_ES, sv_SE, uk_UA, hi_IN } ERR_MSGS_INDEX; @@ -38,6 +38,7 @@ enum err_msgs_index MY_LOCALE_ERRMSGS global_errmsgs[]= { {"english", NULL}, + {"chinese", NULL}, {"czech", NULL}, {"danish", NULL}, {"dutch", NULL}, @@ -2095,7 +2096,7 @@ MY_LOCALE my_locale_zh_CN '.', /* decimal point zh_CN */ ',', /* thousands_sep zh_CN */ "\x03", /* grouping zh_CN */ - &global_errmsgs[en_US] + &global_errmsgs[zh_CN] ); /***** LOCALE END zh_CN *****/ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c699d3e9732f3..a92d07436282e 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4574,7 +4574,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt) thd->protocol= save_protocol; } if (!res && thd->lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } delete sel_result; MYSQL_INSERT_DONE(res, (ulong) thd->get_row_count_func()); /* @@ -4754,7 +4757,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt) thd->protocol= save_protocol; } if (!res && (explain || lex->analyze_stmt)) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } /* revert changes for SP */ MYSQL_INSERT_SELECT_DONE(res, (ulong) thd->get_row_count_func()); @@ -4821,7 +4827,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt) if (thd->lex->analyze_stmt || thd->lex->describe) { if (!res) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } delete sel_result; @@ -4882,7 +4891,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt) else { if (lex->describe || lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } multi_delete_error: delete result; @@ -6275,7 +6287,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST 
*all_tables) thd->protocol= save_protocol; } if (!res) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } } } @@ -9307,7 +9322,9 @@ static my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg) if (!(arg->thd->security_ctx->master_access & PRIV_KILL_OTHER_USER_PROCESS) && !arg->thd->security_ctx->user_matches(thd->security_ctx)) - return 1; + { + return MY_TEST(arg->thd->security_ctx->master_access & PROCESS_ACL); + } if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root)) { mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete @@ -9427,7 +9444,10 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) my_ok(thd, rows); break; case ER_KILL_DENIED_ERROR: - my_error(error, MYF(0), (long long) thd->thread_id); + char buf[DEFINER_LENGTH+1]; + strxnmov(buf, sizeof(buf), user->user.str, "@", user->host.str, NULL); + my_printf_error(ER_KILL_DENIED_ERROR, ER_THD(thd, ER_CANNOT_USER), MYF(0), + "KILL USER", buf); break; case ER_OUT_OF_RESOURCES: default: diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index d338dc302e630..8bde0f3bd5390 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -432,7 +432,7 @@ static int send_file(THD *thd) /** Internal to mysql_binlog_send() routine that recalculates checksum for - 1. FD event (asserted) that needs additional arranment prior sending to slave. + 1. FD event (asserted) that needs additional arrangement prior sending to slave. 2. Start_encryption_log_event whose Ignored flag is set TODO DBUG_ASSERT can be removed if this function is used for more general cases */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index a179ab933737b..5caf6d768d8f1 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -251,10 +251,12 @@ static bool find_field_in_item_list (Field *field, void *data); static bool find_field_in_order_list (Field *field, void *data); int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, - Item *having); + SORT_FIELD *sortorder, ulong keylength, + Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, - uint field_count, Field **first_field, - ulong key_length,Item *having); + uint field_count, Field **first_field, + SORT_FIELD *sortorder, + ulong key_length,Item *having); static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref); static bool setup_new_fields(THD *thd, List &fields, List &all_fields, ORDER *new_order); @@ -313,6 +315,9 @@ static Item **get_sargable_cond(JOIN *join, TABLE *table); bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item); +void print_list_item(String *str, List_item *list, + enum_query_type query_type); + static bool build_notnull_conds_for_range_scans(JOIN *join, COND *cond, table_map allowed); @@ -8018,7 +8023,6 @@ best_access_path(JOIN *join, rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables Json_writer_object trace_access_idx(thd); - double eq_ref_rows= 0.0, eq_ref_cost= 0.0; /* full text keys require special treatment */ @@ -8060,14 +8064,13 @@ best_access_path(JOIN *join, type= JT_EQ_REF; trace_access_idx.add("access_type", join_type_str[type]) .add("index", keyinfo->name); + if (!found_ref && table->opt_range_keys.is_set(key)) tmp= adjust_quick_cost(table->opt_range[key].cost, 1); else tmp= table->file->avg_io_cost(); - eq_ref_rows= prev_record_reads(join_positions, idx, + tmp*= 
prev_record_reads(join_positions, idx, found_ref); - tmp*= eq_ref_rows; - eq_ref_cost= tmp; records=1.0; } else @@ -8365,28 +8368,7 @@ best_access_path(JOIN *join, (table->file->index_flags(start_key->key,0,1) & HA_DO_RANGE_FILTER_PUSHDOWN)) { - double rows; - if (type == JT_EQ_REF) - { - /* - Treat EQ_REF access in a special way: - 1. We have no cost for index-only read. Assume its cost is 50% of - the cost of the full read. - - 2. A regular ref access will do #record_count lookups, but eq_ref - has "lookup cache" which reduces the number of lookups made. - The estimation code uses prev_record_reads() call to estimate: - - tmp = prev_record_reads(join_positions, idx, found_ref); - - Set the effective number of rows from "tmp" here. - */ - keyread_tmp= COST_ADD(eq_ref_cost / 2, s->startup_cost); - rows= eq_ref_rows; - } - else - rows= record_count * records; - + double rows= record_count * records; /* If we use filter F with selectivity s then the cost of fetching data by key using this filter will be @@ -8408,46 +8390,63 @@ best_access_path(JOIN *join, cost_of_fetching_1_row = tmp/rows cost_of_fetching_1_key_tuple = keyread_tmp/rows - access_cost_factor is the gain we expect for using rowid filter. - An access_cost_factor of 1.0 means that keyread_tmp is 0 - (using key read is infinitely fast) and the gain for each row when - using filter is great. - An access_cost_factor if 0.0 means that using keyread has the - same cost as reading rows, so there is no gain to get with - filter. - access_cost_factor should never be bigger than 1.0 (if all - calculations are correct) as the cost of keyread should always be - smaller than the cost of fetching the same number of keys + rows. - access_cost_factor should also never be smaller than 0.0. - The one exception is if number of records is 1 (eq_ref), then - because we are comparing rows to cost of keyread_tmp, keyread_tmp - is higher by 1.0. This is a big that will be fixed in a later - version. - - If we have limited the cost (=tmp) of reading rows with 'worst_seek' - we cannot use filters as the cost calculation below would cause - tmp to become negative. The future resultion is to not limit - cost with worst_seek. + Here's a more detailed explanation that uses the formulas behind + the call of the function filter->get_adjusted_gain(). The function + takes as a parameter the number of probes/look-ups into the filter, + which is equal to the number of fetched key entries and to + the number of row fetches when no filter is used (assuming no + index condition pushdown is employed for the used key access). + Let this number be N. Then the total gain from using the filter is + N*a_adj - b where b is the cost of building the filter and + a_adj is calculated as follows: + a - (1-access_cost_factor)*(1-s) = + (1+1_cond_eval_cost)*(1-s)-1_probe_cost - (1-access_cost_factor)*(1-s) + = (1-s)*(1_cond_eval_cost+access_cost_factor) - 1_probe_cost. + Here (1-s)*1_cond_eval_cost * N is the gain from checking less + conditions pushed into the table, 1_probe_cost*N is the cost of the + probes and (1-s) * access_cost_factor * N must be the gain from + accessing less rows. + It does not matter how we calculate the cost of N full row fetches + cost_of_fetching_N_rows or + how we calculate the cost of fetching N key entries + cost_of_fetching_N_key_entries: the gain from less row fetches will be + (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) * (1-s) + and this should be equal to (1-s) * access_cost_factor * N.
+ Thus access_cost_factor must be calculated as + (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) / N. + + For safety we clip cost_of_fetching_N_key_entries by the value + of cost_of_fetching_N_row though formally it's not necessary. */ - double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); + /* + For eq_ref access we assume that the cost of fetching N key entries + is equal to the half of fetching N rows + */ + double key_access_cost= + type == JT_EQ_REF ? 0.5 * tmp : MY_MIN(tmp, keyread_tmp); + double access_cost_factor= MY_MIN((tmp - key_access_cost) / rows, 1.0); + if (!(records < s->worst_seeks && records <= thd->variables.max_seeks_for_key)) + { + // Don't use rowid filter trace_access_idx.add("rowid_filter_skipped", "worst/max seeks clipping"); - else if (access_cost_factor <= 0.0) - trace_access_idx.add("rowid_filter_skipped", "cost_factor <= 0"); + filter= NULL; + } else { filter= table->best_range_rowid_filter_for_partial_join(start_key->key, rows, access_cost_factor); - if (filter) - { - tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows); - DBUG_ASSERT(tmp >= 0); - trace_access_idx.add("rowid_filter_key", - table->key_info[filter->key_no].name); - } + } + if (filter) + { + tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows); + DBUG_ASSERT(tmp >= 0); + trace_access_idx.add("rowid_filter_key", + table->key_info[filter->key_no].name); } } trace_access_idx.add("rows", records).add("cost", tmp); @@ -8623,27 +8622,23 @@ best_access_path(JOIN *join, if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE) { double rows= record_count * s->found_records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); uint key_no= s->quick->index; /* See the comment concerning using rowid filter for with ref access */ - keyread_tmp= s->table->opt_range[key_no].index_only_cost * - record_count; - access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); - if (access_cost_factor > 0.0) + double row_access_cost= s->quick->read_time * record_count; + double key_access_cost= + MY_MIN(row_access_cost, + s->table->opt_range[key_no].index_only_cost * record_count); + double access_cost_factor= MY_MIN((row_access_cost - key_access_cost) / + rows, 1.0); + filter= + s->table->best_range_rowid_filter_for_partial_join(key_no, rows, + access_cost_factor); + if (filter) { - filter= - s->table-> - best_range_rowid_filter_for_partial_join(key_no, rows, - access_cost_factor); - if (filter) - { - tmp-= filter->get_adjusted_gain(rows); - DBUG_ASSERT(tmp >= 0); - } + tmp-= filter->get_adjusted_gain(rows); + DBUG_ASSERT(tmp >= 0); } - else - trace_access_scan.add("rowid_filter_skipped", "cost_factor <= 0"); type= JT_RANGE; } @@ -21703,6 +21698,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) /* The user has aborted the execution of the query */ DBUG_RETURN(NESTED_LOOP_KILLED); } + join_tab->jbuf_loops_tracker->on_scan_init(); + if (!test_if_use_dynamic_range_scan(join_tab)) { if (!cache->put_record()) @@ -25367,39 +25364,71 @@ JOIN_TAB::remove_duplicates() { bool error; - ulong keylength= 0; - uint field_count; + ulong keylength= 0, sort_field_keylength= 0; + uint field_count, item_count; List *fields= (this-1)->fields; + Item *item; THD *thd= join->thd; - + SORT_FIELD *sortorder, *sorder; DBUG_ENTER("remove_duplicates"); DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE); THD_STAGE_INFO(join->thd, stage_removing_duplicates); - //join->explain->ops_tracker.report_duplicate_removal(); - - 
table->reginfo.lock_type=TL_WRITE; + if (!(sortorder= (SORT_FIELD*) my_malloc(PSI_INSTRUMENT_ME, + (fields->elements+1) * + sizeof(SORT_FIELD), + MYF(MY_WME)))) + DBUG_RETURN(TRUE); /* Calculate how many saved fields there is in list */ - field_count=0; + field_count= item_count= 0; + List_iterator it(*fields); - Item *item; - while ((item=it++)) + for (sorder= sortorder ; (item=it++) ;) { - if (item->get_tmp_table_field() && ! item->const_item()) - field_count++; + if (!item->const_item()) + { + if (item->get_tmp_table_field()) + { + /* Field is stored in temporary table, skip */ + field_count++; + } + else + { + /* Item is not stored in temporary table, remember it */ + sorder->field= 0; // Safety, not used + sorder->item= item; + /* Calculate sorder->length */ + item->type_handler()->sort_length(thd, item, sorder); + sorder++; + item_count++; + } + } } + sorder->item= 0; // End marker - if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having) - { // only const items with no OPTION_FOUND_ROWS + if ((field_count + item_count == 0) && ! having && + !(join->select_options & OPTION_FOUND_ROWS)) + { + // only const items with no OPTION_FOUND_ROWS join->unit->lim.set_single_row(); // Only send first row + my_free(sortorder); DBUG_RETURN(false); } + /* + The table contains first fields that will be in the output, then + temporary results pointed to by the fields list. + Example: SELECT DISTINCT sum(a), sum(d) > 2 FROM ... + In this case the temporary table contains sum(a), sum(d). + */ + Field **first_field=table->field+table->s->fields - field_count; for (Field **ptr=first_field; *ptr; ptr++) keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null(); + for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++) + sort_field_keylength+= ptr->length + (ptr->item->maybe_null() ? 1 : 0); /* Disable LIMIT ROWS EXAMINED in order to avoid interrupting prematurely @@ -25410,30 +25439,80 @@ JOIN_TAB::remove_duplicates() thd->reset_killed(); table->file->info(HA_STATUS_VARIABLE); + table->reginfo.lock_type=TL_WRITE; + if (table->s->db_type() == heap_hton || (!table->s->blob_fields && ((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records < thd->variables.sortbuff_size))) - error=remove_dup_with_hash_index(join->thd, table, field_count, first_field, - keylength, having); + error= remove_dup_with_hash_index(join->thd, table, field_count, + first_field, sortorder, + keylength + sort_field_keylength, having); else - error=remove_dup_with_compare(join->thd, table, first_field, having); + error=remove_dup_with_compare(join->thd, table, first_field, sortorder, + sort_field_keylength, having); if (join->select_lex != join->select_lex->master_unit()->fake_select_lex) thd->lex->set_limit_rows_examined(); free_blobs(first_field); + my_free(sortorder); DBUG_RETURN(error); } +/* + Create a sort/compare key from items + + Key is of fixed length and binary comparable +*/ + +static uchar *make_sort_key(SORT_FIELD *sortorder, uchar *key_buffer, + String *tmp_value) +{ + for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++) + { + ptr->item->type_handler()->make_sort_key_part(key_buffer, + ptr->item, + ptr, tmp_value); + key_buffer+= (ptr->item->maybe_null() ? 1 : 0) + ptr->length; + } + return key_buffer; +} + + +/* + Remove duplicates by comparing all rows with all other rows + + @param thd THD + @param table Temporary table + @param first_field Pointer to fields in temporary table that are part of + distinct, ends with null pointer + @param sortorder An array of Items part of distinct.
Terminated with an + element N with sortorder[N]->item=NULL. + @param keylength Length of key produced by sortorder + @param having Having expression (NULL if no having) +*/ + static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, + SORT_FIELD *sortorder, ulong keylength, Item *having) { handler *file=table->file; - uchar *record=table->record[0]; + uchar *record=table->record[0], *key_buffer, *key_buffer2; + char *tmp_buffer; int error; + String tmp_value; DBUG_ENTER("remove_dup_with_compare"); + if (unlikely(!my_multi_malloc(PSI_INSTRUMENT_ME, + MYF(MY_WME), + &key_buffer, keylength, + &key_buffer2, keylength, + &tmp_buffer, keylength+1, + NullS))) + DBUG_RETURN(1); + tmp_value.set(tmp_buffer, keylength, &my_charset_bin); + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(1); @@ -25442,8 +25521,8 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { if (unlikely(thd->check_killed())) { - error=0; - goto err; + error= 1; + goto end; } if (unlikely(error)) { @@ -25462,9 +25541,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY), MYF(ME_FATAL)); - error=0; - goto err; + error= 1; + goto end; } + make_sort_key(sortorder, key_buffer, &tmp_value); store_record(table,record[1]); /* Read through rest of file and mark duplicated rows deleted */ @@ -25477,7 +25557,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, break; goto err; } - if (compare_record(table, first_field) == 0) + make_sort_key(sortorder, key_buffer2, &tmp_value); + if (compare_record(table, first_field) == 0 && + (!keylength || + memcmp(key_buffer, key_buffer2, keylength) == 0)) { if (unlikely((error= file->ha_delete_row(record)))) goto err; @@ -25496,38 +25579,52 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, goto err; } + error= 0; +end: + my_free(key_buffer); file->extra(HA_EXTRA_NO_CACHE); (void) file->ha_rnd_end(); - DBUG_RETURN(0); + DBUG_RETURN(error); + err: - file->extra(HA_EXTRA_NO_CACHE); - (void) file->ha_rnd_end(); - if (error) - file->print_error(error,MYF(0)); - DBUG_RETURN(1); + DBUG_ASSERT(error); + file->print_error(error,MYF(0)); + goto end; } /** - Generate a hash index for each row to quickly find duplicate rows. + Generate a hash index for each row to quickly find duplicate rows. + + @param thd THD + @param table Temporary table + @param field_count Number of fields part of distinct + @param first_field Pointer to fields in temporary table that are part of + distinct, ends with null pointer + @param sortorder An array of Items part of distsinct. Terminated with an + element N with sortorder[N]->item=NULL. + @param keylength Length of hash key + @param having Having expression (NULL if no having) - @note - Note that this will not work on tables with blobs! + @note + Note that this will not work on tables with blobs! 
*/ static int remove_dup_with_hash_index(THD *thd, TABLE *table, uint field_count, Field **first_field, + SORT_FIELD *sortorder, ulong key_length, Item *having) { uchar *key_buffer, *key_pos, *record=table->record[0]; + char *tmp_buffer; int error; handler *file= table->file; ulong extra_length= ALIGN_SIZE(key_length)-key_length; uint *field_lengths, *field_length; HASH hash; - Field **ptr; + String tmp_value; DBUG_ENTER("remove_dup_with_hash_index"); if (!my_multi_malloc(key_memory_hash_index_key_buffer, MYF(MY_WME), @@ -25536,10 +25633,13 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, (long) file->stats.records), &field_lengths, (uint) (field_count*sizeof(*field_lengths)), + &tmp_buffer, key_length+1, NullS)) DBUG_RETURN(1); - for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++) + tmp_value.set(tmp_buffer, key_length, &my_charset_bin); + field_length= field_lengths; + for (Field **ptr= first_field ; *ptr ; ptr++) (*field_length++)= (*ptr)->sort_length(); if (my_hash_init(key_memory_hash_index_key_buffer, &hash, &my_charset_bin, @@ -25553,7 +25653,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, if (unlikely((error= file->ha_rnd_init(1)))) goto err; - key_pos=key_buffer; + key_pos= key_buffer; for (;;) { uchar *org_key_pos; @@ -25578,11 +25678,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, /* copy fields to key buffer */ org_key_pos= key_pos; field_length=field_lengths; - for (ptr= first_field ; *ptr ; ptr++) + for (Field **ptr= first_field ; *ptr ; ptr++) { (*ptr)->make_sort_key_part(key_pos, *field_length); key_pos+= (*ptr)->maybe_null() + *field_length++; } + /* Copy result fields not stored in table to key buffer */ + key_pos= make_sort_key(sortorder, key_pos, &tmp_value); + /* Check if it exists before */ if (my_hash_search(&hash, org_key_pos, key_length)) { @@ -27873,12 +27976,16 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta, // psergey-todo: data for filtering! 
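With this change remove_dup_with_hash_index() appends the make_sort_key() image of DISTINCT items that are not stored in the temporary table to each row's key, so duplicates are detected on a single fixed-length binary key (fields first, then item sort keys, each nullable part preceded by a NULL indicator byte). The sketch below shows only the shape of that algorithm with assumed row and key types; the server builds the key into a preallocated buffer and uses my_hash_search() plus handler::ha_delete_row(), not std::unordered_set.

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

struct Row { int stored_field; bool extra_is_null; long extra; };  // hypothetical

// Build a key that is byte-identical for equal rows: a NULL indicator
// byte in front of the nullable part, value bytes normalized for NULL.
static std::string make_key(const Row &r)
{
  std::string key;
  key.append(reinterpret_cast<const char*>(&r.stored_field), sizeof r.stored_field);
  key.push_back(r.extra_is_null ? 0 : 1);
  long v= r.extra_is_null ? 0 : r.extra;
  key.append(reinterpret_cast<const char*>(&v), sizeof v);
  return key;
}

int main()
{
  std::vector<Row> rows= {{1, false, 10}, {1, false, 10}, {1, false, 11}};
  std::unordered_set<std::string> seen;
  size_t deleted= 0;
  for (const Row &r : rows)
    if (!seen.insert(make_key(r)).second)   // key already present in the hash
      deleted++;                            // server: file->ha_delete_row(record)
  std::printf("duplicates removed: %zu\n", deleted);  // prints 1
  return 0;
}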
tracker= &eta->tracker; jbuf_tracker= &eta->jbuf_tracker; + jbuf_loops_tracker= &eta->jbuf_loops_tracker; + jbuf_unpack_tracker= &eta->jbuf_unpack_tracker; /* Enable the table access time tracker only for "ANALYZE stmt" */ if (thd->lex->analyze_stmt) { table->file->set_time_tracker(&eta->op_tracker); - eta->op_tracker.my_gap_tracker = &eta->extra_time_tracker; + eta->op_tracker.set_gap_tracker(&eta->extra_time_tracker); + + eta->jbuf_unpack_tracker.set_gap_tracker(&eta->jbuf_extra_time_tracker); } /* No need to save id and select_type here, they are kept in Explain_select */ @@ -28506,7 +28613,8 @@ int JOIN::save_explain_data_intern(Explain_query *output, Explain_table_access *eta= (new (output->mem_root) - Explain_table_access(output->mem_root)); + Explain_table_access(output->mem_root, + thd->lex->analyze_stmt)); if (!eta) DBUG_RETURN(1); @@ -29045,6 +29153,162 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, } } +enum explainable_cmd_type +{ + SELECT_CMD, INSERT_CMD, REPLACE_CMD, UPDATE_CMD, DELETE_CMD, NO_CMD +}; + +static +const LEX_CSTRING explainable_cmd_name []= +{ + {STRING_WITH_LEN("select ")}, + {STRING_WITH_LEN("insert ")}, + {STRING_WITH_LEN("replace ")}, + {STRING_WITH_LEN("update ")}, + {STRING_WITH_LEN("delete ")}, +}; + +static +const LEX_CSTRING* get_explainable_cmd_name(enum explainable_cmd_type cmd) +{ + return explainable_cmd_name + cmd; +} + +static +enum explainable_cmd_type get_explainable_cmd_type(THD *thd) +{ + switch (thd->lex->sql_command) { + case SQLCOM_SELECT: + return SELECT_CMD; + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + return INSERT_CMD; + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + return REPLACE_CMD; + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + return UPDATE_CMD; + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + return DELETE_CMD; + default: + return SELECT_CMD; + } +} + + +void TABLE_LIST::print_leaf_tables(THD *thd, String *str, + enum_query_type query_type) +{ + if (merge_underlying_list) + { + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + tbl->print_leaf_tables(thd, str, query_type); + } + else + print(thd, 0, str, query_type); +} + + +void st_select_lex::print_item_list(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + /* + outer_select() can not be used here because it is for name resolution + and will return NULL at any end of name resolution chain (view/derived) + */ + bool top_level= (get_master()->get_master() == 0); + List_iterator_fast it(item_list); + Item *item; + while ((item= it++)) + { + if (first) + first= 0; + else + str->append(','); + + if ((is_subquery_function() && !item->is_explicit_name()) || + !item->name.str) + { + /* + Do not print auto-generated aliases in subqueries. It has no purpose + in a view definition or other contexts where the query is printed. + */ + item->print(str, query_type); + } + else + { + /* + Do not print illegal names (if it is not top level SELECT). + Top level view checked (and correct name are assigned), + other cases of top level SELECT are not important, because + it is not "table field". 
+ */ + if (top_level || + item->is_explicit_name() || + !check_column_name(item->name.str)) + item->print_item_w_name(str, query_type); + else + item->print(str, query_type); + } + } +} + + +void st_select_lex::print_set_clause(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + /* + outer_select() can not be used here because it is for name resolution + and will return NULL at any end of name resolution chain (view/derived) + */ + List_iterator_fast it(item_list); + List_iterator_fast vt(thd->lex->value_list); + Item *item; + Item *val; + while ((item= it++, val= vt++ )) + { + if (first) + { + str->append(STRING_WITH_LEN(" set ")); + first= 0; + } + else + str->append(','); + + item->print(str, query_type); + str->append(STRING_WITH_LEN(" = ")); + val->print(str, query_type); + } +} + + +void st_select_lex::print_on_duplicate_key_clause(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + List_iterator_fast it(thd->lex->update_list); + List_iterator_fast vt(thd->lex->value_list); + Item *item; + Item *val; + while ((item= it++, val= vt++ )) + { + if (first) + { + str->append(STRING_WITH_LEN(" on duplicate key update ")); + first= 0; + } + else + str->append(','); + + item->print(str, query_type); + str->append(STRING_WITH_LEN(" = ")); + val->print(str, query_type); + } +} void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) { @@ -29056,6 +29320,67 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) return; } + if (is_tvc_wrapper && (query_type & QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)) + { + first_inner_unit()->first_select()->print(thd, str, query_type); + return; + } + + bool top_level= (get_master()->get_master() == 0); + enum explainable_cmd_type sel_type= SELECT_CMD; + if (top_level) + sel_type= get_explainable_cmd_type(thd); + + if (sel_type == INSERT_CMD || sel_type == REPLACE_CMD) + { + str->append(get_explainable_cmd_name(sel_type)); + str->append(STRING_WITH_LEN("into ")); + TABLE_LIST *tbl= thd->lex->query_tables; + while (tbl->merge_underlying_list) + tbl= tbl->merge_underlying_list; + tbl->print(thd, 0, str, query_type); + if (thd->lex->field_list.elements) + { + str->append ('('); + List_iterator_fast it(thd->lex->field_list); + Item *item; + bool first= true; + while ((item= it++)) + { + if (first) + first= false; + else + str->append(','); + str->append(item->name); + } + str->append(')'); + } + + str->append(' '); + + if (thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->sql_command == SQLCOM_REPLACE) + { + str->append(STRING_WITH_LEN("values ")); + bool is_first_elem= true; + List_iterator_fast li(thd->lex->many_values); + List_item *list; + + while ((list= li++)) + { + if (is_first_elem) + is_first_elem= false; + else + str->append(','); + + print_list_item(str, list, query_type); + } + if (thd->lex->update_list.elements) + print_on_duplicate_key_clause(thd, str, query_type); + return; + } + } + if ((query_type & QT_SHOW_SELECT_NUMBER) && thd->lex->all_selects_list && thd->lex->all_selects_list->link_next && @@ -29078,7 +29403,10 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(STRING_WITH_LEN(" */ ")); } - str->append(STRING_WITH_LEN("select ")); + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) + str->append(STRING_WITH_LEN("select ")); if (join && join->cleaned) { @@ -29124,57 +29452,66 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) } //Item List - bool first= 1; + 
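The new print_set_clause() and print_on_duplicate_key_clause() helpers above share one emission pattern: walk two parallel item lists and print "item = value" pairs, writing the leading keyword only before the first pair and a comma before every later one. A minimal sketch of that pattern, with plain strings standing in for Item::print() output (not the server API):

#include <cstddef>
#include <cstdio>

// Emit "<prefix>col = val,col = val,..." exactly like the helpers above.
static void print_pairs(const char *prefix,
                        const char *const *cols, const char *const *vals,
                        size_t n)
{
  for (size_t i= 0; i < n; i++)
    std::printf("%s%s = %s", i == 0 ? prefix : ",", cols[i], vals[i]);
}

int main()
{
  const char *cols[]= {"a", "b"};
  const char *vals[]= {"1", "x+2"};
  print_pairs(" set ", cols, vals, 2);                       // " set a = 1,b = x+2"
  std::printf("\n");
  print_pairs(" on duplicate key update ", cols, vals, 2);
  std::printf("\n");
  return 0;
}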
if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) + print_item_list(thd, str, query_type); /* - outer_select() can not be used here because it is for name resolution - and will return NULL at any end of name resolution chain (view/derived) + from clause + TODO: support USING/FORCE/IGNORE index */ - bool top_level= (get_master()->get_master() == 0); - List_iterator_fast it(item_list); - Item *item; - while ((item= it++)) + if (table_list.elements) { - if (first) - first= 0; - else - str->append(','); - - if ((is_subquery_function() && !item->is_explicit_name()) || - !item->name.str) + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) { - /* - Do not print auto-generated aliases in subqueries. It has no purpose - in a view definition or other contexts where the query is printed. - */ - item->print(str, query_type); + str->append(STRING_WITH_LEN(" from ")); + /* go through join tree */ + print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list, + query_type); } - else + if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + str->append(get_explainable_cmd_name(sel_type)); + if (sel_type == DELETE_CMD) { - /* - Do not print illegal names (if it is not top level SELECT). - Top level view checked (and correct name are assigned), - other cases of top level SELECT are not important, because - it is not "table field". - */ - if (top_level || - item->is_explicit_name() || - !check_column_name(item->name.str)) - item->print_item_w_name(str, query_type); + str->append(STRING_WITH_LEN(" from ")); + bool first= true; + for (TABLE_LIST *target_tbl= thd->lex->auxiliary_table_list.first; + target_tbl; + target_tbl= target_tbl->next_local) + { + if (first) + first= false; + else + str->append(','); + target_tbl->correspondent_table->print_leaf_tables(thd, str, + query_type); + } + + if (!first) + str->append(STRING_WITH_LEN(" using ")); + } + if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + { + if (join) + print_join(thd, 0, str, &top_join_list, query_type); else - item->print(str, query_type); + { + bool first= true; + List_iterator_fast li(leaf_tables); + TABLE_LIST *tbl; + while ((tbl= li++)) + { + if (first) + first= false; + else + str->append(','); + tbl->print(thd, 0, str, query_type); + } + } } } - - /* - from clause - TODO: support USING/FORCE/IGNORE index - */ - if (table_list.elements) - { - str->append(STRING_WITH_LEN(" from ")); - /* go through join tree */ - print_join(thd, join? 
join->eliminated_tables: 0, str, &top_join_list, query_type); - } else if (where) { /* @@ -29184,10 +29521,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(STRING_WITH_LEN(" from DUAL ")); } + if (sel_type == UPDATE_CMD) + print_set_clause(thd, str, query_type); + // Where Item *cur_where= where; if (join) cur_where= join->conds; + else if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + cur_where= thd->lex->upd_del_where; if (cur_where || cond_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" where ")); @@ -29246,6 +29588,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) if (unlikely(skip_locked)) str->append(STRING_WITH_LEN(" skip locked")); + if ((sel_type == INSERT_CMD || sel_type == REPLACE_CMD) && + thd->lex->update_list.elements) + print_on_duplicate_key_clause(thd, str, query_type); + + // returning clause + if (sel_type == DELETE_CMD && !item_list.elements) + { + print_item_list(thd, str, query_type); + } // PROCEDURE unsupported here } diff --git a/sql/sql_select.h b/sql/sql_select.h index 01892a5fe9ba2..dfa7df5ce0c30 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -309,6 +309,8 @@ typedef struct st_join_table { Table_access_tracker *tracker; Table_access_tracker *jbuf_tracker; + Time_and_counter_tracker *jbuf_unpack_tracker; + Counter_tracker *jbuf_loops_tracker; /* Bitmap of TAB_INFO_* bits that encodes special line for EXPLAIN 'Extra' column, or 0 if there is no info. diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 295b6017a2c75..6515fa9336259 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2661,7 +2661,8 @@ static int show_create_view(THD *thd, TABLE_LIST *table, String *buff) a different syntax, like when ANSI_QUOTES is defined. */ table->view->unit.print(buff, enum_query_type(QT_VIEW_INTERNAL | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); if (table->with_check != VIEW_CHECK_NONE) { diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 6c9a81a32c9ba..82e1f492d6786 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -20,6 +20,7 @@ #include "my_base.h" /* ha_rows */ #include /* qsort2_cmp */ #include "queues.h" +#include "sql_string.h" #include "sql_class.h" class Field; diff --git a/sql/sql_string.cc b/sql/sql_string.cc index d4639fb6bc406..7a8aa4ef0f48e 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -563,7 +563,7 @@ bool String::append(const char *s,size_t size) } /* - For an ASCII compatinble string we can just append. + For an ASCII compatible string we can just append. */ return Binary_string::append(s, arg_length); } diff --git a/sql/sql_string.h b/sql/sql_string.h index 9729d9e85fda1..dbb4760ab3407 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -203,6 +203,83 @@ class Charset { return m_charset != &my_charset_bin; } + + /* + The MariaDB version when the last collation change happened, + e.g. due to a bug fix. See functions below. + */ + static ulong latest_mariadb_version_with_collation_change() + { + return 110002; + } + + /* + Check if the collation with the given ID changed its order + since the given MariaDB version. 
+ */ + static bool collation_changed_order(ulong mysql_version, uint cs_number) + { + if ((mysql_version < 50048 && + (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */ + cs_number == 41 || /* latin7_general_ci - bug #29461 */ + cs_number == 42 || /* latin7_general_cs - bug #29461 */ + cs_number == 20 || /* latin7_estonian_cs - bug #29461 */ + cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */ + cs_number == 22 || /* koi8u_general_ci - bug #29461 */ + cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */ + cs_number == 26)) || /* cp1250_general_ci - bug #29461 */ + (mysql_version < 50124 && + (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */ + cs_number == 35))) /* ucs2_general_ci - bug #27877 */ + return true; + + if (cs_number == 159 && /* ucs2_general_mysql500_ci - MDEV-30746 */ + ((mysql_version >= 100400 && mysql_version < 100429) || + (mysql_version >= 100500 && mysql_version < 100520) || + (mysql_version >= 100600 && mysql_version < 100613) || + (mysql_version >= 100700 && mysql_version < 100708) || + (mysql_version >= 100800 && mysql_version < 100808) || + (mysql_version >= 100900 && mysql_version < 100906) || + (mysql_version >= 101000 && mysql_version < 101004) || + (mysql_version >= 101100 && mysql_version < 101103) || + (mysql_version >= 110000 && mysql_version < 110002))) + return true; + return false; + } + + /** + Check if a collation has changed ID since the given version. + Return the new ID. + + @param mysql_version + @param cs_number - collation ID + + @retval the new collation ID (or cs_number, if no change) + */ + + static uint upgrade_collation_id(ulong mysql_version, uint cs_number) + { + if (mysql_version >= 50300 && mysql_version <= 50399) + { + switch (cs_number) { + case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci + case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci + } + } + if ((mysql_version >= 50500 && mysql_version <= 50599) || + (mysql_version >= 100000 && mysql_version <= 100005)) + { + switch (cs_number) { + case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci + case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci + case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci + case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci + case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci + } + } + return cs_number; + } + }; diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index d1c32e7d9d3ff..d4fd473887340 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -707,6 +707,7 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl, wrapper_sl->parent_lex= lex; /* Used in init_query. 
*/ wrapper_sl->init_query(); wrapper_sl->init_select(); + wrapper_sl->is_tvc_wrapper= true; wrapper_sl->nest_level= tvc_sl->nest_level; wrapper_sl->parsing_place= tvc_sl->parsing_place; diff --git a/sql/sql_type.cc b/sql/sql_type.cc index 213de62705f32..269532e2390b4 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -9035,13 +9035,13 @@ Type_handler_timestamp_common::Item_val_native_with_conversion(THD *thd, Item *item, Native *to) const { - MYSQL_TIME ltime; if (item->type_handler()->type_handler_for_native_format() == &type_handler_timestamp2) return item->val_native(thd, to); + Datetime dt(thd, item, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)); return - item->get_date(thd, <ime, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) || - TIME_to_native(thd, <ime, to, item->datetime_precision(thd)); + !dt.is_valid_datetime() || + TIME_to_native(thd, dt.get_mysql_time(), to, item->datetime_precision(thd)); } bool Type_handler_null::union_element_finalize(Item_type_holder *item) const diff --git a/sql/sql_type.h b/sql/sql_type.h index d931c7ffb6d6f..52c17d61d2edc 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -4060,14 +4060,14 @@ class Type_handler */ virtual void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const= 0; + String *tmp) const= 0; /* create a compact size key part for a sort key */ virtual uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const=0; + String *tmp) const=0; virtual void sort_length(THD *thd, const Type_std_attributes *item, @@ -4473,12 +4473,12 @@ class Type_handler_row: public Type_handler uint32 flags) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override + String *tmp) const override { MY_ASSERT_UNREACHABLE(); } uint make_packed_sort_key_part(uchar *, Item *, const SORT_FIELD_ATTR *, - Sort_param *) const override + String *) const override { MY_ASSERT_UNREACHABLE(); return 0; @@ -4818,10 +4818,10 @@ class Type_handler_real_result: public Type_handler_numeric const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -4930,10 +4930,10 @@ class Type_handler_decimal_result: public Type_handler_numeric const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void Column_definition_attributes_frm_pack(const Column_definition_attributes *at, uchar *buff) const override; @@ -5186,10 +5186,10 @@ class Type_handler_int_result: public Type_handler_numeric TABLE_SHARE *share) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void Column_definition_attributes_frm_pack(const 
Column_definition_attributes *at, uchar *buff) const override; @@ -5297,10 +5297,10 @@ class Type_handler_temporal_result: public Type_handler uchar *buff) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -5397,10 +5397,10 @@ class Type_handler_string_result: public Type_handler CHARSET_INFO *cs) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -6620,10 +6620,10 @@ class Type_handler_timestamp_common: public Type_handler_temporal_with_date const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h index c1be1c9ccbaca..852ec1269eb1b 100644 --- a/sql/sql_type_fixedbin.h +++ b/sql/sql_type_fixedbin.h @@ -468,7 +468,7 @@ class FixedBinTypeBundle return def->frm_unpack_charset(share, buffer); } void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override + String *) const override { DBUG_ASSERT(item->type_handler() == this); NativeBuffer tmp; @@ -489,7 +489,7 @@ class FixedBinTypeBundle } uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override + String *) const override { DBUG_ASSERT(item->type_handler() == this); NativeBuffer tmp; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 033f98fd7195b..68de349a78f48 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1391,7 +1391,8 @@ int mysql_update(THD *thd, goto err; emit_explain_and_leave: - int err2= thd->lex->explain->send_explain(thd); + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + int err2= thd->lex->explain->send_explain(thd, extended); delete select; free_underlaid_joins(thd, select_lex); @@ -1465,6 +1466,8 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, select_lex->fix_prepare_information(thd, conds, &fake_conds); + if (!thd->lex->upd_del_where) + thd->lex->upd_del_where= *conds; DBUG_RETURN(FALSE); } @@ -1992,7 +1995,10 @@ bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List *fields, else { if (thd->lex->describe || thd->lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } thd->abort_on_warning= 0; DBUG_RETURN(res); diff --git a/sql/sql_view.cc b/sql/sql_view.cc index a2b744fc8bed8..e3bfe37b17220 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1006,10 +1006,12 @@ static 
int mysql_register_view(THD *thd, DDL_LOG_STATE *ddl_log_state, Sql_mode_instant_remove sms(thd, MODE_ANSI_QUOTES); lex->unit.print(&view_query, enum_query_type(QT_VIEW_INTERNAL | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); lex->unit.print(&is_query, enum_query_type(QT_TO_SYSTEM_CHARSET | QT_WITHOUT_INTRODUCERS | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); } DBUG_PRINT("info", ("View: %.*s", view_query.length(), view_query.ptr())); diff --git a/sql/table.cc b/sql/table.cc index d66b357766cbf..83ab3f357bf0f 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -952,39 +952,6 @@ static uint enum_value_with_check(THD *thd, TABLE_SHARE *share, } -/** - Check if a collation has changed number - - @param mysql_version - @param current collation number - - @retval new collation number (same as current collation number of no change) -*/ - -static uint upgrade_collation(ulong mysql_version, uint cs_number) -{ - if (mysql_version >= 50300 && mysql_version <= 50399) - { - switch (cs_number) { - case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci - case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci - } - } - if ((mysql_version >= 50500 && mysql_version <= 50599) || - (mysql_version >= 100000 && mysql_version <= 100005)) - { - switch (cs_number) { - case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci - case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci - case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci - case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci - case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci - } - } - return cs_number; -} - - void Column_definition_attributes::frm_pack_basic(uchar *buff) const { int2store(buff + 3, length); @@ -1044,7 +1011,7 @@ bool Column_definition_attributes::frm_unpack_charset(TABLE_SHARE *share, const uchar *buff) { uint cs_org= buff[14] + (((uint) buff[11]) << 8); - uint cs_new= upgrade_collation(share->mysql_version, cs_org); + uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org); if (cs_org != cs_new) share->incompatible_version|= HA_CREATE_USED_CHARSET; if (cs_new && !(charset= get_charset(cs_new, MYF(0)))) @@ -1898,7 +1865,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (!frm_image[32]) // New frm file in 3.23 { uint cs_org= (((uint) frm_image[41]) << 8) + (uint) frm_image[38]; - uint cs_new= upgrade_collation(share->mysql_version, cs_org); + uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org); if (cs_org != cs_new) share->incompatible_version|= HA_CREATE_USED_CHARSET; @@ -3056,6 +3023,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, goto err; field= key_part->field= share->field[key_part->fieldnr-1]; + if (Charset::collation_changed_order(share->mysql_version, + field->charset()->number)) + share->incompatible_version|= HA_CREATE_USED_CHARSET; key_part->type= field->key_type(); if (field->invisible > INVISIBLE_USER && !field->vers_sys_field()) @@ -6770,6 +6740,9 @@ bool TABLE_LIST::prepare_security(THD *thd) #ifndef DBUG_OFF void TABLE_LIST::set_check_merged() { + if (is_view()) + return; + DBUG_ASSERT(derived); /* It is not simple to check all, but at least this should be checked: @@ -7097,9 +7070,8 @@ void Field_iterator_table_ref::set_field_iterator() table_ref->alias.str)); } /* This is a merge view, so use field_translation. 
*/ - else if (table_ref->field_translation) + else if (table_ref->is_merged_derived() && table_ref->field_translation) { - DBUG_ASSERT(table_ref->is_merged_derived()); field_it= &view_field_it; DBUG_PRINT("info", ("field_it for '%s' is Field_iterator_view", table_ref->alias.str)); @@ -9140,7 +9112,7 @@ bool TABLE::check_period_overlaps(const KEY &key, return false; uint kp_len= key.key_part[part_nr].length; if (f->cmp_prefix(f->ptr_in_record(lhs), f->ptr_in_record(rhs), - kp_len) != 0) + kp_len / f->charset()->mbmaxlen) != 0) return false; } @@ -9632,17 +9604,18 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view) set_derived(); } - if (!is_view() && + if (is_view() || !derived_table_optimization_done(this)) { /* A subquery might be forced to be materialized due to a side-effect. */ - if (!is_materialized_derived() && first_select->is_mergeable() && + if (!is_materialized_derived() && unit->can_be_merged() && (unit->outer_select() && !unit->outer_select()->with_rownum) && (!thd->lex->with_rownum || (!first_select->group_list.elements && !first_select->order_list.elements)) && - optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) && - !thd->lex->can_not_use_merged(1) && + (is_view() || + (optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) && + !thd->lex->can_not_use_merged(1))) && !is_recursive_with_table()) set_merged_derived(); else diff --git a/sql/table.h b/sql/table.h index de9b2673d46d8..41963658e8a00 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2749,6 +2749,8 @@ struct TABLE_LIST } void print(THD *thd, table_map eliminated_tables, String *str, enum_query_type query_type); + void print_leaf_tables(THD *thd, String *str, + enum_query_type query_type); bool check_single_table(TABLE_LIST **table, table_map map, TABLE_LIST *view); bool set_insert_values(MEM_ROOT *mem_root); @@ -2889,8 +2891,7 @@ struct TABLE_LIST DBUG_PRINT("enter", ("Alias: '%s' Unit: %p", (alias.str ? alias.str : ""), get_unit())); - derived_type= static_cast((derived_type & DTYPE_MASK) | - DTYPE_TABLE | DTYPE_MERGE); + derived_type= static_cast((derived_type & DTYPE_MASK) | DTYPE_MERGE); set_check_merged(); DBUG_VOID_RETURN; } @@ -2904,10 +2905,9 @@ struct TABLE_LIST DBUG_PRINT("enter", ("Alias: '%s' Unit: %p", (alias.str ? alias.str : ""), get_unit())); - derived= get_unit(); derived_type= static_cast((derived_type & (derived ? DTYPE_MASK : DTYPE_VIEW)) | - DTYPE_TABLE | DTYPE_MATERIALIZE); + DTYPE_MATERIALIZE); set_check_materialized(); DBUG_VOID_RETURN; } diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 7d8296a75a1e7..d9988914c4d42 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -502,7 +502,13 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ if (!WSREP_EMULATE_BINLOG(m_thd)) { wsrep_register_for_group_commit(m_thd); - ret = ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); + /* wait_for_prior_commit() ensures that all preceding transactions + have been committed and seqno has been synced into + storage engine. We don't release commit order here yet to + avoid following transactions to sync seqno before + wsrep_set_SE_checkpoint() below returns. This effectively pauses + group commit for the checkpoint operation, but is the only way to + ensure proper ordering. 
*/ m_thd->wait_for_prior_commit(); } @@ -512,10 +518,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ { wsrep_unregister_from_group_commit(m_thd); } - else - { - ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); - } + ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); cs.after_applying(); } DBUG_RETURN(ret); diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 6ba14f96dd8b9..812452f451ad6 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -1,4 +1,4 @@ -/* Copyright 2016-2022 Codership Oy +/* Copyright 2016-2023 Codership Oy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -217,6 +217,19 @@ static inline bool wsrep_run_commit_hook(THD* thd, bool all) } mysql_mutex_unlock(&thd->LOCK_thd_data); } + + mysql_mutex_lock(&thd->LOCK_thd_data); + /* Transaction creating sequence is TOI or RSU, + CREATE [TEMPORARY] SEQUENCE = CREATE + INSERT (initial value) + and replicated using statement based replication, thus + the commit hooks will be skipped */ + if (ret && + (thd->wsrep_cs().mode() == wsrep::client_state::m_toi || + thd->wsrep_cs().mode() == wsrep::client_state::m_rsu) && + thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE) + ret= false; + mysql_mutex_unlock(&thd->LOCK_thd_data); + DBUG_PRINT("wsrep", ("return: %d", ret)); DBUG_RETURN(ret); } diff --git a/storage/connect/mysql-test/connect/r/mysql.result b/storage/connect/mysql-test/connect/r/mysql.result index d3c244b277ac8..1dcbca88a7b5e 100644 --- a/storage/connect/mysql-test/connect/r/mysql.result +++ b/storage/connect/mysql-test/connect/r/mysql.result @@ -364,5 +364,16 @@ hex(col) DROP TABLE t2; DROP TABLE t1; # +# MDEV-29782 CONNECT engine converted YEAR to DATETIME, causing INSERT to fail +# +CREATE TABLE t1 (id year); +CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL DBNAME='test' TABNAME='t1' OPTION_LIST='host=localhost,user=root,port=PORT'; +INSERT INTO t2 VALUES (1999); +SELECT * FROM t2; +id +1999 +DROP TABLE t2; +DROP TABLE t1; +# # End of 10.3 tests # diff --git a/storage/connect/mysql-test/connect/t/mysql.test b/storage/connect/mysql-test/connect/t/mysql.test index a50db4a645731..cd52f78fb302a 100644 --- a/storage/connect/mysql-test/connect/t/mysql.test +++ b/storage/connect/mysql-test/connect/t/mysql.test @@ -533,6 +533,21 @@ DROP TABLE t2; DROP TABLE t1; +--echo # +--echo # MDEV-29782 CONNECT engine converted YEAR to DATETIME, causing INSERT to fail +--echo # + +CREATE TABLE t1 (id year); + +--replace_result $PORT PORT +--eval CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL DBNAME='test' TABNAME='t1' OPTION_LIST='host=localhost,user=root,port=$PORT' + +INSERT INTO t2 VALUES (1999); +SELECT * FROM t2; + +DROP TABLE t2; +DROP TABLE t1; + --echo # --echo # End of 10.3 tests --echo # diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index c49db48bfb3ae..45b2c46e21724 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -183,6 +183,7 @@ int MYSQLtoPLG(int mytype, char *var) switch (mytype) { case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_YEAR: type = TYPE_SHORT; break; case MYSQL_TYPE_LONG: @@ -209,7 +210,6 @@ int MYSQLtoPLG(int mytype, char *var) case MYSQL_TYPE_TIMESTAMP: case MYSQL_TYPE_DATE: case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_YEAR: case MYSQL_TYPE_TIME: type = TYPE_DATE; break; diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 
dda6a95da16c4..a33f8f0ee43be 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -1059,7 +1059,7 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode, if (lock_intention == BTR_INTENTION_DELETE) { compress_limit= BTR_CUR_PAGE_COMPRESS_LIMIT(index()); - if (buf_pool.n_pend_reads && + if (os_aio_pending_reads_approx() && trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH) { /* Most delete-intended operations are due to the purge of history. @@ -1844,7 +1844,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index, { compress_limit= BTR_CUR_PAGE_COMPRESS_LIMIT(index); - if (buf_pool.n_pend_reads && + if (os_aio_pending_reads_approx() && trx_sys.history_size_approx() > BTR_CUR_FINE_HISTORY_LENGTH) { mtr_x_lock_index(index, mtr); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 031270a05d332..6308ea0b5dc53 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -3528,9 +3528,6 @@ dberr_t buf_page_t::read_complete(const fil_node_t &node) ut_ad(zip_size() == node.space->zip_size()); ut_ad(!!zip.ssize == !!zip.data); - ut_d(auto n=) buf_pool.n_pend_reads--; - ut_ad(n > 0); - const byte *read_frame= zip.data ? zip.data : frame; ut_ad(read_frame); @@ -3830,9 +3827,8 @@ void buf_pool_t::print() << ", modified database pages=" << UT_LIST_GET_LEN(flush_list) << ", n pending decompressions=" << n_pend_unzip - << ", n pending reads=" << n_pend_reads << ", n pending flush LRU=" << n_flush() - << " list=" << buf_dblwr.pending_writes() + << " list=" << os_aio_pending_writes() << ", pages made young=" << stat.n_pages_made_young << ", not young=" << stat.n_pages_not_made_young << ", pages read=" << stat.n_pages_read @@ -3945,11 +3941,11 @@ void buf_stats_get_pool_info(buf_pool_info_t *pool_info) pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool.unzip_LRU); - pool_info->n_pend_reads = buf_pool.n_pend_reads; + pool_info->n_pend_reads = os_aio_pending_reads_approx(); pool_info->n_pending_flush_lru = buf_pool.n_flush(); - pool_info->n_pending_flush_list = buf_dblwr.pending_writes(); + pool_info->n_pending_flush_list = os_aio_pending_writes(); mysql_mutex_unlock(&buf_pool.flush_list_mutex); current_time = time(NULL); diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 8d8b92ae5608e..c5de7a418080e 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -53,7 +53,6 @@ void buf_dblwr_t::init() active_slot= &slots[0]; mysql_mutex_init(buf_dblwr_mutex_key, &mutex, nullptr); pthread_cond_init(&cond, nullptr); - pthread_cond_init(&write_cond, nullptr); } } @@ -467,7 +466,6 @@ void buf_dblwr_t::close() ut_ad(!batch_running); pthread_cond_destroy(&cond); - pthread_cond_destroy(&write_cond); for (int i= 0; i < 2; i++) { aligned_free(slots[i].write_buf); @@ -479,38 +477,31 @@ void buf_dblwr_t::close() } /** Update the doublewrite buffer on write completion. */ -void buf_dblwr_t::write_completed(bool with_doublewrite) +void buf_dblwr_t::write_completed() { ut_ad(this == &buf_dblwr); ut_ad(!srv_read_only_mode); mysql_mutex_lock(&mutex); - ut_ad(writes_pending); - if (!--writes_pending) - pthread_cond_broadcast(&write_cond); + ut_ad(is_created()); + ut_ad(srv_use_doublewrite_buf); + ut_ad(batch_running); + slot *flush_slot= active_slot == &slots[0] ? 
&slots[1] : &slots[0]; + ut_ad(flush_slot->reserved); + ut_ad(flush_slot->reserved <= flush_slot->first_free); - if (with_doublewrite) + if (!--flush_slot->reserved) { - ut_ad(is_created()); - ut_ad(srv_use_doublewrite_buf); - ut_ad(batch_running); - slot *flush_slot= active_slot == &slots[0] ? &slots[1] : &slots[0]; - ut_ad(flush_slot->reserved); - ut_ad(flush_slot->reserved <= flush_slot->first_free); - - if (!--flush_slot->reserved) - { - mysql_mutex_unlock(&mutex); - /* This will finish the batch. Sync data files to the disk. */ - fil_flush_file_spaces(); - mysql_mutex_lock(&mutex); + mysql_mutex_unlock(&mutex); + /* This will finish the batch. Sync data files to the disk. */ + fil_flush_file_spaces(); + mysql_mutex_lock(&mutex); - /* We can now reuse the doublewrite memory buffer: */ - flush_slot->first_free= 0; - batch_running= false; - pthread_cond_broadcast(&cond); - } + /* We can now reuse the doublewrite memory buffer: */ + flush_slot->first_free= 0; + batch_running= false; + pthread_cond_broadcast(&cond); } mysql_mutex_unlock(&mutex); @@ -754,7 +745,6 @@ void buf_dblwr_t::add_to_batch(const IORequest &request, size_t size) const ulint buf_size= 2 * block_size(); mysql_mutex_lock(&mutex); - writes_pending++; for (;;) { diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 3fa26dab25fbe..b702d79df8bf6 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -209,7 +209,7 @@ void buf_flush_remove_pages(uint32_t id) if (!deferred) break; - buf_dblwr.wait_for_page_writes(); + os_aio_wait_until_no_pending_writes(); } } @@ -339,9 +339,9 @@ void buf_page_write_complete(const IORequest &request) if (request.is_LRU()) { const bool temp= bpage->oldest_modification() == 2; - if (!temp) - buf_dblwr.write_completed(state < buf_page_t::WRITE_FIX_REINIT && - request.node->space->use_doublewrite()); + if (!temp && state < buf_page_t::WRITE_FIX_REINIT && + request.node->space->use_doublewrite()) + buf_dblwr.write_completed(); /* We must hold buf_pool.mutex while releasing the block, so that no other thread can access it before we have freed it. 
*/ mysql_mutex_lock(&buf_pool.mutex); @@ -353,8 +353,9 @@ void buf_page_write_complete(const IORequest &request) } else { - buf_dblwr.write_completed(state < buf_page_t::WRITE_FIX_REINIT && - request.node->space->use_doublewrite()); + if (state < buf_page_t::WRITE_FIX_REINIT && + request.node->space->use_doublewrite()) + buf_dblwr.write_completed(); bpage->write_complete(false); } } @@ -847,8 +848,6 @@ bool buf_page_t::flush(bool evict, fil_space_t *space) ut_ad(lsn >= oldest_modification()); log_write_up_to(lsn, true); } - if (UNIV_LIKELY(space->purpose != FIL_TYPE_TEMPORARY)) - buf_dblwr.add_unbuffered(); space->io(IORequest{type, this, slot}, physical_offset(), size, write_frame, this); } @@ -1083,13 +1082,11 @@ static ulint buf_flush_try_neighbors(fil_space_t *space, mysql_mutex_unlock(&buf_pool.mutex); } - ut_ad(!bpage); - - if (auto n= count - 1) + if (count > 1) { MONITOR_INC_VALUE_CUMULATIVE(MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE, MONITOR_FLUSH_NEIGHBOR_COUNT, - MONITOR_FLUSH_NEIGHBOR_PAGES, n); + MONITOR_FLUSH_NEIGHBOR_PAGES, count - 1); } return count; @@ -1964,7 +1961,7 @@ static void buf_flush_wait(lsn_t lsn) break; } mysql_mutex_unlock(&buf_pool.flush_list_mutex); - buf_dblwr.wait_for_page_writes(); + os_aio_wait_until_no_pending_writes(); mysql_mutex_lock(&buf_pool.flush_list_mutex); } } @@ -1985,8 +1982,6 @@ ATTRIBUTE_COLD void buf_flush_wait_flushed(lsn_t sync_lsn) if (buf_pool.get_oldest_modification(sync_lsn) < sync_lsn) { MONITOR_INC(MONITOR_FLUSH_SYNC_WAITS); - thd_wait_begin(nullptr, THD_WAIT_DISKIO); - tpool::tpool_wait_begin(); #if 1 /* FIXME: remove this, and guarantee that the page cleaner serves us */ if (UNIV_UNLIKELY(!buf_page_cleaner_is_active)) @@ -2001,7 +1996,7 @@ ATTRIBUTE_COLD void buf_flush_wait_flushed(lsn_t sync_lsn) MONITOR_FLUSH_SYNC_COUNT, MONITOR_FLUSH_SYNC_PAGES, n_pages); } - buf_dblwr.wait_for_page_writes(); + os_aio_wait_until_no_pending_writes(); mysql_mutex_lock(&buf_pool.flush_list_mutex); } while (buf_pool.get_oldest_modification(sync_lsn) < sync_lsn); @@ -2010,7 +2005,6 @@ ATTRIBUTE_COLD void buf_flush_wait_flushed(lsn_t sync_lsn) #endif buf_flush_wait(sync_lsn); - tpool::tpool_wait_end(); thd_wait_end(nullptr); } @@ -2375,7 +2369,7 @@ static void buf_flush_page_cleaner() if (!recv_recovery_is_on() && !srv_startup_is_before_trx_rollback_phase && - srv_operation == SRV_OPERATION_NORMAL) + srv_operation <= SRV_OPERATION_EXPORT_RESTORED) log_checkpoint(); } while (false); @@ -2445,7 +2439,7 @@ static void buf_flush_page_cleaner() last_activity_count= activity_count; goto maybe_unemployed; } - else if (buf_pool.page_cleaner_idle() && buf_pool.n_pend_reads == 0) + else if (buf_pool.page_cleaner_idle() && !os_aio_pending_reads()) { /* reaching here means 3 things: - last_activity_count == activity_count: suggesting server is idle @@ -2521,7 +2515,7 @@ static void buf_flush_page_cleaner() mysql_mutex_lock(&buf_pool.flush_list_mutex); buf_flush_wait_LRU_batch_end(); mysql_mutex_unlock(&buf_pool.flush_list_mutex); - buf_dblwr.wait_for_page_writes(); + os_aio_wait_until_no_pending_writes(); } mysql_mutex_lock(&buf_pool.flush_list_mutex); @@ -2547,7 +2541,7 @@ static void buf_flush_page_cleaner() ATTRIBUTE_COLD void buf_flush_page_cleaner_init() { ut_ad(!buf_page_cleaner_is_active); - ut_ad(srv_operation == SRV_OPERATION_NORMAL || + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); buf_flush_async_lsn= 0; @@ -2571,15 +2565,7 @@ ATTRIBUTE_COLD void 
buf_flush_buffer_pool() { mysql_mutex_unlock(&buf_pool.flush_list_mutex); buf_flush_list(srv_max_io_capacity); - if (const size_t pending= buf_dblwr.pending_writes()) - { - timespec abstime; - service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, - "Waiting to write %zu pages", pending); - set_timespec(abstime, INNODB_EXTEND_TIMEOUT_INTERVAL / 2); - buf_dblwr.wait_for_page_writes(abstime); - } - + os_aio_wait_until_no_pending_writes(); mysql_mutex_lock(&buf_pool.flush_list_mutex); service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, "Waiting to flush " ULINTPF " pages", @@ -2587,20 +2573,18 @@ ATTRIBUTE_COLD void buf_flush_buffer_pool() } mysql_mutex_unlock(&buf_pool.flush_list_mutex); - ut_ad(!buf_pool.any_io_pending()); + ut_ad(!os_aio_pending_writes()); + ut_ad(!os_aio_pending_reads()); } /** Synchronously flush dirty blocks during recv_sys_t::apply(). NOTE: The calling thread is not allowed to hold any buffer page latches! */ void buf_flush_sync_batch(lsn_t lsn) { - thd_wait_begin(nullptr, THD_WAIT_DISKIO); - tpool::tpool_wait_begin(); + lsn= std::max(lsn, log_sys.get_lsn()); mysql_mutex_lock(&buf_pool.flush_list_mutex); buf_flush_wait(lsn); mysql_mutex_unlock(&buf_pool.flush_list_mutex); - tpool::tpool_wait_end(); - thd_wait_end(nullptr); } /** Synchronously flush dirty blocks. diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 2e9b42e9f51f1..d1c7f0ba66646 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1112,17 +1112,8 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, break; case FIL_PAGE_TYPE_ZBLOB: case FIL_PAGE_TYPE_ZBLOB2: - break; case FIL_PAGE_INDEX: case FIL_PAGE_RTREE: -#if defined UNIV_ZIP_DEBUG && defined BTR_CUR_HASH_ADAPT - /* During recovery, we only update the - compressed page, not the uncompressed one. 
*/ - ut_a(recv_recovery_is_on() - || page_zip_validate( - &bpage->zip, page, - ((buf_block_t*) bpage)->index)); -#endif /* UNIV_ZIP_DEBUG && BTR_CUR_HASH_ADAPT */ break; default: ib::error() << "The compressed page to be" diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index fa91939aceef0..8631a03644f72 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -227,12 +227,9 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id, } buf_pool.stat.n_pages_read++; - mysql_mutex_unlock(&buf_pool.mutex); - buf_pool.n_pend_reads++; - goto func_exit_no_mutex; func_exit: mysql_mutex_unlock(&buf_pool.mutex); -func_exit_no_mutex: + if (mode == BUF_READ_IBUF_PAGES_ONLY) ibuf_mtr_commit(&mtr); @@ -319,8 +316,6 @@ buf_read_page_low( page_id.page_no() * len, len, dst, bpage); if (UNIV_UNLIKELY(fio.err != DB_SUCCESS)) { - ut_d(auto n=) buf_pool.n_pend_reads--; - ut_ad(n > 0); buf_pool.corrupted_evict(bpage, buf_page_t::READ_FIX); } else if (sync) { thd_wait_end(NULL); @@ -367,7 +362,8 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf) read-ahead, as that could break the ibuf page access order */ return 0; - if (buf_pool.n_pend_reads > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) + if (os_aio_pending_reads_approx() > + buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) return 0; fil_space_t* space= fil_space_t::get(page_id.space()); @@ -519,7 +515,8 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) /* No read-ahead to avoid thread deadlocks */ return 0; - if (buf_pool.n_pend_reads > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) + if (os_aio_pending_reads_approx() > + buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) return 0; const uint32_t buf_read_ahead_area= buf_pool.read_ahead_area; @@ -687,18 +684,8 @@ void buf_read_recv_pages(uint32_t space_id, st_::span page_nos) limit += buf_pool.chunks[j].size / 2; } - for (ulint count = 0; buf_pool.n_pend_reads >= limit; ) { - std::this_thread::sleep_for( - std::chrono::milliseconds(10)); - - if (!(++count % 1000)) { - - ib::error() - << "Waited for " << count / 100 - << " seconds for " - << buf_pool.n_pend_reads - << " pending reads"; - } + if (os_aio_pending_reads() >= limit) { + os_aio_wait_until_no_pending_reads(); } space->reacquire(); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index a3d9df4af1b21..24319bcb8a6ea 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -97,7 +97,13 @@ bool fil_space_t::try_to_close(bool print_info) if (!node->is_open()) continue; - if (const auto n= space.set_closing()) + /* Other thread is trying to do fil_delete_tablespace() + concurrently for the same tablespace. So ignore this + tablespace and try to close the other one */ + const auto n= space.set_closing(); + if (n & STOPPING) + continue; + if (n & (PENDING | NEEDS_FSYNC)) { if (!print_info) continue; @@ -1370,7 +1376,10 @@ void fil_space_t::close_all() for (ulint count= 10000; count--;) { - if (!space.set_closing()) + const auto n= space.set_closing(); + if (n & STOPPING) + goto next; + if (!(n & (PENDING | NEEDS_FSYNC))) { node->close(); goto next; @@ -2060,6 +2069,10 @@ fil_ibd_open( must_validate = true; } + const bool operation_not_for_export = + srv_operation != SRV_OPERATION_RESTORE_EXPORT + && srv_operation != SRV_OPERATION_EXPORT_RESTORED; + /* Always look for a file at the default location. 
But don't log an error if the tablespace is already open in remote or dict. */ ut_a(df_default.filepath()); @@ -2070,6 +2083,7 @@ fil_ibd_open( drop_garbage_tables_after_restore() a little later. */ const bool strict = validate && !tablespaces_found + && operation_not_for_export && !(srv_operation == SRV_OPERATION_NORMAL && srv_start_after_restore && srv_force_recovery < SRV_FORCE_NO_BACKGROUND @@ -2118,7 +2132,11 @@ fil_ibd_open( goto corrupted; } - os_file_get_last_error(true); + os_file_get_last_error(operation_not_for_export, + !operation_not_for_export); + if (!operation_not_for_export) { + goto corrupted; + } sql_print_error("InnoDB: Could not find a valid tablespace" " file for %.*s. %s", static_cast(name.size()), name.data(), @@ -2285,6 +2303,7 @@ fil_ibd_discover( case SRV_OPERATION_RESTORE: break; case SRV_OPERATION_NORMAL: + case SRV_OPERATION_EXPORT_RESTORED: size_t len= strlen(db); if (len <= 4 || strcmp(db + len - 4, dot_ext[IBD])) { break; diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc index 0a4119587f86e..e93ca4bc1772b 100644 --- a/storage/innobase/fsp/fsp0file.cc +++ b/storage/innobase/fsp/fsp0file.cc @@ -756,7 +756,7 @@ the double write buffer. bool Datafile::restore_from_doublewrite() { - if (srv_operation != SRV_OPERATION_NORMAL) { + if (srv_operation > SRV_OPERATION_EXPORT_RESTORED) { return true; } diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc index ef65043690765..09eea04b65532 100644 --- a/storage/innobase/fsp/fsp0sysspace.cc +++ b/storage/innobase/fsp/fsp0sysspace.cc @@ -576,7 +576,7 @@ inline dberr_t SysTablespace::read_lsn_and_check_flags() ut_a(it->order() == 0); - if (srv_operation == SRV_OPERATION_NORMAL) { + if (srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { buf_dblwr.init_or_load_pages(it->handle(), it->filepath()); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 6321074146614..f11ac9a05b65f 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -545,6 +545,7 @@ mysql_pfs_key_t trx_pool_manager_mutex_key; mysql_pfs_key_t lock_wait_mutex_key; mysql_pfs_key_t trx_sys_mutex_key; mysql_pfs_key_t srv_threads_mutex_key; +mysql_pfs_key_t tpool_cache_mutex_key; /* all_innodb_mutexes array contains mutexes that are performance schema instrumented if "UNIV_PFS_MUTEX" @@ -578,6 +579,7 @@ static PSI_mutex_info all_innodb_mutexes[] = { PSI_KEY(rtr_match_mutex), PSI_KEY(rtr_path_mutex), PSI_KEY(trx_sys_mutex), + PSI_KEY(tpool_cache_mutex), }; # endif /* UNIV_PFS_MUTEX */ @@ -2077,7 +2079,7 @@ static void innodb_ddl_recovery_done(handlerton*) { ut_ad(!ddl_recovery_done); ut_d(ddl_recovery_done= true); - if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL && + if (!srv_read_only_mode && srv_operation <= SRV_OPERATION_EXPORT_RESTORED && srv_force_recovery < SRV_FORCE_NO_BACKGROUND) { if (srv_start_after_restore && !high_level_read_only) @@ -8479,6 +8481,37 @@ wsrep_calc_row_hash( return(0); } + +/** Append table-level exclusive key. 
+@param thd MySQL thread handle +@param table table +@retval false on success +@retval true on failure */ +ATTRIBUTE_COLD bool wsrep_append_table_key(MYSQL_THD thd, const dict_table_t &table) +{ + char db_buf[NAME_LEN + 1]; + char tbl_buf[NAME_LEN + 1]; + ulint db_buf_len, tbl_buf_len; + + if (!table.parse_name(db_buf, tbl_buf, &db_buf_len, &tbl_buf_len)) + { + WSREP_ERROR("Parse_name for table key append failed: %s", + wsrep_thd_query(thd)); + return true; + } + + /* Append table-level exclusive key */ + const int rcode = wsrep_thd_append_table_key(thd, db_buf, + tbl_buf, WSREP_SERVICE_KEY_EXCLUSIVE); + if (rcode) + { + WSREP_ERROR("Appending table key failed: %s, %d", + wsrep_thd_query(thd), rcode); + return true; + } + + return false; +} #endif /* WITH_WSREP */ /** @@ -8649,11 +8682,16 @@ ha_innobase::update_row( && !wsrep_thd_ignore_table(m_user_thd)) { DBUG_PRINT("wsrep", ("update row key")); - if (wsrep_append_keys(m_user_thd, - wsrep_protocol_version >= 4 - ? WSREP_SERVICE_KEY_UPDATE - : WSREP_SERVICE_KEY_EXCLUSIVE, - old_row, new_row)){ + /* We use table-level exclusive key for SEQUENCES + and normal key append for others. */ + if (table->s->table_type == TABLE_TYPE_SEQUENCE) { + if (wsrep_append_table_key(m_user_thd, *m_prebuilt->table)) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } else if (wsrep_append_keys(m_user_thd, + wsrep_protocol_version >= 4 + ? WSREP_SERVICE_KEY_UPDATE + : WSREP_SERVICE_KEY_EXCLUSIVE, + old_row, new_row)) { WSREP_DEBUG("WSREP: UPDATE_ROW_KEY FAILED"); DBUG_PRINT("wsrep", ("row key failed")); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); @@ -17917,44 +17955,6 @@ innodb_monitor_update( return; } -/** Validate SET GLOBAL innodb_buffer_pool_filename. -On Windows, file names with colon (:) are not allowed. -@param thd connection -@param save &srv_buf_dump_filename -@param value new value to be validated -@return 0 for valid name */ -static int innodb_srv_buf_dump_filename_validate(THD *thd, st_mysql_sys_var*, - void *save, - st_mysql_value *value) -{ - char buff[OS_FILE_MAX_PATH]; - int len= sizeof buff; - - if (const char *buf_name= value->val_str(value, buff, &len)) - { -#ifdef _WIN32 - if (!is_filename_allowed(buf_name, len, FALSE)) - { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_WRONG_ARGUMENTS, - "InnoDB: innodb_buffer_pool_filename " - "cannot have colon (:) in the file name."); - return 1; - } -#endif /* _WIN32 */ - if (buf_name == buff) - { - ut_ad(static_cast(len) < sizeof buff); - buf_name= thd_strmake(thd, buf_name, len); - } - - *static_cast(save)= buf_name; - return 0; - } - - return 1; -} - #ifdef UNIV_DEBUG static char* srv_buffer_pool_evict; @@ -19057,9 +19057,9 @@ static MYSQL_SYSVAR_SIZE_T(buffer_pool_chunk_size, srv_buf_pool_chunk_unit, 0, 0, SIZE_T_MAX, 1024 * 1024); static MYSQL_SYSVAR_STR(buffer_pool_filename, srv_buf_dump_filename, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Filename to/from which to dump/load the InnoDB buffer pool", - innodb_srv_buf_dump_filename_validate, NULL, SRV_BUF_DUMP_FILENAME_DEFAULT); + NULL, NULL, SRV_BUF_DUMP_FILENAME_DEFAULT); static MYSQL_SYSVAR_BOOL(buffer_pool_dump_now, innodb_buffer_pool_dump_now, PLUGIN_VAR_RQCMDARG, diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 3b537afef4019..3cb51f5e15dc6 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -4935,10 +4935,10 @@ static ST_FIELD_INFO innodb_sysindex_fields_info[]= Column("N_FIELDS", SLong(), NOT_NULL), #define SYS_INDEX_PAGE_NO 
5 - Column("PAGE_NO", SLong(), NOT_NULL), + Column("PAGE_NO", SLong(), NULLABLE), #define SYS_INDEX_SPACE 6 - Column("SPACE", SLong(), NOT_NULL), + Column("SPACE", SLong(), NULLABLE), #define SYS_INDEX_MERGE_THRESHOLD 7 Column("MERGE_THRESHOLD", SLong(), NOT_NULL), @@ -4990,12 +4990,14 @@ i_s_dict_fill_sys_indexes( if (index->page == FIL_NULL) { fields[SYS_INDEX_PAGE_NO]->set_null(); } else { + fields[SYS_INDEX_PAGE_NO]->set_notnull(); OK(fields[SYS_INDEX_PAGE_NO]->store(index->page, true)); } - if (space_id == ULINT_UNDEFINED) { + if (space_id == FIL_NULL) { fields[SYS_INDEX_SPACE]->set_null(); } else { + fields[SYS_INDEX_SPACE]->set_notnull(); OK(fields[SYS_INDEX_SPACE]->store(space_id, true)); } diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 7490563869818..4295c3ba34263 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -1689,8 +1689,6 @@ class buf_pool_t /** map of block->frame to buf_block_t blocks that belong to buf_buddy_alloc(); protected by buf_pool.mutex */ hash_table_t zip_hash; - /** number of pending read operations */ - Atomic_counter n_pend_reads; Atomic_counter n_pend_unzip; /*!< number of pending decompressions */ @@ -1717,7 +1715,7 @@ class buf_pool_t /** flush_list size in bytes; protected by flush_list_mutex */ ulint flush_list_bytes; /** possibly modified persistent pages (a subset of LRU); - buf_dblwr.pending_writes() is approximately COUNT(is_write_fixed()) */ + os_aio_pending_writes() is approximately COUNT(is_write_fixed()) */ UT_LIST_BASE_NODE_T(buf_page_t) flush_list; /** number of blocks ever added to flush_list; sometimes protected by flush_list_mutex */ @@ -1876,18 +1874,6 @@ class buf_pool_t /** Reserve a buffer. */ buf_tmp_buffer_t *io_buf_reserve() { return io_buf.reserve(); } - /** @return whether any I/O is pending */ - bool any_io_pending() - { - if (n_pend_reads) - return true; - mysql_mutex_lock(&flush_list_mutex); - const bool any_pending= page_cleaner_status > PAGE_CLEANER_IDLE || - buf_dblwr.pending_writes(); - mysql_mutex_unlock(&flush_list_mutex); - return any_pending; - } - /** Remove a block from flush_list. @param bpage buffer pool page */ void delete_from_flush_list(buf_page_t *bpage) noexcept; diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index d9c9239c0b434..92b840d2f4c97 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -72,10 +72,6 @@ class buf_dblwr_t ulint writes_completed; /** number of pages written by flush_buffered_writes_completed() */ ulint pages_written; - /** condition variable for !writes_pending */ - pthread_cond_t write_cond; - /** number of pending page writes */ - size_t writes_pending; slot slots[2]; slot *active_slot; @@ -124,7 +120,7 @@ class buf_dblwr_t void recover(); /** Update the doublewrite buffer on data page write completion. */ - void write_completed(bool with_doublewrite); + void write_completed(); /** Flush possible buffered writes to persistent storage. It is very important to call this function after a batch of writes has been posted, and also when we may have to wait for a page latch! 
@@ -167,40 +163,6 @@ class buf_dblwr_t my_cond_wait(&cond, &mutex.m_mutex); mysql_mutex_unlock(&mutex); } - - /** Register an unbuffered page write */ - void add_unbuffered() - { - mysql_mutex_lock(&mutex); - writes_pending++; - mysql_mutex_unlock(&mutex); - } - - size_t pending_writes() - { - mysql_mutex_lock(&mutex); - const size_t pending{writes_pending}; - mysql_mutex_unlock(&mutex); - return pending; - } - - /** Wait for writes_pending to reach 0 */ - void wait_for_page_writes() - { - mysql_mutex_lock(&mutex); - while (writes_pending) - my_cond_wait(&write_cond, &mutex.m_mutex); - mysql_mutex_unlock(&mutex); - } - - /** Wait for writes_pending to reach 0 */ - void wait_for_page_writes(const timespec &abstime) - { - mysql_mutex_lock(&mutex); - while (writes_pending) - my_cond_timedwait(&write_cond, &mutex.m_mutex, &abstime); - mysql_mutex_unlock(&mutex); - } }; /** The doublewrite buffer */ diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 483d594c6b925..008cb3d1261a4 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -626,8 +626,7 @@ struct fil_space_t final @return number of pending operations, possibly with NEEDS_FSYNC flag */ uint32_t set_closing() { - return n_pending.fetch_or(CLOSING, std::memory_order_acquire) & - (PENDING | NEEDS_FSYNC); + return n_pending.fetch_or(CLOSING, std::memory_order_acquire); } public: diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index da9dec0582798..d5239ec3f9a32 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -462,5 +462,15 @@ void destroy_background_thd(MYSQL_THD thd); void innobase_reset_background_thd(MYSQL_THD); +#ifdef WITH_WSREP +/** Append table-level exclusive key. +@param thd MySQL thread handle +@param table table +@retval false on success +@retval true on failure */ +struct dict_table_t; +bool wsrep_append_table_key(MYSQL_THD thd, const dict_table_t &table); +#endif /* WITH_WSREP */ + #endif /* !UNIV_INNOCHECKSUM */ #endif /* HA_INNODB_PROTOTYPES_H */ diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index 09e4ece88943c..58a2ee3468cb7 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -378,7 +378,6 @@ typedef srw_lock log_rwlock_t; lsn_t get_lsn(std::memory_order order= std::memory_order_relaxed) const { return lsn.load(order); } - void set_lsn(lsn_t lsn) { this->lsn.store(lsn, std::memory_order_release); } lsn_t get_flushed_lsn(std::memory_order order= std::memory_order_acquire) const noexcept diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index c0e5f7683a97f..764071cfab98f 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -933,13 +933,14 @@ os_file_flush_func( /** Retrieves the last error number if an error occurs in a file io function. The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, -the OS error number + 100 is returned. -@param[in] report true if we want an error message printed - for all errors -@return error number, or OS error number + 100 */ -ulint -os_file_get_last_error( - bool report); +the OS error number + OS_FILE_ERROR_MAX is returned. 
+@param[in] report_all_errors true if we want an error message + printed of all errors +@param[in] on_error_silent true then don't print any diagnostic + to the log +@return error number, or OS error number + OS_FILE_ERROR_MAX */ +ulint os_file_get_last_error(bool report_all_errors, + bool on_error_silent= false); /** NOTE! Use the corresponding macro os_file_read(), not directly this function! @@ -1048,6 +1049,13 @@ void os_aio_free(); @retval DB_IO_ERROR on I/O error */ dberr_t os_aio(const IORequest &type, void *buf, os_offset_t offset, size_t n); +/** @return number of pending reads */ +size_t os_aio_pending_reads(); +/** @return approximate number of pending reads */ +size_t os_aio_pending_reads_approx(); +/** @return number of pending writes */ +size_t os_aio_pending_writes(); + /** Wait until there are no pending asynchronous writes. */ void os_aio_wait_until_no_pending_writes(); diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index b85fa51838426..0f02e307a472b 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -329,6 +329,9 @@ extern my_bool srv_immediate_scrub_data_uncompressed; enum srv_operation_mode { /** Normal mode (MariaDB Server) */ SRV_OPERATION_NORMAL, + /** Mariabackup is executing server to export already restored + tablespaces */ + SRV_OPERATION_EXPORT_RESTORED, /** Mariabackup taking a backup */ SRV_OPERATION_BACKUP, /** Mariabackup restoring a backup for subsequent --copy-back */ diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 793f7b327c8b7..88b6bae963f83 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -1205,7 +1205,7 @@ static void fil_name_process(const char *name, ulint len, uint32_t space_id, return; } - ut_ad(srv_operation == SRV_OPERATION_NORMAL + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -1324,8 +1324,9 @@ static void fil_name_process(const char *name, ulint len, uint32_t space_id, ut_ad(space == NULL); if (srv_force_recovery == 0) { sql_print_error("InnoDB: Recovery cannot access" - " file %s (tablespace " - UINT32PF ")", name, space_id); + " file %.*s (tablespace " + UINT32PF ")", int(len), name, + space_id); sql_print_information("InnoDB: You may set " "innodb_force_recovery=1" " to ignore this and" @@ -1336,9 +1337,10 @@ static void fil_name_process(const char *name, ulint len, uint32_t space_id, } sql_print_warning("InnoDB: Ignoring changes to" - " file %s (tablespace " UINT32PF ")" + " file %.*s (tablespace " + UINT32PF ")" " due to innodb_force_recovery", - name, space_id); + int(len), name, space_id); } } } @@ -3413,7 +3415,7 @@ static void log_sort_flush_list() @param last_batch whether it is possible to write more redo log */ void recv_sys_t::apply(bool last_batch) { - ut_ad(srv_operation == SRV_OPERATION_NORMAL || + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -3568,7 +3570,7 @@ void recv_sys_t::apply(bool last_batch) for (;;) { const bool empty= pages.empty(); - if (empty && !buf_pool.n_pend_reads) + if (empty && !os_aio_pending_reads()) break; if (!is_corrupt_fs() && !is_corrupt_log()) @@ -3581,7 +3583,6 @@ void recv_sys_t::apply(bool last_batch) { mysql_mutex_unlock(&mutex); os_aio_wait_until_no_pending_reads(); - ut_ad(!buf_pool.n_pend_reads); mysql_mutex_lock(&mutex); 
ut_ad(pages.empty()); } @@ -4139,7 +4140,7 @@ dberr_t recv_recovery_from_checkpoint_start() { bool rescan = false; - ut_ad(srv_operation == SRV_OPERATION_NORMAL + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); ut_d(mysql_mutex_lock(&buf_pool.flush_list_mutex)); @@ -4256,7 +4257,7 @@ dberr_t recv_recovery_from_checkpoint_start() rescan = true; } - if (srv_operation == SRV_OPERATION_NORMAL) { + if (srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { deferred_spaces.deferred_dblwr(); buf_dblwr.recover(); } @@ -4319,7 +4320,7 @@ dberr_t recv_recovery_from_checkpoint_start() } log_sys.buf_free = recv_sys.offset; if (recv_needed_recovery - && srv_operation == SRV_OPERATION_NORMAL) { + && srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { /* Write a FILE_CHECKPOINT marker as the first thing, before generating any other redo log. This ensures that subsequent crash recovery will be possible even diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index c7164ccc3ff77..d28d7b02c4371 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -124,6 +124,11 @@ class io_slots { wait(); } + + mysql_mutex_t& mutex() + { + return m_cache.mutex(); + } }; static io_slots *read_slots; @@ -727,22 +732,16 @@ os_file_punch_hole_posix( return(DB_IO_NO_PUNCH_HOLE); } - - /** Retrieves the last error number if an error occurs in a file io function. The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, the OS error number + 100 is returned. @param[in] report_all_errors true if we want an error message - printed of all errors + printed of all errors @param[in] on_error_silent true then don't print any diagnostic to the log @return error number, or OS error number + 100 */ -static -ulint -os_file_get_last_error_low( - bool report_all_errors, - bool on_error_silent) +ulint os_file_get_last_error(bool report_all_errors, bool on_error_silent) { int err = errno; @@ -1803,16 +1802,13 @@ bool os_file_flush_func(os_file_t file) The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, then OS error number + OS_FILE_ERROR_MAX is returned. -@param[in] report_all_errors true if we want an error message printed - of all errors +@param[in] report_all_errors true if we want an error message +printed of all errors @param[in] on_error_silent true then don't print any diagnostic to the log @return error number, or OS error number + OS_FILE_ERROR_MAX */ -static -ulint -os_file_get_last_error_low( - bool report_all_errors, - bool on_error_silent) +ulint os_file_get_last_error(bool report_all_errors, bool on_error_silent) + { ulint err = (ulint) GetLastError(); @@ -2936,20 +2932,6 @@ os_file_read_func( return err ? err : DB_IO_ERROR; } -/** Retrieves the last error number if an error occurs in a file io function. -The number should be retrieved before any other OS calls (because they may -overwrite the error number). If the number is not known to this program, -the OS error number + 100 is returned. -@param[in] report_all_errors true if we want an error printed - for all errors -@return error number, or OS error number + 100 */ -ulint -os_file_get_last_error( - bool report_all_errors) -{ - return(os_file_get_last_error_low(report_all_errors, false)); -} - /** Handle errors for file operations. 
@param[in] name name of a file or NULL @param[in] operation operation @@ -2966,7 +2948,7 @@ os_file_handle_error_cond_exit( { ulint err; - err = os_file_get_last_error_low(false, on_error_silent); + err = os_file_get_last_error(false, on_error_silent); switch (err) { case OS_FILE_DISK_FULL: @@ -3663,6 +3645,30 @@ void os_aio_wait_until_no_pending_writes() buf_dblwr.wait_flush_buffered_writes(); } +/** @return number of pending reads */ +size_t os_aio_pending_reads() +{ + mysql_mutex_lock(&read_slots->mutex()); + size_t pending= read_slots->pending_io_count(); + mysql_mutex_unlock(&read_slots->mutex()); + return pending; +} + +/** @return approximate number of pending reads */ +size_t os_aio_pending_reads_approx() +{ + return read_slots->pending_io_count(); +} + +/** @return number of pending writes */ +size_t os_aio_pending_writes() +{ + mysql_mutex_lock(&write_slots->mutex()); + size_t pending= write_slots->pending_io_count(); + mysql_mutex_unlock(&write_slots->mutex()); + return pending; +} + /** Wait until all pending asynchronous reads have completed. */ void os_aio_wait_until_no_pending_reads() { diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index ba1bb24b43b23..89e6d1497b74e 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -3371,7 +3371,7 @@ page_zip_validate_low( differed. Let us ignore it. */ page_zip_fail(("page_zip_validate:" " min_rec_flag" - " (%s" ULINTPF "," ULINTPF + " (%s" UINT32PF "," UINT32PF ",0x%02x)\n", sloppy ? "ignored, " : "", page_get_space_id(page), @@ -3416,7 +3416,8 @@ page_zip_validate_low( page + PAGE_NEW_INFIMUM, TRUE); trec = page_rec_get_next_low( temp_page + PAGE_NEW_INFIMUM, TRUE); - const ulint n_core = page_is_leaf(page) ? index->n_fields : 0; + const ulint n_core = (index && page_is_leaf(page)) + ? index->n_fields : 0; do { if (page_offset(rec) != page_offset(trec)) { diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc index c2b2bc7120ded..d190a001b6f81 100644 --- a/storage/innobase/rem/rem0cmp.cc +++ b/storage/innobase/rem/rem0cmp.cc @@ -300,8 +300,9 @@ int cmp_data(ulint mtype, ulint prtype, bool descending, DBUG_ASSERT(is_strnncoll_compatible(prtype & DATA_MYSQL_TYPE_MASK)); if (CHARSET_INFO *cs= all_charsets[dtype_get_charset_coll(prtype)]) { - cmp= cs->coll->strnncollsp_nchars(cs, data1, len1, data2, len2, - std::max(len1, len2)); + cmp= cs->coll-> + strnncollsp_nchars(cs, data1, len1, data2, len2, std::max(len1, len2), + MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES); goto func_exit; } goto no_collation; diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index 98cf2dda9003d..d743ef9aa5c90 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -291,7 +291,9 @@ rec_init_offsets_comp_ordinary( != n_core) ? UT_BITS_IN_BYTES(unsigned(index->get_n_nullable(n_core))) : (redundant_temp - ? UT_BITS_IN_BYTES(index->n_nullable) + ? (index->is_instant() + ? 
UT_BITS_IN_BYTES(index->get_n_nullable(n_core)) + : UT_BITS_IN_BYTES(index->n_nullable)) : index->n_core_null_bytes); if (mblob) { @@ -448,7 +450,7 @@ rec_init_offsets_comp_ordinary( continue; } - len = offs += len; + len = offs += static_cast(len); } else { len = offs += field->fixed_len; } diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 2dcc16130c3f4..8b35728f35b8d 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -3883,6 +3883,7 @@ static dberr_t fil_iterate( block->page.zip.data = src; frame_changed = true; } else if (!page_compressed + && type != FIL_PAGE_TYPE_XDES && !block->page.zip.data) { block->page.frame = src; frame_changed = true; diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index e327717ce659c..463fb90d714a7 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -49,6 +49,7 @@ Created 4/20/1996 Heikki Tuuri #ifdef WITH_WSREP #include #include +#include "ha_prototypes.h" #endif /* WITH_WSREP */ /************************************************************************* @@ -2578,42 +2579,6 @@ but GCC 4.8.5 does not support pop_options. */ # pragma GCC optimize ("O0") #endif -#ifdef WITH_WSREP -/** Start bulk insert operation for Galera by appending -table-level exclusive key for bulk insert. -@param trx transaction -@param index index -@retval false on success -@retval true on failure */ -ATTRIBUTE_COLD static bool row_ins_wsrep_start_bulk(trx_t *trx, const dict_index_t &index) -{ - char db_buf[NAME_LEN + 1]; - char tbl_buf[NAME_LEN + 1]; - ulint db_buf_len, tbl_buf_len; - - if (!index.table->parse_name(db_buf, tbl_buf, &db_buf_len, &tbl_buf_len)) - { - WSREP_ERROR("Parse_name for bulk insert failed: %s", - wsrep_thd_query(trx->mysql_thd)); - trx->error_state = DB_ROLLBACK; - return true; - } - - /* Append table-level exclusive key for bulk insert. */ - const int rcode = wsrep_thd_append_table_key(trx->mysql_thd, db_buf, - tbl_buf, WSREP_SERVICE_KEY_EXCLUSIVE); - if (rcode) - { - WSREP_ERROR("Appending table key for bulk insert failed: %s, %d", - wsrep_thd_query(trx->mysql_thd), rcode); - trx->error_state = DB_ROLLBACK; - return true; - } - - return false; -} -#endif - /***************************************************************//** Tries to insert an entry into a clustered index, ignoring foreign key constraints. If a record with the same unique key is found, the other @@ -2780,10 +2745,13 @@ row_ins_clust_index_entry_low( #ifdef WITH_WSREP if (trx->is_wsrep()) { - if (!wsrep_thd_is_local_transaction(trx->mysql_thd)) - goto skip_bulk_insert; - if (row_ins_wsrep_start_bulk(trx, *index)) - goto err_exit; + if (!wsrep_thd_is_local_transaction(trx->mysql_thd)) + goto skip_bulk_insert; + if (wsrep_append_table_key(trx->mysql_thd, *index->table)) + { + trx->error_state = DB_ROLLBACK; + goto err_exit; + } } #endif /* WITH_WSREP */ diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc index a4b52fd2a2f19..9b11b7fbc3f46 100644 --- a/storage/innobase/row/row0vers.cc +++ b/storage/innobase/row/row0vers.cc @@ -859,6 +859,30 @@ static bool dtuple_coll_eq(const dtuple_t &tuple1, const dtuple_t &tuple2) return true; } +/** Find out whether data tuple has missing data type +for indexed virtual column. 
+@param tuple data tuple +@param index virtual index +@return true if tuple has missing column type */ +static bool dtuple_vcol_data_missing(const dtuple_t &tuple, + dict_index_t *index) +{ + for (ulint i= 0; i < index->n_uniq; i++) + { + dict_col_t *col= index->fields[i].col; + if (!col->is_virtual()) + continue; + dict_v_col_t *vcol= reinterpret_cast<dict_v_col_t*>(col); + for (ulint j= 0; j < index->table->n_v_cols; j++) + { + if (vcol == &index->table->v_cols[j] + && tuple.v_fields[j].type.mtype == DATA_MISSING) + return true; + } + } + return false; +} + /** Finds out if a version of the record, where the version >= the current purge_sys.view, should have ientry as its secondary index entry. We check if there is any not delete marked version of the record where the trx @@ -1068,6 +1092,9 @@ row_vers_old_has_index_entry( if (dict_index_has_virtual(index)) { if (vrow) { + if (dtuple_vcol_data_missing(*vrow, index)) { + goto nochange_index; + } /* Keep the virtual row info for the next version, unless it is changed */ mem_heap_empty(v_heap); @@ -1078,6 +1105,7 @@ row_vers_old_has_index_entry( if (!cur_vrow) { /* Nothing for this index has changed, continue */ +nochange_index: version = prev_version; continue; } diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 5d08c18a8e570..017a7b8d08af7 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -189,7 +189,11 @@ static dberr_t create_log_file(bool create_new_db, lsn_t lsn) ib_logfile0 in log_t::rename_resized(). */ delete_log_files(); - DBUG_ASSERT(!buf_pool.any_io_pending()); + ut_ad(!os_aio_pending_reads()); + ut_ad(!os_aio_pending_writes()); + ut_d(mysql_mutex_lock(&buf_pool.flush_list_mutex)); + ut_ad(!buf_pool.get_oldest_modification(0)); + ut_d(mysql_mutex_unlock(&buf_pool.flush_list_mutex)); log_sys.latch.wr_lock(SRW_LOCK_CALL); log_sys.set_capacity(); @@ -598,7 +602,7 @@ dberr_t srv_undo_tablespaces_init(bool create_new_db) srv_undo_tablespaces_open= 0; ut_a(srv_undo_tablespaces <= TRX_SYS_N_RSEGS); - ut_a(!create_new_db || srv_operation == SRV_OPERATION_NORMAL); + ut_a(!create_new_db || srv_operation <= SRV_OPERATION_EXPORT_RESTORED); if (srv_undo_tablespaces == 1) srv_undo_tablespaces= 0; @@ -857,7 +861,11 @@ static lsn_t srv_prepare_to_delete_redo_log_file() log_write_up_to(flushed_lsn, false); ut_ad(flushed_lsn == log_sys.get_lsn()); - ut_ad(!buf_pool.any_io_pending()); + ut_ad(!os_aio_pending_reads()); + ut_ad(!os_aio_pending_writes()); + ut_d(mysql_mutex_lock(&buf_pool.flush_list_mutex)); + ut_ad(!buf_pool.get_oldest_modification(0)); + ut_d(mysql_mutex_unlock(&buf_pool.flush_list_mutex)); DBUG_RETURN(flushed_lsn); } @@ -875,7 +883,7 @@ dberr_t srv_start(bool create_new_db) dberr_t err = DB_SUCCESS; mtr_t mtr; - ut_ad(srv_operation == SRV_OPERATION_NORMAL + ut_ad(srv_operation <= SRV_OPERATION_RESTORE_EXPORT || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -1204,7 +1212,8 @@ dberr_t srv_start(bool create_new_db) } switch (srv_operation) { - case SRV_OPERATION_NORMAL: + case SRV_OPERATION_NORMAL: + case SRV_OPERATION_EXPORT_RESTORED: case SRV_OPERATION_RESTORE_EXPORT: /* Initialize the change buffer. 
*/ err = dict_boot(); @@ -1342,7 +1351,7 @@ dberr_t srv_start(bool create_new_db) } } - if (srv_operation != SRV_OPERATION_NORMAL) { + if (srv_operation > SRV_OPERATION_EXPORT_RESTORED) { ut_ad(srv_operation == SRV_OPERATION_RESTORE_EXPORT || srv_operation == SRV_OPERATION_RESTORE); return(err); @@ -1374,7 +1383,11 @@ dberr_t srv_start(bool create_new_db) threads until creating a log checkpoint at the end of create_log_file(). */ ut_d(recv_no_log_write = true); - DBUG_ASSERT(!buf_pool.any_io_pending()); + ut_ad(!os_aio_pending_reads()); + ut_ad(!os_aio_pending_writes()); + ut_d(mysql_mutex_lock(&buf_pool.flush_list_mutex)); + ut_ad(!buf_pool.get_oldest_modification(0)); + ut_d(mysql_mutex_unlock(&buf_pool.flush_list_mutex)); /* Close the redo log file, so that we can replace it */ log_sys.close_file(); @@ -1559,7 +1572,8 @@ dberr_t srv_start(bool create_new_db) return(srv_init_abort(err)); } - if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL) { + if (!srv_read_only_mode + && srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { /* Initialize the innodb_temporary tablespace and keep it open until shutdown. */ err = srv_open_tmp_tablespace(create_new_db); @@ -1649,7 +1663,7 @@ void innodb_preshutdown() if (srv_read_only_mode) return; - if (!srv_fast_shutdown && srv_operation == SRV_OPERATION_NORMAL) + if (!srv_fast_shutdown && srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { /* Because a slow shutdown must empty the change buffer, we had better prevent any further changes from being buffered. */ @@ -1689,6 +1703,7 @@ void innodb_shutdown() mysql_mutex_unlock(&buf_pool.flush_list_mutex); break; case SRV_OPERATION_NORMAL: + case SRV_OPERATION_EXPORT_RESTORED: /* Shut down the persistent files. */ logs_empty_and_mark_files_at_shutdown(); } diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index c245dcea03687..10ec791f186d7 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -188,7 +188,7 @@ static MYSQL_SYSVAR_BOOL(page_checksum, maria_page_checksums, 0, /* It is only command line argument */ static MYSQL_SYSVAR_CONST_STR(log_dir_path, maria_data_root, - PLUGIN_VAR_NOSYSVAR | PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Path to the directory where to store transactional log", NULL, NULL, mysql_real_data_home); diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc index 244b11a30fcd7..ec8225d9e01ef 100644 --- a/storage/perfschema/pfs.cc +++ b/storage/perfschema/pfs.cc @@ -3068,7 +3068,8 @@ pfs_start_table_io_wait_v1(PSI_table_locker_state *state, PFS_table_share *share= pfs_table->m_share; wait->m_thread_internal_id= pfs_thread->m_thread_internal_id; - wait->m_class= &global_table_io_class; + if (wait->m_class == NULL || wait->m_class->m_type != PFS_CLASS_TABLE_LOCK) + wait->m_class= &global_table_io_class; wait->m_timer_start= timer_start; wait->m_timer_end= 0; wait->m_object_instance_addr= pfs_table->m_identity; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc index 81cd2200ae06c..a87fe01b3a1ce 100644 --- a/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc @@ -9,8 +9,8 @@ if ($rpl_inited) # Write file to make mysql-test-run.pl expect the "crash", but don't start # it until it's told to ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= 
$MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Send shutdown to the connected server and give diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test index e084b57fbdaf8..88a02c469bb2c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test @@ -16,6 +16,7 @@ # restart server to change error log and ignore corruptopn on startup --let $_mysqld_option=--log-error=$LOG --rocksdb_allow_to_start_after_corruption=1 --source include/restart_mysqld_with_option.inc +--let $_server_id= `SELECT @@server_id` --echo # --echo # Test server crashes on corrupted data and restarts diff --git a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test index 49e5e5c11726f..da9d8602c011c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test @@ -4,8 +4,8 @@ DROP TABLE IF EXISTS t1; --enable_warnings ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --let $_cache_file_name= $MYSQLTEST_VARDIR/tmp/persistent_cache --exec echo "wait" >$_expect_file_name diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test index e7ab37d2658be..b2647b38e088d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test @@ -17,8 +17,8 @@ CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITI # Write file to make mysql-test-run.pl expect the "crash", but don't restart the # server until it is told to ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --let LOG=$MYSQLTEST_VARDIR/tmp/validate_datadic.err --exec echo "wait" >$_expect_file_name diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test index 743f942af9cd2..1daa9898c1ae6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test @@ -7,8 +7,8 @@ SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000; # Write file to make mysql-test-run.pl expect the "crash", but don't restart the # server until it is told to ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" >$_expect_file_name # Send shutdown to the connected server and give it 10 
seconds to die before diff --git a/storage/spider/mysql-test/spider/bugfix/include/restart_spider.inc b/storage/spider/mysql-test/spider/bugfix/include/restart_spider.inc deleted file mode 100644 index a5446a6188dc0..0000000000000 --- a/storage/spider/mysql-test/spider/bugfix/include/restart_spider.inc +++ /dev/null @@ -1,8 +0,0 @@ ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.1.expect - ---exec echo "wait" > $_expect_file_name ---shutdown_server ---source include/wait_until_disconnected.inc ---exec echo "restart" > $_expect_file_name ---enable_reconnect ---source include/wait_until_connected_again.inc diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result index 5715edf2bd686..975d3834d4258 100644 --- a/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result @@ -9,4 +9,5 @@ CREATE FUNCTION spider_bg_direct_sql RETURNS INT SONAME 'ha_spider.so'; ERROR HY000: Can't execute the query because you have a conflicting read lock SELECT * FROM t; c +# restart DROP TABLE t; diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29904.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29904.result new file mode 100644 index 0000000000000..c89309a514db6 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29904.result @@ -0,0 +1,4 @@ +# +# MDEV-29904 SPIDER plugin initialization fails upon startup +# +# restart: --plugin-load-add=ha_spider diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test index 00d8ee73ebc9b..626364efb99a5 100644 --- a/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test @@ -6,6 +6,6 @@ FLUSH TABLES WITH READ LOCK; CREATE FUNCTION spider_bg_direct_sql RETURNS INT SONAME 'ha_spider.so'; SELECT * FROM t; ---source include/restart_spider.inc +--source include/restart_mysqld.inc DROP TABLE t; diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29904.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29904.test new file mode 100644 index 0000000000000..d3dcb363890cb --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29904.test @@ -0,0 +1,6 @@ +--echo # +--echo # MDEV-29904 SPIDER plugin initialization fails upon startup +--echo # + +--let $restart_parameters=--plugin-load-add=ha_spider +--source include/restart_mysqld.inc diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index 5eb97141ffd8c..812a5178036bb 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -9116,6 +9116,8 @@ void *spider_table_bg_sts_action( thd->mysys_var->current_cond = &thread->cond; thd->mysys_var->current_mutex = &thread->mutex; } + bool spd_wsrep_on = thd->variables.wsrep_on; + thd->variables.wsrep_on = false; while (spider_init_queries[i].length && !thd->killed && !thread->killed && thread->init_command) { @@ -9129,6 +9131,7 @@ void *spider_table_bg_sts_action( } ++i; } + thd->variables.wsrep_on = spd_wsrep_on; thd->mysys_var->current_cond = &thread->cond; thd->mysys_var->current_mutex = &thread->mutex; thd->client_capabilities -= CLIENT_MULTI_RESULTS; diff --git a/strings/conf_to_src.c b/strings/conf_to_src.c index 39ceee53ef9bb..692effbd3f466 100644 --- a/strings/conf_to_src.c +++ b/strings/conf_to_src.c @@ -471,7 +471,7 @@ main(int argc, char **argv __attribute__((unused))) 
bzero((void*)&all_charsets,sizeof(all_charsets)); bzero((void*) refids, sizeof(refids)); - sprintf(filename,"%s/%s",argv[1],"Index.xml"); + snprintf(filename,sizeof(filename),"%s/%s",argv[1],"Index.xml"); my_read_charset_file(filename); for (cs= all_charsets; @@ -482,7 +482,8 @@ main(int argc, char **argv __attribute__((unused))) { if ( (!simple_cs_is_full(cs)) && (cs->cs_name.str)) { - sprintf(filename,"%s/%s.xml",argv[1], cs->cs_name.str); + snprintf(filename, sizeof filename, "%s/%.*s.xml", + argv[1], cs->csname.length, cs->csname.str); my_read_charset_file(filename); } cs->state|= MY_CS_LOADED; diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c index e934453f8db50..a6c3fb6d0eaa8 100644 --- a/strings/ctype-bin.c +++ b/strings/ctype-bin.c @@ -132,7 +132,8 @@ static int my_strnncollsp_binary(CHARSET_INFO * cs __attribute__((unused)), static int my_strnncollsp_nchars_binary(CHARSET_INFO * cs __attribute__((unused)), const uchar *s, size_t slen, const uchar *t, size_t tlen, - size_t nchars) + size_t nchars, + uint flags) { set_if_smaller(slen, nchars); set_if_smaller(tlen, nchars); @@ -217,7 +218,8 @@ static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)), static int my_strnncollsp_nchars_8bit_bin(CHARSET_INFO * cs, const uchar *a, size_t a_length, const uchar *b, size_t b_length, - size_t nchars) + size_t nchars, + uint flags) { set_if_smaller(a_length, nchars); set_if_smaller(b_length, nchars); diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c index 42372e3b13cbf..16ab99b31de9b 100644 --- a/strings/ctype-simple.c +++ b/strings/ctype-simple.c @@ -214,7 +214,8 @@ static int my_strnncollsp_nchars_simple(CHARSET_INFO * cs, const uchar *a, size_t a_length, const uchar *b, size_t b_length, - size_t nchars) + size_t nchars, + uint flags) { set_if_smaller(a_length, nchars); set_if_smaller(b_length, nchars); diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c index 3ae5240f9bca4..7ca33807fe4ee 100644 --- a/strings/ctype-tis620.c +++ b/strings/ctype-tis620.c @@ -593,7 +593,8 @@ static int my_strnncollsp_nchars_tis620(CHARSET_INFO * cs, const uchar *a, size_t a_length, const uchar *b, size_t b_length, - size_t nchars) + size_t nchars, + uint flags) { set_if_smaller(a_length, nchars); set_if_smaller(b_length, nchars); diff --git a/strings/ctype-uca.inl b/strings/ctype-uca.inl index 9b8c271ab50c1..6aa72847c81c7 100644 --- a/strings/ctype-uca.inl +++ b/strings/ctype-uca.inl @@ -398,6 +398,7 @@ static inline weight_and_nchars_t MY_FUNCTION_NAME(scanner_next_pad_trim)(my_uca_scanner *scanner, my_uca_scanner_param *param, size_t nchars, + uint flags, uint *generated) { weight_and_nchars_t res; @@ -411,7 +412,10 @@ MY_FUNCTION_NAME(scanner_next_pad_trim)(my_uca_scanner *scanner, We reached the end of the string, but the caller wants more weights. Perform space padding. */ - res.weight= my_space_weight(param->level); + res.weight= + flags & MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES ? 
+ my_space_weight(param->level) : 0; + res.nchars= 1; (*generated)++; } @@ -448,7 +452,8 @@ MY_FUNCTION_NAME(strnncollsp_nchars_onelevel)(CHARSET_INFO *cs, const MY_UCA_WEIGHT_LEVEL *level, const uchar *s, size_t slen, const uchar *t, size_t tlen, - size_t nchars) + size_t nchars, + uint flags) { my_uca_scanner sscanner; my_uca_scanner tscanner; @@ -481,16 +486,17 @@ TODO: strnncollsp_nchars_onelevel s_res= MY_FUNCTION_NAME(scanner_next_pad_trim)(&sscanner, ¶m, s_nchars_left, - &generated); + flags, &generated); t_res= MY_FUNCTION_NAME(scanner_next_pad_trim)(&tscanner, ¶m, t_nchars_left, - &generated); + flags, &generated); if ((diff= (s_res.weight - t_res.weight))) return diff; if (generated == 2) { - if (cs->state & MY_CS_NOPAD) + if ((cs->state & MY_CS_NOPAD) && + (flags & MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES)) { /* Both values are auto-generated. There's no real data any more. @@ -542,11 +548,12 @@ static int MY_FUNCTION_NAME(strnncollsp_nchars)(CHARSET_INFO *cs, const uchar *s, size_t slen, const uchar *t, size_t tlen, - size_t nchars) + size_t nchars, + uint flags) { return MY_FUNCTION_NAME(strnncollsp_nchars_onelevel)(cs, &cs->uca->level[0], s, slen, t, tlen, - nchars); + nchars, flags); } @@ -557,7 +564,8 @@ static int MY_FUNCTION_NAME(strnncollsp_nchars_multilevel)(CHARSET_INFO *cs, const uchar *s, size_t slen, const uchar *t, size_t tlen, - size_t nchars) + size_t nchars, + uint flags) { uint i, level_flags= cs->levels_for_order; for (i= 0; level_flags; i++, level_flags>>= 1) @@ -569,7 +577,7 @@ MY_FUNCTION_NAME(strnncollsp_nchars_multilevel)(CHARSET_INFO *cs, &cs->uca->level[i], s, slen, t, tlen, - nchars); + nchars, flags); if (ret) return ret; } diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index 5c41380cbc969..4b29d65673116 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -3033,6 +3033,14 @@ static inline int my_weight_mb2_ucs2_general_ci(uchar b0, uchar b1) } +static inline int my_weight_mb2_ucs2_general_mysql500_ci(uchar b0, uchar b1) +{ + my_wc_t wc= UCS2_CODE(b0, b1); + MY_UNICASE_CHARACTER *page= my_unicase_mysql500_pages[wc >> 8]; + return (int) (page ? 
page[wc & 0xFF].sort : wc); +} + + #define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_general_ci #define DEFINE_STRNXFRM_UNICODE #define DEFINE_STRNXFRM_UNICODE_NOPAD @@ -3046,6 +3054,18 @@ static inline int my_weight_mb2_ucs2_general_ci(uchar b0, uchar b1) #include "strcoll.inl" +#define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_general_mysql500_ci +#define DEFINE_STRNXFRM_UNICODE +#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_ucs2_quick(pwc, s, e) +#define OPTIMIZE_ASCII 0 +#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR +#define UNICASE_PAGE0 my_unicase_mysql500_page00 +#define UNICASE_PAGES my_unicase_mysql500_pages +#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x)) +#define WEIGHT_MB2(b0,b1) my_weight_mb2_ucs2_general_mysql500_ci(b0,b1) +#include "strcoll.inl" + + #define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_bin #define DEFINE_STRNXFRM_UNICODE_BIN2 #define MY_MB_WC(cs, pwc, s, e) my_mb_wc_ucs2_quick(pwc, s, e) @@ -3335,6 +3355,27 @@ static MY_COLLATION_HANDLER my_collation_ucs2_general_ci_handler = }; +static MY_COLLATION_HANDLER my_collation_ucs2_general_mysql500_ci_handler = +{ + NULL, /* init */ + my_strnncoll_ucs2_general_mysql500_ci, + my_strnncollsp_ucs2_general_mysql500_ci, + my_strnncollsp_nchars_ucs2_general_mysql500_ci, + my_strnxfrm_ucs2_general_mysql500_ci, + my_strnxfrmlen_unicode, + my_like_range_generic, + my_wildcmp_ucs2_ci, + my_strcasecmp_mb2_or_mb4, + my_instr_mb, + my_hash_sort_ucs2, + my_propagate_simple, + my_min_str_mb_simple, + my_max_str_mb_simple, + my_ci_get_id_generic, + my_ci_get_collation_name_generic +}; + + static MY_COLLATION_HANDLER my_collation_ucs2_bin_handler = { NULL, /* init */ @@ -3494,7 +3535,7 @@ struct charset_info_st my_charset_ucs2_general_mysql500_ci= 0, /* escape_with_backslash_is_dangerous */ MY_CS_COLL_LEVELS_S1, &my_charset_ucs2_handler, - &my_collation_ucs2_general_ci_handler + &my_collation_ucs2_general_mysql500_ci_handler }; diff --git a/strings/ctype-unidata.h b/strings/ctype-unidata.h index 6712f5e1d79a9..9900fd0ceddda 100644 --- a/strings/ctype-unidata.h +++ b/strings/ctype-unidata.h @@ -21,6 +21,9 @@ extern MY_UNICASE_CHARACTER my_unicase_default_page00[256]; extern MY_UNICASE_CHARACTER *my_unicase_default_pages[256]; +extern MY_UNICASE_CHARACTER my_unicase_mysql500_page00[256]; +extern MY_UNICASE_CHARACTER *my_unicase_mysql500_pages[256]; + size_t my_strxfrm_pad_nweights_unicode(uchar *str, uchar *strend, size_t nweights); size_t my_strxfrm_pad_unicode(uchar *str, uchar *strend); diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index 1238f8490782f..06a10034a33a8 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -249,7 +249,7 @@ MY_UNICASE_CHARACTER my_unicase_default_page00[]={ Almost similar to my_unicase_default_page00, but maps sorting order for U+00DF to 0x00DF instead of 0x0053. */ -static MY_UNICASE_CHARACTER plane00_mysql500[]={ +MY_UNICASE_CHARACTER my_unicase_mysql500_page00[]={ {0x0000,0x0000,0x0000}, {0x0001,0x0001,0x0001}, {0x0002,0x0002,0x0002}, {0x0003,0x0003,0x0003}, {0x0004,0x0004,0x0004}, {0x0005,0x0005,0x0005}, @@ -2002,8 +2002,8 @@ MY_UNICASE_INFO my_unicase_default= /* Reproduce old utf8mb3_general_ci behaviour before we fixed Bug#27877. 
*/ -MY_UNICASE_CHARACTER *my_unicase_pages_mysql500[256]={ - plane00_mysql500, +MY_UNICASE_CHARACTER *my_unicase_mysql500_pages[256]={ + my_unicase_mysql500_page00, plane01, plane02, plane03, plane04, plane05, plane06, plane07, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -2043,7 +2043,7 @@ MY_UNICASE_CHARACTER *my_unicase_pages_mysql500[256]={ MY_UNICASE_INFO my_unicase_mysql500= { 0xFFFF, - my_unicase_pages_mysql500 + my_unicase_mysql500_pages }; @@ -5560,14 +5560,14 @@ static inline int my_weight_mb3_utf8mb3_general_ci(uchar b0, uchar b1, uchar b2) static inline int my_weight_mb1_utf8mb3_general_mysql500_ci(uchar b) { - return (int) plane00_mysql500[b & 0xFF].sort; + return (int) my_unicase_mysql500_page00[b & 0xFF].sort; } static inline int my_weight_mb2_utf8mb3_general_mysql500_ci(uchar b0, uchar b1) { my_wc_t wc= UTF8MB2_CODE(b0, b1); - MY_UNICASE_CHARACTER *page= my_unicase_pages_mysql500[wc >> 8]; + MY_UNICASE_CHARACTER *page= my_unicase_mysql500_pages[wc >> 8]; /* `page` should never be NULL for 2-byte utf8 characters. See comments in my_weight_mb2_utf8mb3_general_ci(). @@ -5580,7 +5580,7 @@ static inline int my_weight_mb3_utf8mb3_general_mysql500_ci(uchar b0, uchar b1, uchar b2) { my_wc_t wc= UTF8MB3_CODE(b0, b1, b2); - MY_UNICASE_CHARACTER *page= my_unicase_pages_mysql500[wc >> 8]; + MY_UNICASE_CHARACTER *page= my_unicase_mysql500_pages[wc >> 8]; return (int) (page ? page[wc & 0xFF].sort : wc); } @@ -5590,8 +5590,8 @@ my_weight_mb3_utf8mb3_general_mysql500_ci(uchar b0, uchar b1, uchar b2) #define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf8mb3_quick(pwc, s, e) #define OPTIMIZE_ASCII 1 #define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR -#define UNICASE_PAGE0 plane00_mysql500 -#define UNICASE_PAGES my_unicase_pages_mysql500 +#define UNICASE_PAGE0 my_unicase_mysql500_page00 +#define UNICASE_PAGES my_unicase_mysql500_pages #define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x)) #define WEIGHT_MB1(x) my_weight_mb1_utf8mb3_general_mysql500_ci(x) #define WEIGHT_MB2(x,y) my_weight_mb2_utf8mb3_general_mysql500_ci(x,y) diff --git a/strings/ctype.c b/strings/ctype.c index 7ff7e36ce2a02..3d13fe55be8f8 100644 --- a/strings/ctype.c +++ b/strings/ctype.c @@ -1360,7 +1360,8 @@ my_convert_fix(CHARSET_INFO *to_cs, char *to, size_t to_length, int my_strnncollsp_nchars_generic(CHARSET_INFO *cs, const uchar *str1, size_t len1, const uchar *str2, size_t len2, - size_t nchars) + size_t nchars, + uint flags) { int error; len1= my_well_formed_length(cs, (const char *) str1, @@ -1377,7 +1378,8 @@ int my_strnncollsp_nchars_generic(CHARSET_INFO *cs, int my_strnncollsp_nchars_generic_8bit(CHARSET_INFO *cs, const uchar *str1, size_t len1, const uchar *str2, size_t len2, - size_t nchars) + size_t nchars, + uint flags) { set_if_smaller(len1, nchars); set_if_smaller(len2, nchars); diff --git a/strings/strcoll.inl b/strings/strcoll.inl index eb5c6e3c717cf..ce220bebadbc3 100644 --- a/strings/strcoll.inl +++ b/strings/strcoll.inl @@ -390,7 +390,8 @@ static int MY_FUNCTION_NAME(strnncollsp_nchars)(CHARSET_INFO *cs __attribute__((unused)), const uchar *a, size_t a_length, const uchar *b, size_t b_length, - size_t nchars) + size_t nchars, + uint flags) { const uchar *a_end= a + a_length; const uchar *b_end= b + b_length; diff --git a/strings/strings_def.h b/strings/strings_def.h index c919861d8aae9..39fdd1780a4ed 100644 --- a/strings/strings_def.h +++ b/strings/strings_def.h @@ -108,12 +108,14 @@ static inline const uchar *skip_trailing_space(const uchar *ptr,size_t len) 
int my_strnncollsp_nchars_generic(CHARSET_INFO *cs, const uchar *str1, size_t len1, const uchar *str2, size_t len2, - size_t nchars); + size_t nchars, + uint flags); int my_strnncollsp_nchars_generic_8bit(CHARSET_INFO *cs, const uchar *str1, size_t len1, const uchar *str2, size_t len2, - size_t nchars); + size_t nchars, + uint flags); uint my_8bit_charset_flags_from_data(CHARSET_INFO *cs); uint my_8bit_collation_flags_from_data(CHARSET_INFO *cs); diff --git a/strings/uctypedump.c b/strings/uctypedump.c index 30ae33afee15b..397b6e586f1ac 100644 --- a/strings/uctypedump.c +++ b/strings/uctypedump.c @@ -120,7 +120,7 @@ int main(int ac, char ** av) } else { - strcpy(tok,s); + safe_strcpy(tok, sizeof(tok), s); } end=tok+strlen(tok); @@ -225,7 +225,7 @@ int main(int ac, char ** av) { char plane_name[128]="NULL"; if(uctype[plane].ctype){ - sprintf(plane_name,"uctype_page%02X",(uint) plane); + snprintf(plane_name,sizeof(plane_name),"uctype_page%02X",(uint) plane); } printf("\t{%d,%s}%s\n",uctype[plane].pctype,plane_name,plane<255?",":""); } diff --git a/strings/xml.c b/strings/xml.c index d16df34bf30dc..7260ecadc6639 100644 --- a/strings/xml.c +++ b/strings/xml.c @@ -304,10 +304,10 @@ static int my_xml_leave(MY_XML_PARSER *p, const char *str, size_t slen) if (glen) { mstr(g, tag, sizeof(g)-1, glen); - sprintf(p->errstr,"'</%s>' unexpected ('</%s>' wanted)",s,g); + snprintf(p->errstr,sizeof(p->errstr),"'</%s>' unexpected ('</%s>' wanted)",s,g); } else - sprintf(p->errstr,"'</%s>' unexpected (END-OF-INPUT wanted)", s); + snprintf(p->errstr,sizeof(p->errstr),"'</%s>' unexpected (END-OF-INPUT wanted)", s); return MY_XML_ERROR; } @@ -362,7 +362,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len) { if (MY_XML_IDENT != (lex=my_xml_scan(p,&a))) { - sprintf(p->errstr,"%s unexpected (ident wanted)",lex2str(lex)); + snprintf(p->errstr,sizeof(p->errstr),"%s unexpected (ident wanted)",lex2str(lex)); return MY_XML_ERROR; } if (MY_XML_OK != my_xml_leave(p,a.beg,(size_t) (a.end-a.beg))) @@ -390,7 +390,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len) } else { - sprintf(p->errstr,"%s unexpected (ident or '/' wanted)", + snprintf(p->errstr,sizeof(p->errstr),"%s unexpected (ident or '/' wanted)", lex2str(lex)); return MY_XML_ERROR; } @@ -412,7 +412,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len) } else { - sprintf(p->errstr,"%s unexpected (ident or string wanted)", + snprintf(p->errstr,sizeof(p->errstr),"%s unexpected (ident or string wanted)", lex2str(lex)); return MY_XML_ERROR; } @@ -449,7 +449,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len) { if (lex != MY_XML_QUESTION) { - sprintf(p->errstr,"%s unexpected ('?' wanted)",lex2str(lex)); + snprintf(p->errstr,sizeof(p->errstr),"%s unexpected ('?' 
wanted)",lex2str(lex)); return MY_XML_ERROR; } if (MY_XML_OK != my_xml_leave(p,NULL,0)) @@ -465,7 +465,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len) if (lex != MY_XML_GT) { - sprintf(p->errstr,"%s unexpected ('>' wanted)",lex2str(lex)); + snprintf(p->errstr,sizeof(p->errstr),"%s unexpected ('>' wanted)",lex2str(lex)); return MY_XML_ERROR; } } @@ -486,7 +486,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len) if (p->attr.start[0]) { - sprintf(p->errstr,"unexpected END-OF-INPUT"); + snprintf(p->errstr,sizeof(p->errstr),"unexpected END-OF-INPUT"); return MY_XML_ERROR; } return MY_XML_OK; diff --git a/tpool/tpool_structs.h b/tpool/tpool_structs.h index b49204f2d75ac..550a92d6e583c 100644 --- a/tpool/tpool_structs.h +++ b/tpool/tpool_structs.h @@ -14,14 +14,13 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ #pragma once +#include +#include #include #include -#include -#include #include #include - /* Suppress TSAN warnings, that we believe are not critical. */ #if defined(__has_feature) #define TPOOL_HAS_FEATURE(...) __has_feature(__VA_ARGS__) @@ -37,6 +36,11 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ #define TPOOL_SUPPRESS_TSAN #endif +#ifdef HAVE_PSI_INTERFACE +typedef unsigned int mysql_pfs_key_t; +extern mysql_pfs_key_t tpool_cache_mutex_key; +#endif + namespace tpool { @@ -55,13 +59,13 @@ namespace tpool template class cache { /** Protects updates of m_pos and m_cache members */ - std::mutex m_mtx; + mysql_mutex_t m_mtx; /** Notify waiting threads about "cache full" or "cache not empty" conditions @see get() and wait() */ - std::condition_variable m_cv; + pthread_cond_t m_cv; /** Cached items vector.Does not change after construction */ std::vector m_base; @@ -108,13 +112,22 @@ template class cache Constructor @param size - maximum number of items in cache */ - cache(size_t size) : m_mtx(), m_cv(), m_base(size), m_cache(size), + cache(size_t size) : m_base(size), m_cache(size), m_waiters(), m_pos(0) { + mysql_mutex_init(tpool_cache_mutex_key, &m_mtx, nullptr); + pthread_cond_init(&m_cv, nullptr); + for(size_t i= 0 ; i < size; i++) m_cache[i]= &m_base[i]; } + ~cache() + { + mysql_mutex_destroy(&m_mtx); + pthread_cond_destroy(&m_cv); + } + /** Retrieve an item from cache. Waits for free item, if cache is currently empty. @@ -122,21 +135,25 @@ template class cache */ T* get() { - std::unique_lock lk(m_mtx); - while(is_empty()) - m_cv.wait(lk); + mysql_mutex_lock(&m_mtx); + while (is_empty()) + my_cond_wait(&m_cv, &m_mtx.m_mutex); assert(m_pos < capacity()); // return last element - return m_cache[m_pos++]; + T *t= m_cache[m_pos++]; + mysql_mutex_unlock(&m_mtx); + return t; } - /** - Put back an item to cache. - @param item - item to put back + mysql_mutex_t &mutex() { return m_mtx; } + + /** + Put back an element to cache. 
+ @param ele element to put back */ void put(T *ele) { - std::unique_lock lk(m_mtx); + mysql_mutex_lock(&m_mtx); assert(!is_full()); // put element to the logical end of the array m_cache[--m_pos] = ele; @@ -144,7 +161,8 @@ template class cache /* Notify waiters when the cache becomes not empty, or when it becomes full */ if (m_pos == 1 || (m_waiters && is_full())) - m_cv.notify_all(); + pthread_cond_broadcast(&m_cv); + mysql_mutex_unlock(&m_mtx); } /** Check if pointer represents cached element */ @@ -154,16 +172,25 @@ template class cache return ele >= &m_base[0] && ele <= &m_base[capacity() - 1]; } - /** Wait until cache is full.*/ - void wait() + /** Wait until cache is full + @param m cache mutex (locked) */ + void wait(mysql_mutex_t &m) { - std::unique_lock lk(m_mtx); + mysql_mutex_assert_owner(&m); m_waiters++; - while(!is_full()) - m_cv.wait(lk); + while (!is_full()) + my_cond_wait(&m_cv, &m.m_mutex); m_waiters--; } + /* Wait until cache is full.*/ + void wait() + { + mysql_mutex_lock(&m_mtx); + wait(m_mtx); + mysql_mutex_unlock(&m_mtx); + } + /** @return approximate number of "borrowed" items. A "dirty" read, not used in any critical functionality. diff --git a/unittest/strings/strings-t.c b/unittest/strings/strings-t.c index ac09e116a6561..be5fc085ef0ac 100644 --- a/unittest/strings/strings-t.c +++ b/unittest/strings/strings-t.c @@ -1042,9 +1042,14 @@ typedef struct LEX_CSTRING a; LEX_CSTRING b; size_t nchars; + uint flags; int res; } STRNNCOLLSP_CHAR_PARAM; +#undef TCHAR +#define TCHAR MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES + +#define TVCHAR 0 /* Some lines in the below test data are marked as follows: @@ -1066,266 +1071,273 @@ typedef struct */ static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_mbminlen1_xpad_common[]= { - {{CSTR("a")}, {CSTR("a")}, 0, 0}, - {{CSTR("a")}, {CSTR("a")}, 1, 0}, - {{CSTR("a")}, {CSTR("a")}, 2, 0}, - {{CSTR("a")}, {CSTR("a")}, 3, 0}, - {{CSTR("a")}, {CSTR("a")}, 100, 0}, - - {{CSTR("a")}, {CSTR("ab")}, 0, 0}, - {{CSTR("a")}, {CSTR("ab")}, 1, 0}, - {{CSTR("a")}, {CSTR("ab")}, 2, -1}, - {{CSTR("a")}, {CSTR("ab")}, 3, -1}, - {{CSTR("a")}, {CSTR("ab")}, 100, -1}, - - {{CSTR("a")}, {CSTR("a ")}, 0, 0}, - {{CSTR("a")}, {CSTR("a ")}, 1, 0}, - {{CSTR("a")}, {CSTR("a ")}, 2, 0}, - {{CSTR("a")}, {CSTR("a ")}, 3, 0}, - {{CSTR("a")}, {CSTR("a ")}, 100, 0}, - - {{CSTR("a")}, {CSTR("a ")}, 0, 0}, - {{CSTR("a")}, {CSTR("a ")}, 1, 0}, - {{CSTR("a")}, {CSTR("a ")}, 2, 0}, - {{CSTR("a")}, {CSTR("a ")}, 3, 0}, - {{CSTR("a")}, {CSTR("a ")}, 100, 0}, - - {{CSTR("ss")}, {CSTR("ss")}, 0, 0}, - {{CSTR("ss")}, {CSTR("ss")}, 1, 0}, - {{CSTR("ss")}, {CSTR("ss")}, 2, 0}, - {{CSTR("ss")}, {CSTR("ss")}, 3, 0}, - {{CSTR("ss")}, {CSTR("ss")}, 100, 0}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR("a")}, {CSTR("a")}, 0, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a")}, 1, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a")}, 2, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a")}, 3, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a")}, 100, TCHAR, 0}, + + {{CSTR("a")}, {CSTR("ab")}, 0, TCHAR, 0}, + {{CSTR("a")}, {CSTR("ab")}, 1, TCHAR, 0}, + {{CSTR("a")}, {CSTR("ab")}, 2, TCHAR, -1}, + {{CSTR("a")}, {CSTR("ab")}, 3, TCHAR, -1}, + {{CSTR("a")}, {CSTR("ab")}, 100, TCHAR, -1}, + + {{CSTR("a")}, {CSTR("a ")}, 0, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 1, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 2, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 3, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 100, TCHAR, 0}, + + {{CSTR("a")}, {CSTR("a ")}, 0, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 1, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 2, TCHAR, 0}, 
+ {{CSTR("a")}, {CSTR("a ")}, 3, TCHAR, 0}, + {{CSTR("a")}, {CSTR("a ")}, 100, TCHAR, 0}, + + {{CSTR("ss")}, {CSTR("ss")}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("ss")}, 1, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("ss")}, 2, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("ss")}, 3, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("ss")}, 100, TCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; /* Tests for utf8, for both PAD SPACE and NOPAD collations */ static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_utf8mbx_xpad_common[]= { - {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 0, 0}, - {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 1, 0}, - {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 2, 0}, - {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 3, 0}, - {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 100, 0}, + {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 0, TCHAR, 0}, + {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 1, TCHAR, 0}, + {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 2, TCHAR, 0}, + {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 3, TCHAR, 0}, + {{CSTR(UTF8_sz)}, {CSTR(UTF8_sz)}, 100, TCHAR, 0}, - {{NULL, 0}, {NULL, 0}, 0, 0} + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; /* Tests for latin1, for both PAD and NOPAD collations */ static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_latin1_xpad_common[]= { - {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 0, 0}, - {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 1, 0}, - {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 2, 0}, - {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 3, 0}, - {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 100, 0}, + {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 0, TCHAR, 0}, + {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 1, TCHAR, 0}, + {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 2, TCHAR, 0}, + {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 3, TCHAR, 0}, + {{CSTR(LATIN1_sz)}, {CSTR(LATIN1_sz)}, 100, TCHAR, 0}, - {{NULL, 0}, {NULL, 0}, 0, 0} + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; /* Tests for utf8 collations that sort "A WITH DIAERESIS" equal to "A" */ static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_utf8mbx_xpad_a_eq_auml[]= { - {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 0, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 1, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 2, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 3, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 100, 0}, - - {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 0, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 1, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 2, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 3, 0}, - {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 100, 0}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 0, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 1, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 2, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 3, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah")}, 100, TCHAR, 0}, + + {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 0, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 1, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 2, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 3, TCHAR, 0}, + {{CSTR(UTF8_auml "h")}, {CSTR("ah ")}, 100, TCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_utf8mb3_unicode_ci[]= { - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 0, 0}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 1, 0}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}/*IF*/, 2, 1}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 3, 0}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 4, 0}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 100, 0}, - - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, -1}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, 0}, - 
{{CSTR("ss")}, {CSTR(UTF8_sz)}, 4, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, 0}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 1, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}/*IF*/, 2, TCHAR, 1}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 3, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 4, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 100, TCHAR, 0}, + + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 4, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, TCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_utf8mb3_unicode_nopad_ci[]= { - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 0, 0}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 1, 0}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}/*IF*/, 2, 1}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 3, 1}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 4, 1}, - {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 100, 1}, - - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, -1}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, -1}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, -1}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 4, -1}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, -1}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 1, TCHAR, 0}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}/*IF*/, 2, TCHAR, 1}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 3, TCHAR, 1}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 4, TCHAR, 1}, + {{CSTR("ss")}, {CSTR("s" "\x00" "s")}, 100, TCHAR, 1}, + + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 4, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, TCHAR, -1}, + + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, TVCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, TVCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, TVCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, TVCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 4, TVCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, TVCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_utf8mb3_danish_ci[]= { - {{CSTR("aa")}, {CSTR("")}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR("")}, 1, 1}, - {{CSTR("aa")}, {CSTR("")}, 2, 1}, - {{CSTR("aa")}, {CSTR("")}, 3, 1}, - {{CSTR("aa")}, {CSTR("")}, 100, 1}, - - {{CSTR("aa")}, {CSTR("a")}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR("a")}, 1, 0}, - {{CSTR("aa")}, {CSTR("a")}, 2, 1}, - {{CSTR("aa")}, {CSTR("a")}, 3, 1}, - {{CSTR("aa")}, {CSTR("a")}, 100, 1}, - - {{CSTR("aa")}, {CSTR("aa")}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR("aa")}/*CF*/, 1, 0}, - {{CSTR("aa")}, {CSTR("aa")}, 2, 0}, - {{CSTR("aa")}, {CSTR("aa")}, 3, 0}, - {{CSTR("aa")}, {CSTR("aa")}, 100, 0}, - - {{CSTR("aa")}, {CSTR("\x00" "a")}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR("\x00" "a")}/*IF*/, 1, 1}, - {{CSTR("aa")}, {CSTR("\x00" "a")}, 2, 1}, - {{CSTR("aa")}, {CSTR("\x00" "a")}, 3, 1}, - {{CSTR("aa")}, {CSTR("\x00" "a")}, 100, 1}, - - {{CSTR("aa")}, {CSTR("\x00" "aa")}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR("\x00" "aa")}/*IF*/, 1, 1}, - {{CSTR("aa")}, {CSTR("\x00" "aa")}/*IF*/, 2, 1}, - {{CSTR("aa")}, {CSTR("\x00" "aa")}, 3, 0}, - {{CSTR("aa")}, {CSTR("\x00" "aa")}, 
100, 0}, - - {{CSTR("aa")}, {CSTR("a" "\x00" "a")}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR("a" "\x00" "a")}, 1, 0}, - {{CSTR("aa")}, {CSTR("a" "\x00" "a")}/*IF*/, 2, 1}, - {{CSTR("aa")}, {CSTR("a" "\x00" "a")}, 3, 1}, - {{CSTR("aa")}, {CSTR("a" "\x00" "a")}, 100, 1}, - - {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 0, 0}, - {{CSTR("aa")}/*CF*/, {CSTR(UTF8_ARING)}, 1, -1}, - {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 2, 0}, - {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 3, 0}, - {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 100, 0}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR("aa")}, {CSTR("")}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR("")}, 1, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("")}, 2, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("")}, 3, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("")}, 100, TCHAR, 1}, + + {{CSTR("aa")}, {CSTR("a")}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR("a")}, 1, TCHAR, 0}, + {{CSTR("aa")}, {CSTR("a")}, 2, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("a")}, 3, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("a")}, 100, TCHAR, 1}, + + {{CSTR("aa")}, {CSTR("aa")}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR("aa")}/*CF*/, 1, TCHAR, 0}, + {{CSTR("aa")}, {CSTR("aa")}, 2, TCHAR, 0}, + {{CSTR("aa")}, {CSTR("aa")}, 3, TCHAR, 0}, + {{CSTR("aa")}, {CSTR("aa")}, 100, TCHAR, 0}, + + {{CSTR("aa")}, {CSTR("\x00" "a")}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR("\x00" "a")}/*IF*/, 1, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("\x00" "a")}, 2, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("\x00" "a")}, 3, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("\x00" "a")}, 100, TCHAR, 1}, + + {{CSTR("aa")}, {CSTR("\x00" "aa")}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR("\x00" "aa")}/*IF*/, 1, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("\x00" "aa")}/*IF*/, 2, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("\x00" "aa")}, 3, TCHAR, 0}, + {{CSTR("aa")}, {CSTR("\x00" "aa")}, 100, TCHAR, 0}, + + {{CSTR("aa")}, {CSTR("a" "\x00" "a")}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR("a" "\x00" "a")}, 1, TCHAR, 0}, + {{CSTR("aa")}, {CSTR("a" "\x00" "a")}/*IF*/, 2, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("a" "\x00" "a")}, 3, TCHAR, 1}, + {{CSTR("aa")}, {CSTR("a" "\x00" "a")}, 100, TCHAR, 1}, + + {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 0, TCHAR, 0}, + {{CSTR("aa")}/*CF*/, {CSTR(UTF8_ARING)}, 1, TCHAR, -1}, + {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 2, TCHAR, 0}, + {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 3, TCHAR, 0}, + {{CSTR("aa")}, {CSTR(UTF8_ARING)}, 100, TCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_latin1_german2_ci[]= { - {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 0, 0}, - {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 1, -1}, - {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 2, 0}, - {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 3, 0}, - {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 100, 0}, - - {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 0, 0}, - {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 1, -1}, - {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 2, 0}, - {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 3, 0}, - {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 100, 0}, - - {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 0, 0}, - {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 1, -1}, - {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 2, 0}, - {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 3, 0}, - {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 100, 0}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 1, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 2, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 3, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(LATIN1_sz)}, 100, TCHAR, 0}, + + {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 0, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 1, TCHAR, -1}, + {{CSTR("ae")}, 
{CSTR(LATIN1_auml)}, 2, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 3, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(LATIN1_auml)}, 100, TCHAR, 0}, + + {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 0, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 1, TCHAR, -1}, + {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 2, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 3, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(LATIN1_auml " ")}, 100, TCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_utf8mbx_german2_ci[]= { - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, -1}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, 0}, - {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, 0}, - - {{CSTR("ae")}, {CSTR(UTF8_auml)}, 0, 0}, - {{CSTR("ae")}, {CSTR(UTF8_auml)}, 1, -1}, - {{CSTR("ae")}, {CSTR(UTF8_auml)}, 2, 0}, - {{CSTR("ae")}, {CSTR(UTF8_auml)}, 3, 0}, - {{CSTR("ae")}, {CSTR(UTF8_auml)}, 100, 0}, - - {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 0, 0}, - {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 1, -1}, - {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 2, 0}, - {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 3, 0}, - {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 100, 0}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 0, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 1, TCHAR, -1}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 2, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 3, TCHAR, 0}, + {{CSTR("ss")}, {CSTR(UTF8_sz)}, 100, TCHAR, 0}, + + {{CSTR("ae")}, {CSTR(UTF8_auml)}, 0, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(UTF8_auml)}, 1, TCHAR, -1}, + {{CSTR("ae")}, {CSTR(UTF8_auml)}, 2, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(UTF8_auml)}, 3, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(UTF8_auml)}, 100, TCHAR, 0}, + + {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 0, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 1, TCHAR, -1}, + {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 2, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 3, TCHAR, 0}, + {{CSTR("ae")}, {CSTR(UTF8_auml " ")}, 100, TCHAR, 0}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_mbminlen1_xpad_czech[]= { - {{CSTR("c")}, {CSTR("ch")}, 0, 0}, - {{CSTR("c")}, {CSTR("ch")}, 1, 0}, - {{CSTR("c")}, {CSTR("ch")}, 2, -1}, + {{CSTR("c")}, {CSTR("ch")}, 0, TCHAR, 0}, + {{CSTR("c")}, {CSTR("ch")}, 1, TCHAR, 0}, + {{CSTR("c")}, {CSTR("ch")}, 2, TCHAR, -1}, - {{CSTR("h")}, {CSTR("ch")}, 0, 0}, - {{CSTR("h")}, {CSTR("ch")}, 1, 1}, - {{CSTR("h")}, {CSTR("ch")}, 2, -1}, + {{CSTR("h")}, {CSTR("ch")}, 0, TCHAR, 0}, + {{CSTR("h")}, {CSTR("ch")}, 1, TCHAR, 1}, + {{CSTR("h")}, {CSTR("ch")}, 2, TCHAR, -1}, - {{CSTR("i")}, {CSTR("ch")}, 0, 0}, - {{CSTR("i")}, {CSTR("ch")}, 1, 1}, - {{CSTR("i")}, {CSTR("ch")}, 2, 1}, + {{CSTR("i")}, {CSTR("ch")}, 0, TCHAR, 0}, + {{CSTR("i")}, {CSTR("ch")}, 1, TCHAR, 1}, + {{CSTR("i")}, {CSTR("ch")}, 2, TCHAR, 1}, - {{NULL, 0}, {NULL, 0}, 0, 0} + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; static STRNNCOLLSP_CHAR_PARAM strnncollsp_char_mbminlen2_xpad_common[]= { - {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 0, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 1, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 2, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 3, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 100, 0}, - - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 0, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 1, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 2, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 3, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 100, 0}, - - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 0, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a 
UCS2_sp UCS2_sp)}, 1, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 2, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 3, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 100, 0}, - - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 0, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 1, 0}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 2, -1}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 3, -1}, - {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 100, -1}, - - {{NULL, 0}, {NULL, 0}, 0, 0} + {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 0, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 1, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 2, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 3, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a)}, 100, TCHAR, 0}, + + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 0, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 1, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 2, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 3, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp)}, 100, TCHAR, 0}, + + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 0, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 1, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 2, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 3, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_sp UCS2_sp)}, 100, TCHAR, 0}, + + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 0, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 1, TCHAR, 0}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 2, TCHAR, -1}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 3, TCHAR, -1}, + {{CSTR(UCS2_a)}, {CSTR(UCS2_a UCS2_b)}, 100, TCHAR, -1}, + + {{NULL, 0}, {NULL, 0}, 0, 0, 0} }; @@ -1337,7 +1349,7 @@ strnncollsp_char_one(CHARSET_INFO *cs, const STRNNCOLLSP_CHAR_PARAM *p) int res= cs->coll->strnncollsp_nchars(cs, (uchar *) p->a.str, p->a.length, (uchar *) p->b.str, p->b.length, - p->nchars); + p->nchars, p->flags); str2hex(ahex, sizeof(ahex), p->a.str, p->a.length); str2hex(bhex, sizeof(bhex), p->b.str, p->b.length); diag("%-25s %-12s %-12s %3d %7d %7d%s", @@ -1353,7 +1365,7 @@ strnncollsp_char_one(CHARSET_INFO *cs, const STRNNCOLLSP_CHAR_PARAM *p) res= cs->coll->strnncollsp_nchars(cs, (uchar *) p->b.str, p->b.length, (uchar *) p->a.str, p->a.length, - p->nchars); + p->nchars, p->flags); if (!eqres(res, -p->res)) { diag("Comparison in reverse order failed. Expected %d, got %d",
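
Note on the tpool cache hunks earlier in this patch: std::mutex / std::condition_variable are replaced with the instrumented mysql_mutex_t plus a pthread condition, so that a caller which already owns the mutex can wait on the cache (the new wait(mysql_mutex_t &) overload asserts ownership with mysql_mutex_assert_owner and blocks in my_cond_wait), while put() broadcasts only when someone is actually waiting. The following is a minimal, self-contained sketch of that pattern using plain pthreads rather than the PSI-instrumented wrappers; the names pool_sketch and wait_locked and the simplified vector storage are illustrative only and do not appear in the patch.

    // Minimal sketch, NOT the actual tpool::cache: a bounded pool protected by
    // a plain pthread mutex/condition pair, illustrating the pattern the diff
    // switches to (an internal "wait until full" that assumes the mutex is
    // already held, a convenience wrapper that takes the lock itself, and a
    // broadcast in put() only when a waiter exists).
    #include <pthread.h>
    #include <cassert>
    #include <vector>

    template <typename T> class pool_sketch
    {
      pthread_mutex_t m_mtx;
      pthread_cond_t m_cv;
      std::vector<T *> m_cache;              // elements currently available
      size_t m_capacity;
      int m_waiters= 0;

      bool is_full() const { return m_cache.size() == m_capacity; }

      // Counterpart of the new wait(mysql_mutex_t &) overload: the caller
      // (here, wait() below) already holds m_mtx.
      void wait_locked()
      {
        m_waiters++;
        while (!is_full())
          pthread_cond_wait(&m_cv, &m_mtx);
        m_waiters--;
      }

    public:
      pool_sketch(T *base, size_t capacity) : m_capacity(capacity)
      {
        pthread_mutex_init(&m_mtx, nullptr);
        pthread_cond_init(&m_cv, nullptr);
        m_cache.reserve(capacity);
        for (size_t i= 0; i < capacity; i++)
          m_cache.push_back(&base[i]);
      }
      ~pool_sketch()
      {
        pthread_cond_destroy(&m_cv);
        pthread_mutex_destroy(&m_mtx);
      }

      // Borrow an element; nullptr if none is free right now.
      T *get()
      {
        pthread_mutex_lock(&m_mtx);
        T *ele= nullptr;
        if (!m_cache.empty())
        {
          ele= m_cache.back();
          m_cache.pop_back();
        }
        pthread_mutex_unlock(&m_mtx);
        return ele;
      }

      // Return an element; wake waiters only when the pool becomes full again.
      void put(T *ele)
      {
        pthread_mutex_lock(&m_mtx);
        assert(!is_full());
        m_cache.push_back(ele);
        if (m_waiters && is_full())
          pthread_cond_broadcast(&m_cv);
        pthread_mutex_unlock(&m_mtx);
      }

      // Block until every borrowed element has been returned.
      void wait()
      {
        pthread_mutex_lock(&m_mtx);
        wait_locked();
        pthread_mutex_unlock(&m_mtx);
      }
    };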
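
Note on the strings-t.c hunks: STRNNCOLLSP_CHAR_PARAM gains a uint flags member that is passed through to strnncollsp_nchars(); TCHAR is redefined as MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES (CHAR-style comparison that behaves as if trimmed trailing spaces were still present) and TVCHAR as 0 (VARCHAR-style comparison), which is why the utf8mb3_unicode_nopad_ci table gains an extra block of rows with TVCHAR expectations. Below is a hypothetical, byte-wise stand-in showing the same table-driven shape with a per-row flags field; cmp_nchars, TCHAR_FLAG and TVCHAR_FLAG are invented for the sketch and are not the collation-aware MariaDB API.

    // Hypothetical stand-in, not the real strnncollsp_nchars(): a tiny
    // table-driven test in the spirit of the updated strings-t.c, where each
    // row carries a 'flags' column selecting CHAR-like or VARCHAR-like mode.
    #include <stdio.h>
    #include <string.h>

    #define TCHAR_FLAG  1u   // stand-in for the "emulate trimmed spaces" flag
    #define TVCHAR_FLAG 0u

    struct ROW
    {
      const char *a;
      const char *b;
      size_t nchars;
      unsigned flags;
      int expected;
    };

    // Compare at most nchars bytes; in TCHAR mode a string that ends early is
    // treated as if padded with spaces, in TVCHAR mode the shorter prefix
    // sorts first.  (Byte-wise only -- the real function is collation-aware.)
    static int cmp_nchars(const char *a, size_t alen,
                          const char *b, size_t blen,
                          size_t nchars, unsigned flags)
    {
      for (size_t i= 0; i < nchars; i++)
      {
        int ca= i < alen ? (unsigned char) a[i]
                         : (flags & TCHAR_FLAG ? ' ' : -1);
        int cb= i < blen ? (unsigned char) b[i]
                         : (flags & TCHAR_FLAG ? ' ' : -1);
        if (ca != cb)
          return ca < cb ? -1 : 1;
        if (ca < 0)                 // both strings ended (TVCHAR mode)
          return 0;
      }
      return 0;
    }

    int main(void)
    {
      static const ROW rows[]=
      {
        { "a", "a ", 100, TCHAR_FLAG,   0 },  // trailing space ignored (CHAR)
        { "a", "a ", 100, TVCHAR_FLAG, -1 },  // trailing space counts (VARCHAR)
        { "a", "ab",   1, TCHAR_FLAG,   0 },  // only first character compared
        { NULL, NULL,  0, 0,            0 }   // terminator row, as in the patch
      };
      int failures= 0;
      for (const ROW *p= rows; p->a; p++)
      {
        int res= cmp_nchars(p->a, strlen(p->a), p->b, strlen(p->b),
                            p->nchars, p->flags);
        if (res != p->expected)
        {
          printf("FAIL: '%s' vs '%s' nchars=%zu flags=%u: got %d, want %d\n",
                 p->a, p->b, p->nchars, p->flags, res, p->expected);
          failures++;
        }
      }
      return failures != 0;
    }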