From 34e8585437abc862f31aeb1a4022ef278a4af6f0 Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Wed, 16 Aug 2023 11:57:34 +0200 Subject: [PATCH 001/165] MDEV-29974: Missed kill waiting for worker queues to drain When the SQL driver thread goes to wait for room in the parallel slave worker queue, there was a race where a kill at the right moment could be ignored and the wait proceed uninterrupted by the kill. Fix by moving the THD::check_killed() to occur _after_ doing ENTER_COND(). This bug was seen as sporadic failure of the testcase rpl.rpl_parallel (rpl.rpl_parallel_gco_wait_kill since 10.5), with "Slave stopped with wrong error code". Signed-off-by: Kristian Nielsen --- sql/rpl_parallel.cc | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index ba5cf54e673f2..3bd27c7393272 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -2281,21 +2281,6 @@ rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond, /* The thread is ready to queue into. 
*/ break; } - else if (unlikely(rli->sql_driver_thd->check_killed(1))) - { - unlock_or_exit_cond(rli->sql_driver_thd, &thr->LOCK_rpl_thread, - did_enter_cond, old_stage); - my_error(ER_CONNECTION_KILLED, MYF(0)); -#ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF("rpl_parallel_wait_queue_max", - { - debug_sync_set_action(rli->sql_driver_thd, - STRING_WITH_LEN("now SIGNAL wait_queue_killed")); - };); -#endif - slave_output_error_info(rgi, rli->sql_driver_thd); - return NULL; - } else { /* @@ -2323,6 +2308,23 @@ rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond, old_stage); *did_enter_cond= true; } + + if (unlikely(rli->sql_driver_thd->check_killed(1))) + { + unlock_or_exit_cond(rli->sql_driver_thd, &thr->LOCK_rpl_thread, + did_enter_cond, old_stage); + my_error(ER_CONNECTION_KILLED, MYF(0)); +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("rpl_parallel_wait_queue_max", + { + debug_sync_set_action(rli->sql_driver_thd, + STRING_WITH_LEN("now SIGNAL wait_queue_killed")); + };); +#endif + slave_output_error_info(rgi, rli->sql_driver_thd); + return NULL; + } + mysql_cond_wait(&thr->COND_rpl_thread_queue, &thr->LOCK_rpl_thread); } } From 44df6f35aa690cdac0409b7853468891ade33cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 17 Aug 2023 10:31:28 +0300 Subject: [PATCH 002/165] MDEV-31875 ROW_FORMAT=COMPRESSED table: InnoDB: ... Only 0 bytes read buf_read_ahead_random(), buf_read_ahead_linear(): Avoid read-ahead of the last page(s) of ROW_FORMAT=COMPRESSED tablespaces that use a page size of 1024 or 2048 bytes. We invoke os_file_set_size() on integer multiples of 4096 bytes in order to be compatible with the requirements of innodb_flush_method=O_DIRECT regardless of the physical block size of the underlying storage. This change must be null-merged to MariaDB Server 10.5 and later. There, out-of-bounds read-ahead should be handled gracefully by simply discarding the buffer page that had been allocated. 
Tested by: Matthias Leich --- storage/innobase/buf/buf0rea.cc | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 1ea3070cbda1c..5ea177dbbd0c2 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -266,8 +266,24 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf) * buf_read_ahead_random_area; if (fil_space_t* space = fil_space_acquire(page_id.space())) { - high = space->max_page_number_for_io(high); + ulint space_size = space->committed_size; + ulint zip_size = space->zip_size(); space->release(); + /* Avoid read-ahead of the last page(s) of + small-page-size ROW_FORMAT=COMPRESSED tablespaces, + because fil_space_extend_must_retry() would invoke + os_file_set_size() on integer multiples of 4 KiB. */ + switch (UNIV_EXPECT(zip_size, 0)) { + case 1024: + space_size &= ~ulint{3}; + break; + case 2048: + space_size &= ~ulint{1}; + break; + } + if (high > space_size) { + high = space_size; + } } else { return(0); } @@ -531,7 +547,20 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) if (fil_space_t* space = fil_space_acquire(page_id.space())) { space_size = space->committed_size; + ulint zip_size = space->zip_size(); space->release(); + /* Avoid read-ahead of the last page(s) of + small-page-size ROW_FORMAT=COMPRESSED tablespaces, + because fil_space_extend_must_retry() would invoke + os_file_set_size() on integer multiples of 4 KiB. 
*/ + switch (UNIV_EXPECT(zip_size, 0)) { + case 1024: + space_size &= ~ulint{3}; + break; + case 2048: + space_size &= ~ulint{1}; + break; + } if (high > space_size) { /* The area is not whole */ From 518fe519887cfb354cd463afa76077df49e96057 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 17 Aug 2023 10:31:44 +0300 Subject: [PATCH 003/165] MDEV-31254 InnoDB: Trying to read doublewrite buffer page buf_read_page_low(): Remove an error message that could be triggered by buf_read_ahead_linear() or buf_read_ahead_random(). This is a backport of commit c9eff1a144ba44846373660a30d342d3f0dc91a5 from MariaDB Server 10.5. --- storage/innobase/buf/buf0rea.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 5ea177dbbd0c2..9e06fc9cf8194 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -128,9 +128,6 @@ buf_read_page_low( if (page_id.space() == TRX_SYS_SPACE && buf_dblwr_page_inside(page_id.page_no())) { - - ib::error() << "Trying to read doublewrite buffer page " - << page_id; return(0); } From 5a8a8fc9538861f699805db4baddecdb87af5052 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 17 Aug 2023 10:31:55 +0300 Subject: [PATCH 004/165] MDEV-31928 Assertion xid ... < 128 failed in trx_undo_write_xid() trx_undo_write_xid(): Correct an off-by-one error in a debug assertion. 
--- mysql-test/suite/innodb/r/innodb-xa.result | 27 +++++++++++++++++ mysql-test/suite/innodb/t/innodb-xa.test | 35 +++++++++++++++++++++- storage/innobase/trx/trx0undo.cc | 2 +- 3 files changed, 62 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/innodb/r/innodb-xa.result b/mysql-test/suite/innodb/r/innodb-xa.result index 6eae842b14c36..6651bd5090db4 100644 --- a/mysql-test/suite/innodb/r/innodb-xa.result +++ b/mysql-test/suite/innodb/r/innodb-xa.result @@ -15,3 +15,30 @@ xa prepare 'xid2'; release savepoint `sv1`; xa commit 'xid2'; drop table t1; +# +# MDEV-31928 Assertion xid ... < 128 failed in trx_undo_write_xid() +# +CREATE TABLE t (a INT PRIMARY KEY) ENGINE=INNODB; +XA START 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA START 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA START 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +INSERT INTO t VALUES(1); +XA END 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA END 
'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA END 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +XA PREPARE 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA PREPARE 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA PREPARE 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +XA COMMIT 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA COMMIT 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1234567890' at line 1 +XA COMMIT 
'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +DROP TABLE t; +# End of 10.4 tests diff --git a/mysql-test/suite/innodb/t/innodb-xa.test b/mysql-test/suite/innodb/t/innodb-xa.test index d94cd75aa147a..9eb5848b04d33 100644 --- a/mysql-test/suite/innodb/t/innodb-xa.test +++ b/mysql-test/suite/innodb/t/innodb-xa.test @@ -1,6 +1,6 @@ --source include/have_innodb.inc ---disable_abort_on_error +--error ER_XAER_NOTA xa rollback 'xid2'; drop table if exists t1; create table t1(a int)engine=innodb; @@ -9,9 +9,42 @@ xa start 'xid2'; insert into `t1` values (1); savepoint `sv1`; xa end 'xid2'; +--error ER_XAER_RMFAIL start transaction; xa prepare 'xid2'; release savepoint `sv1`; xa commit 'xid2'; drop table t1; +-- echo # +-- echo # MDEV-31928 Assertion xid ... < 128 failed in trx_undo_write_xid() +-- echo # + +CREATE TABLE t (a INT PRIMARY KEY) ENGINE=INNODB; +--error ER_PARSE_ERROR +XA START 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +--error ER_PARSE_ERROR +XA START 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +XA START 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; + +INSERT INTO t VALUES(1); + +--error ER_PARSE_ERROR +XA END 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +--error ER_PARSE_ERROR +XA END 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +XA END 
'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +--error ER_PARSE_ERROR +XA PREPARE 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +--error ER_PARSE_ERROR +XA PREPARE 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +XA PREPARE 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +--error ER_PARSE_ERROR +XA COMMIT 'gtrid_67890123456789012345678901234567890123456789012345678901234','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; +--error ER_PARSE_ERROR +XA COMMIT 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_67890123456789012345678901234567890123456789012345678901234',1234567890; +XA COMMIT 'gtrid_6789012345678901234567890123456789012345678901234567890123','bqual_6789012345678901234567890123456789012345678901234567890123',1234567890; + +DROP TABLE t; + +-- echo # End of 10.4 tests diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index 8768b934dba77..917f662eaa5a3 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -665,7 +665,7 @@ trx_undo_write_xid( { DBUG_ASSERT(xid->gtrid_length >= 0); DBUG_ASSERT(xid->bqual_length >= 0); - DBUG_ASSERT(xid->gtrid_length + xid->bqual_length < XIDDATASIZE); + DBUG_ASSERT(xid->gtrid_length + xid->bqual_length <= XIDDATASIZE); mlog_write_ulint(log_hdr + TRX_UNDO_XA_FORMAT, static_cast(xid->formatID), From a6bf4b580710b7f7b488db92974664abf66218da Mon Sep 17 00:00:00 2001 From: Monty Date: Sat, 5 Aug 2023 01:08:05 +0300 Subject: [PATCH 005/165] MDEV-29693 ANALYZE TABLE still flushes table definition cache when engine-independent 
statistics is used This commit enables reloading of engine-independent statistics without flushing the table from table definition cache. This is achieved by allowing multiple versions of the TABLE_STATISTICS_CB object and having independent pointers to it in TABLE and TABLE_SHARE. The TABLE_STATISTICS_CB objects have reference pointers and are freed when no one is pointing to them anymore. TABLE's TABLE_STATISTICS_CB pointer is updated to use the TABLE_SHARE's pointer when read_statistics_for_tables() is called at the beginning of a query. Main changes: - read_statistics_for_table() will allocate a new TABLE_STATISTICS_CB object. - All get_stat_values() functions have a new parameter that tells where collected data should be stored. get_stat_values() are not using the table_field object anymore to store data. - All get_stat_values() functions return 1 if they found any data in the statistics tables. Other things: - Fixed INSERT DELAYED to not read statistics tables. - Removed Statistics_state from TABLE_STATISTICS_CB as this is not needed anymore as we are not changing TABLE_SHARE->stats_cb while calculating or loading statistics. - Store values used with store_from_statistical_minmax_field() in TABLE_STATISTICS_CB::mem_root. This allowed me to remove the function delete_stat_values_for_table_share(). - Field_blob::store_from_statistical_minmax_field() is implemented but is not normally used as we do not yet support EIS statistics for blobs. For example Field_blob::update_min() and Field_blob::update_max() are not implemented. Note that the function can be called if there is a concurrent "ALTER TABLE MODIFY field BLOB" running because of a bug in ALTER TABLE where it deletes entries from column_stats before it has an exclusive lock on the table. - Use result of field->val_str(&val) as a pointer to the result instead of val (safety fix). - Allocate memory for collected statistics in THD::mem_root, not in TABLE::mem_root.
This could cause the TABLE object to grow if an ANALYZE TABLE was run many times on the same table. This was done in allocate_statistics_for_table(), create_min_max_statistical_fields_for_table() and create_min_max_statistical_fields_for_table_share(). - Store in TABLE_STATISTICS_CB::stats_available which statistics were found in the statistics tables. - Removed index_table from class Index_prefix_calc as it was not used. - Added TABLE_SHARE::LOCK_statistics to ensure we don't load EITS in parallel. First thread will load it, others will reuse the loaded data. - Eliminate read_histograms_for_table(). The loading happens within read_statistics_for_tables() if histograms are needed. One downside is that if we have read statistics without histograms before and someone requires histograms, we have to read all statistics again (once) from the statistics tables. A smaller downside is the need to call alloc_root() for each individual histogram. Before we could allocate all the space for histograms with a single alloc_root. - Fixed bug in MyISAM and Aria where they did not properly notice that the table had changed after analyze table. This was not a problem before this patch as then the MyISAM and Aria tables were flushed as part of ANALYZE table which did hide this issue. - Fixed a bug in ANALYZE table where table->records could be seen as 0 in collect_statistics_for_table(). The effect of this unlikely bug was that a full table scan could be done even if analyze_sample_percentage was not set to 1. - Changed multiple mallocs in a row to use multi_alloc_root(). - Added mutex protection in update_statistics_for_table() to ensure that several tables are not updating the statistics at the same time.
Some of the changes in sql_statistics.cc are based on a patch from Oleg Smirnov Co-authored-by: Oleg Smirnov Co-authored-by: Vicentiu Ciorbaru Reviewer: Sergei Petrunia --- mysql-test/main/analyze.result | 28 + mysql-test/main/analyze.test | 27 + mysql-test/main/join.result | 2 +- mysql-test/main/join_outer.result | 4 +- mysql-test/main/join_outer_jcl6.result | 4 +- .../main/partition_explicit_prune.result | 3 +- mysql-test/main/stat_tables.result | 78 ++ mysql-test/main/stat_tables.test | 64 +- mysql-test/main/stat_tables_flush.result | 91 ++ mysql-test/main/stat_tables_flush.test | 50 ++ mysql-test/main/stat_tables_innodb.result | 78 ++ sql/field.cc | 24 +- sql/field.h | 5 +- sql/mysqld.cc | 2 + sql/mysqld.h | 1 + sql/sql_admin.cc | 28 +- sql/sql_base.cc | 3 +- sql/sql_base.h | 3 + sql/sql_insert.cc | 6 +- sql/sql_show.cc | 2 +- sql/sql_statistics.cc | 840 ++++++++---------- sql/sql_statistics.h | 6 +- sql/table.cc | 85 +- sql/table.h | 123 +-- storage/maria/ma_locking.c | 18 +- storage/myisam/mi_locking.c | 9 +- 26 files changed, 1023 insertions(+), 561 deletions(-) create mode 100644 mysql-test/main/stat_tables_flush.result create mode 100644 mysql-test/main/stat_tables_flush.test diff --git a/mysql-test/main/analyze.result b/mysql-test/main/analyze.result index a1332abd177ce..17325863acd1f 100644 --- a/mysql-test/main/analyze.result +++ b/mysql-test/main/analyze.result @@ -71,3 +71,31 @@ optimize table t1 extended; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'extended' at line 1 drop table t1; End of 5.0 tests +# +# Test analyze of text column (not yet supported) +# +set optimizer_use_condition_selectivity=4; +set histogram_type='single_prec_hb'; +set histogram_size=255; +create table t1 (a int not null, t tinytext, tx text); +insert into t1 select seq+1, repeat('X',seq*5), repeat('X',seq*10) from seq_0_to_50; +insert into t1 select seq+100, repeat('X',5), 
"" from seq_1_to_10; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 't' +test.t1 analyze Warning Engine-independent statistics are not collected for column 'tx' +test.t1 analyze status OK +explain select count(*) from t1 where t='XXXXXX'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 61 Using where +select column_name, min_value, max_value, hist_size from mysql.column_stats where table_name='t1'; +column_name min_value max_value hist_size +a 1 110 255 +drop table t1; +set use_stat_tables=default; +set histogram_type=default; +set histogram_size=default; +# +# End of 10.6 tests +# diff --git a/mysql-test/main/analyze.test b/mysql-test/main/analyze.test index 85a8815816237..3c7179fbf1950 100644 --- a/mysql-test/main/analyze.test +++ b/mysql-test/main/analyze.test @@ -1,3 +1,5 @@ +--source include/have_sequence.inc + # # Bug #10901 Analyze Table on new table destroys table # This is minimal test case to get error @@ -87,3 +89,28 @@ optimize table t1 extended; drop table t1; --echo End of 5.0 tests + +--echo # +--echo # Test analyze of text column (not yet supported) +--echo # + +set optimizer_use_condition_selectivity=4; +set histogram_type='single_prec_hb'; +set histogram_size=255; + +create table t1 (a int not null, t tinytext, tx text); +insert into t1 select seq+1, repeat('X',seq*5), repeat('X',seq*10) from seq_0_to_50; +insert into t1 select seq+100, repeat('X',5), "" from seq_1_to_10; +analyze table t1; +explain select count(*) from t1 where t='XXXXXX'; +select column_name, min_value, max_value, hist_size from mysql.column_stats where table_name='t1'; + +drop table t1; + +set use_stat_tables=default; +set histogram_type=default; +set histogram_size=default; + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/main/join.result 
b/mysql-test/main/join.result index 427b06aabbeb9..104e7a8c83c12 100644 --- a/mysql-test/main/join.result +++ b/mysql-test/main/join.result @@ -1280,7 +1280,7 @@ pk v pk v SHOW STATUS LIKE 'Handler_read_%'; Variable_name Value Handler_read_first 0 -Handler_read_key 14 +Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 diff --git a/mysql-test/main/join_outer.result b/mysql-test/main/join_outer.result index 6bcaee31e546f..9722211fadff4 100644 --- a/mysql-test/main/join_outer.result +++ b/mysql-test/main/join_outer.result @@ -1804,7 +1804,7 @@ sum(t3.b) show status like "handler_read%"; Variable_name Value Handler_read_first 0 -Handler_read_key 13 +Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 @@ -1819,7 +1819,7 @@ sum(t3.b) show status like "handler_read%"; Variable_name Value Handler_read_first 0 -Handler_read_key 7 +Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 diff --git a/mysql-test/main/join_outer_jcl6.result b/mysql-test/main/join_outer_jcl6.result index 3d73ebdc9ba18..26865a72d478d 100644 --- a/mysql-test/main/join_outer_jcl6.result +++ b/mysql-test/main/join_outer_jcl6.result @@ -1811,7 +1811,7 @@ sum(t3.b) show status like "handler_read%"; Variable_name Value Handler_read_first 0 -Handler_read_key 13 +Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 @@ -1826,7 +1826,7 @@ sum(t3.b) show status like "handler_read%"; Variable_name Value Handler_read_first 0 -Handler_read_key 7 +Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 diff --git a/mysql-test/main/partition_explicit_prune.result b/mysql-test/main/partition_explicit_prune.result index 07af2d58a42e2..8b49210d11975 100644 --- a/mysql-test/main/partition_explicit_prune.result +++ b/mysql-test/main/partition_explicit_prune.result @@ -350,7 +350,6 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 
HANDLER_READ_FIRST 1 -HANDLER_READ_KEY 8 HANDLER_TMP_WRITE 24 # Should be 1 commit # 4 locks (1 ha_partition + 1 ha_innobase) x 2 (lock/unlock) @@ -777,7 +776,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_READ_KEY 8 +HANDLER_READ_KEY 6 HANDLER_READ_RND_NEXT 2 HANDLER_TMP_WRITE 24 HANDLER_UPDATE 2 diff --git a/mysql-test/main/stat_tables.result b/mysql-test/main/stat_tables.result index 379e9737e1c2f..1c43dd268a85d 100644 --- a/mysql-test/main/stat_tables.result +++ b/mysql-test/main/stat_tables.result @@ -903,4 +903,82 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f 1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 2.78 10.00 Using where drop table t1; set @@global.histogram_size=@save_histogram_size; +# # End of 10.4 tests +# +# +# MDEV-29693 ANALYZE TABLE still flushes table definition cache +# when engine-independent statistics is used +# +create table t1 (a int); +insert into t1 select seq from seq_0_to_99; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connect con1, localhost, root,,; +connection con1; +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS 
`count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection default; +update t1 set a= a +100; +# Explain shows outdated statistics: +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection con1; +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection default; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +# Now explain shows updated statistics: +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection con1; +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection con1; +# Run update and analyze in con1: +update t1 set a= a - 150; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +connection default; +# Explain shows updated statistics: +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL 
NULL NULL 100 99.22 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +disconnect con1; +drop table t1; +# +# End of 10.6 tests +# diff --git a/mysql-test/main/stat_tables.test b/mysql-test/main/stat_tables.test index 7488ccb6877f8..4d4a969f49c7f 100644 --- a/mysql-test/main/stat_tables.test +++ b/mysql-test/main/stat_tables.test @@ -1,9 +1,9 @@ # Tests will be skipped for the view protocol because the view protocol creates # an additional util connection and other statistics data --- source include/no_view_protocol.inc - +--source include/no_view_protocol.inc --source include/have_stat_tables.inc --source include/have_partition.inc +--source include/have_sequence.inc select @@global.use_stat_tables; select @@session.use_stat_tables; @@ -640,4 +640,64 @@ drop table t1; set @@global.histogram_size=@save_histogram_size; +--echo # --echo # End of 10.4 tests +--echo # + +--echo # +--echo # MDEV-29693 ANALYZE TABLE still flushes table definition cache +--echo # when engine-independent statistics is used +--echo # + +create table t1 (a int); +insert into t1 select seq from seq_0_to_99; +analyze table t1 persistent for all; +analyze table t1 persistent for all; + +explain extended select count(*) from t1 where a < 50; + +connect (con1, localhost, root,,); +--connection con1 +explain extended select count(*) from t1 where a < 50; + +let $open_tables=`select variable_value from information_schema.global_status where variable_name="OPENED_TABLES"`; + +--connection default +update t1 set a= a +100; + +--echo # Explain shows outdated statistics: +explain extended select count(*) from t1 where a < 50; +--connection con1 +explain extended select count(*) from t1 where a < 50; + +--connection default +analyze table t1 persistent for all; +--echo # Now explain shows updated statistics: +explain extended select count(*) from t1 where a < 50; +--connection con1 +explain extended select count(*) from t1 where a < 50; + 
+--connection con1 +--echo # Run update and analyze in con1: +update t1 set a= a - 150; +analyze table t1 persistent for all; + +--connection default +--echo # Explain shows updated statistics: +explain extended select count(*) from t1 where a < 50; + +disconnect con1; + +let $new_open_tables=`select variable_value from information_schema.global_status where variable_name="OPENED_TABLES"`; + +if ($open_tables != $new_open_tables) +{ +--let $diff=`select $new_open_tables - $open_tables` +--echo "Fail: Test opened $diff new tables, 0 was expected" +} + +drop table t1; + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/main/stat_tables_flush.result b/mysql-test/main/stat_tables_flush.result new file mode 100644 index 0000000000000..9a88d5d388dc3 --- /dev/null +++ b/mysql-test/main/stat_tables_flush.result @@ -0,0 +1,91 @@ +# +# Check that ANALYZE TABLE is remembered by MyISAM and Aria +# +create table t1 (a int) engine=myisam; +insert into t1 select seq from seq_0_to_99; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +flush tables; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +update t1 set a=100 where a=1; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +update t1 set a=100 where a=2; +flush tables; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +# Aria transactional=0 +ALTER TABLE t1 ENGINE=aria transactional=0; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +update t1 set a=100 
where a=10; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +flush tables; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +update t1 set a=100 where a=11; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +update t1 set a=100 where a=12; +flush tables; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +# Aria transactional=1 +ALTER TABLE t1 ENGINE=aria transactional=1; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +update t1 set a=100 where a=20; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +flush tables; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +update t1 set a=100 where a=21; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +update t1 set a=100 where a=22; +flush 
tables; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +drop table t1; +# +# End of 10.5 tests +# diff --git a/mysql-test/main/stat_tables_flush.test b/mysql-test/main/stat_tables_flush.test new file mode 100644 index 0000000000000..abbbb0d646735 --- /dev/null +++ b/mysql-test/main/stat_tables_flush.test @@ -0,0 +1,50 @@ +--source include/have_sequence.inc + +--echo # +--echo # Check that ANALYZE TABLE is remembered by MyISAM and Aria +--echo # + +create table t1 (a int) engine=myisam; +insert into t1 select seq from seq_0_to_99; +analyze table t1 persistent for all; +flush tables; +analyze table t1 persistent for all; +update t1 set a=100 where a=1; +analyze table t1 persistent for all; +update t1 set a=100 where a=2; +flush tables; +analyze table t1 persistent for all; + +--echo # Aria transactional=0 +ALTER TABLE t1 ENGINE=aria transactional=0; +analyze table t1 persistent for all; +update t1 set a=100 where a=10; +analyze table t1 persistent for all; +analyze table t1 persistent for all; +flush tables; +analyze table t1 persistent for all; +update t1 set a=100 where a=11; +analyze table t1 persistent for all; +update t1 set a=100 where a=12; +flush tables; +analyze table t1 persistent for all; + +--echo # Aria transactional=1 + +ALTER TABLE t1 ENGINE=aria transactional=1; +analyze table t1 persistent for all; +update t1 set a=100 where a=20; +analyze table t1 persistent for all; +analyze table t1 persistent for all; +flush tables; +analyze table t1 persistent for all; +update t1 set a=100 where a=21; +analyze table t1 persistent for all; +update t1 set a=100 where a=22; +flush tables; +analyze table t1 persistent for all; +drop table t1; + +--echo # +--echo # End of 10.5 tests +--echo # diff --git a/mysql-test/main/stat_tables_innodb.result b/mysql-test/main/stat_tables_innodb.result index 5b62f228b1f30..163a417f81082 100644 --- 
a/mysql-test/main/stat_tables_innodb.result +++ b/mysql-test/main/stat_tables_innodb.result @@ -935,7 +935,85 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f 1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 2.78 10.00 Using where drop table t1; set @@global.histogram_size=@save_histogram_size; +# # End of 10.4 tests +# +# +# MDEV-29693 ANALYZE TABLE still flushes table definition cache +# when engine-independent statistics is used +# +create table t1 (a int); +insert into t1 select seq from seq_0_to_99; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connect con1, localhost, root,,; +connection con1; +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection default; +update t1 set a= a +100; +# Explain shows outdated statistics: +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection con1; +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows 
filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection default; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +# Now explain shows updated statistics: +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection con1; +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +connection con1; +# Run update and analyze in con1: +update t1 set a= a - 150; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +connection default; +# Explain shows updated statistics: +explain extended select count(*) from t1 where a < 50; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 100 99.22 Using where +Warnings: +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50 +disconnect con1; +drop table t1; +# +# End of 10.6 tests +# set global innodb_stats_persistent= @innodb_stats_persistent_save; set global innodb_stats_persistent_sample_pages= @innodb_stats_persistent_sample_pages_save; diff --git a/sql/field.cc b/sql/field.cc index 4a7ee0f3ce0e8..3d11d0b878f4b 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1997,13 +1997,35 @@ int Field::store_to_statistical_minmax_field(Field *field, String 
*val) } -int Field::store_from_statistical_minmax_field(Field *stat_field, String *str) +int Field::store_from_statistical_minmax_field(Field *stat_field, String *str, + MEM_ROOT *mem) { stat_field->val_str(str); return store_text(str->ptr(), str->length(), &my_charset_bin); } +/* + Same as above, but store the string in the statistics mem_root to make it + easy to free everything by just freeing the mem_root. +*/ + +int Field_blob::store_from_statistical_minmax_field(Field *stat_field, + String *str, + MEM_ROOT *mem) +{ + String *tmp= stat_field->val_str(str); + uchar *ptr; + if (!(ptr= (uchar*) memdup_root(mem, tmp->ptr(), tmp->length()))) + { + set_ptr((uint32) 0, NULL); + return 1; + } + set_ptr(tmp->length(), ptr); + return 0; +} + + /** Pack the field into a format suitable for storage and transfer. diff --git a/sql/field.h b/sql/field.h index 9534cfabeed25..e4e88408f8abe 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1011,7 +1011,8 @@ class Field: public Value_source field statistical table field str value buffer */ - virtual int store_from_statistical_minmax_field(Field *field, String *str); + virtual int store_from_statistical_minmax_field(Field *field, String *str, + MEM_ROOT *mem); #ifdef HAVE_MEM_CHECK /** @@ -4469,6 +4470,8 @@ class Field_blob :public Field_longstr { } bool make_empty_rec_store_default_value(THD *thd, Item *item) override; int store(const char *to, size_t length, CHARSET_INFO *charset) override; + int store_from_statistical_minmax_field(Field *stat_field, String *str, + MEM_ROOT *mem) override; using Field_str::store; void hash_not_null(Hasher *hasher) override; double val_real() override; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 843f9b5cbaee8..634e4e91bd7b7 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -919,6 +919,7 @@ PSI_mutex_key key_LOCK_gtid_waiting; PSI_mutex_key key_LOCK_after_binlog_sync; PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered; PSI_mutex_key key_TABLE_SHARE_LOCK_share; +PSI_mutex_key 
key_TABLE_SHARE_LOCK_statistics; PSI_mutex_key key_LOCK_ack_receiver; PSI_mutex_key key_TABLE_SHARE_LOCK_rotation; @@ -986,6 +987,7 @@ static PSI_mutex_info all_server_mutexes[]= { &key_structure_guard_mutex, "Query_cache::structure_guard_mutex", 0}, { &key_TABLE_SHARE_LOCK_ha_data, "TABLE_SHARE::LOCK_ha_data", 0}, { &key_TABLE_SHARE_LOCK_share, "TABLE_SHARE::LOCK_share", 0}, + { &key_TABLE_SHARE_LOCK_statistics, "TABLE_SHARE::LOCK_statistics", 0}, { &key_TABLE_SHARE_LOCK_rotation, "TABLE_SHARE::LOCK_rotation", 0}, { &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL}, { &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL}, diff --git a/sql/mysqld.h b/sql/mysqld.h index 3014da58b3d54..cab7dafdc191f 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -334,6 +334,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list, key_relay_log_info_log_space_lock, key_relay_log_info_run_lock, key_rpl_group_info_sleep_lock, key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data, + key_TABLE_SHARE_LOCK_statistics, key_LOCK_start_thread, key_LOCK_error_messages, key_PARTITION_LOCK_auto_inc; diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 00d7e5efecd93..b7a845f04c913 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -923,8 +923,14 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, bitmap_clear_all(tab->read_set); for (uint fields= 0; *field_ptr; field_ptr++, fields++) { + /* + Note that type() always return MYSQL_TYPE_BLOB for + all blob types. Another function needs to be added + if we in the future want to distingush between blob + types here. 
+ */ enum enum_field_types type= (*field_ptr)->type(); - if (type < MYSQL_TYPE_MEDIUM_BLOB || + if (type < MYSQL_TYPE_TINY_BLOB || type > MYSQL_TYPE_BLOB) tab->field[fields]->register_field_in_read_map(); else @@ -952,7 +958,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, } pos--; enum enum_field_types type= tab->field[pos]->type(); - if (type < MYSQL_TYPE_MEDIUM_BLOB || + if (type < MYSQL_TYPE_TINY_BLOB || type > MYSQL_TYPE_BLOB) tab->field[pos]->register_field_in_read_map(); else @@ -984,6 +990,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, tab->keys_in_use_for_query.set_bit(--pos); } } + /* Ensure that number of records are updated */ + table->table->file->info(HA_STATUS_VARIABLE); if (!(compl_result_code= alloc_statistics_for_table(thd, table->table)) && !(compl_result_code= @@ -1279,13 +1287,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, if (table->table && !table->view) { - /* - Don't skip flushing if we are collecting EITS statistics. - */ - const bool skip_flush= - (operator_func == &handler::ha_analyze) && - (table->table->file->ha_table_flags() & HA_ONLINE_ANALYZE) && - !collect_eis; + /* Skip FLUSH TABLES if we are doing analyze */ + const bool skip_flush= (operator_func == &handler::ha_analyze); if (table->table->s->tmp_table) { /* @@ -1305,6 +1308,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, table->table= 0; // For query cache query_cache_invalidate3(thd, table, 0); } + else if (collect_eis && skip_flush && compl_result_code == HA_ADMIN_OK) + { + TABLE_LIST *save_next_global= table->next_global; + table->next_global= 0; + read_statistics_for_tables(thd, table, true /* force_reload */); + table->next_global= save_next_global; + } } /* Error path, a admin command failed. 
*/ if (thd->transaction_rollback_request || fatal_error) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 691925c82b2ce..4299d63105949 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -5323,7 +5323,8 @@ bool open_and_lock_tables(THD *thd, const DDL_options_st &options, goto err; /* Don't read statistics tables when opening internal tables */ - if (!(flags & MYSQL_OPEN_IGNORE_LOGGING_FORMAT)) + if (!(flags & (MYSQL_OPEN_IGNORE_LOGGING_FORMAT | + MYSQL_OPEN_IGNORE_ENGINE_STATS))) (void) read_statistics_for_tables_if_needed(thd, tables); if (derived) diff --git a/sql/sql_base.h b/sql/sql_base.h index 0bfef1ca7ee24..a67d28f47c9f2 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -130,6 +130,9 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, */ #define MYSQL_OPEN_IGNORE_LOGGING_FORMAT 0x20000 +/* Don't use statistics tables */ +#define MYSQL_OPEN_IGNORE_ENGINE_STATS 0x40000 + /** Please refer to the internals manual. */ #define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\ MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |\ diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 132be65a84883..73044c60c2054 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -580,7 +580,8 @@ bool open_and_lock_for_insert_delayed(THD *thd, TABLE_LIST *table_list) Open tables used for sub-selects or in stored functions, will also cache these functions. */ - if (open_and_lock_tables(thd, table_list->next_global, TRUE, 0)) + if (open_and_lock_tables(thd, table_list->next_global, TRUE, + MYSQL_OPEN_IGNORE_ENGINE_STATS)) { end_delayed_insert(thd); error= TRUE; @@ -2751,6 +2752,9 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) /* Ensure we don't use the table list of the original table */ copy->pos_in_table_list= 0; + /* We don't need statistics for insert delayed */ + copy->stats_cb= 0; + /* Make a copy of all fields. The copied fields need to point into the copied record. 
This is done diff --git a/sql/sql_show.cc b/sql/sql_show.cc index e90c52ceabda2..d12dbcebde547 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -6750,7 +6750,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, KEY *key_info=show_table->s->key_info; if (show_table->file) { - (void) read_statistics_for_tables(thd, tables); + (void) read_statistics_for_tables(thd, tables, false); show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_CONST | diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index d0b6ac20d1d07..e09370f94c427 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -62,7 +62,7 @@ /* Currently there are only 3 persistent statistical tables */ static const uint STATISTICS_TABLES= 3; -/* +/* The names of the statistical tables in this array must correspond the definitions of the tables in the file ../scripts/mysql_system_tables.sql */ @@ -74,6 +74,21 @@ static const LEX_CSTRING stat_table_name[STATISTICS_TABLES]= }; +TABLE_STATISTICS_CB::TABLE_STATISTICS_CB(): + usage_count(0), table_stats(0), total_hist_size(0), + stats_available(TABLE_STAT_NO_STATS) +{ + init_sql_alloc(PSI_INSTRUMENT_ME, &mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, + MYF(0)); +} + +TABLE_STATISTICS_CB::~TABLE_STATISTICS_CB() +{ + DBUG_ASSERT(usage_count == 0); + free_root(&mem_root, MYF(0)); +} + + /** @details The function builds a list of TABLE_LIST elements for system statistical @@ -349,8 +364,7 @@ class Column_statistics_collected :public Column_statistics Reading statistical data from a statistical table is performed by the following pattern. First a table dependent method sets the values of the - the fields that comprise the lookup key. Then an implementation of the - method get_stat_values() declared in Stat_table as a pure virtual method + the fields that comprise the lookup key. Then, get_stat_values(...) call finds the row from the statistical table by the set key. 
If the row is found the values of statistical fields are read from this row and are distributed in the internal structures. @@ -447,8 +461,8 @@ class Stat_table KEY *stat_key_info; /* Structure for the index to access stat_table */ /* Table for which statistical data is read / updated */ - TABLE *table; - TABLE_SHARE *table_share; /* Table share for 'table */ + const TABLE *table; + const TABLE_SHARE *table_share; /* Table share for 'table */ const LEX_CSTRING *db_name; /* Name of the database containing 'table' */ const LEX_CSTRING *table_name; /* Name of the table 'table' */ @@ -485,7 +499,7 @@ class Stat_table statistics has been collected. */ - Stat_table(TABLE *stat, TABLE *tab) + Stat_table(TABLE *stat, const TABLE *tab) :stat_table(stat), table(tab) { table_share= tab->s; @@ -528,7 +542,8 @@ class Stat_table The method is called by the update_table_name_key_parts function. */ - virtual void change_full_table_name(const LEX_CSTRING *db, const LEX_CSTRING *tab)= 0; + virtual void change_full_table_name(const LEX_CSTRING *db, + const LEX_CSTRING *tab)= 0; /** @@ -547,19 +562,6 @@ class Stat_table virtual void store_stat_fields()= 0; - /** - @brief - Read statistical data from fields of the statistical table - - @details - This is a purely virtual method. - The implementation for any derived read shall read the appropriate - statistical data from the corresponding fields of stat_table. 
- */ - - virtual void get_stat_values()= 0; - - /** @brief Find a record in the statistical table by a primary key @@ -746,7 +748,8 @@ class Table_stat: public Stat_table table_name_field= stat_table->field[TABLE_STAT_TABLE_NAME]; } - void change_full_table_name(const LEX_CSTRING *db, const LEX_CSTRING *tab) + void change_full_table_name(const LEX_CSTRING *db, + const LEX_CSTRING *tab) override { db_name_field->store(db->str, db->length, system_charset_info); table_name_field->store(tab->str, tab->length, system_charset_info); @@ -762,7 +765,7 @@ class Table_stat: public Stat_table must be passed as a value for the parameter 'stat'. */ - Table_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab) + Table_stat(TABLE *stat, const TABLE *tab) :Stat_table(stat, tab) { common_init_table_stat(); } @@ -816,7 +819,7 @@ class Table_stat: public Stat_table the field write_stat.cardinality' from the TABLE structure for 'table'. */ - void store_stat_fields() + void store_stat_fields() override { Field *stat_field= stat_table->field[TABLE_STAT_CARDINALITY]; if (table->collected_stats->cardinality_is_null) @@ -834,21 +837,19 @@ class Table_stat: public Stat_table Read statistical data from statistical fields of table_stat @details - This implementation of a purely virtual method first looks for a record - the statistical table table_stat by its primary key set the record - buffer with the help of Table_stat::set_key_fields. Then, if the row is - found the function reads the value of the column 'cardinality' of the table - table_stat and sets the value of the flag read_stat.cardinality_is_null - and the value of the field read_stat.cardinality' from the TABLE structure - for 'table' accordingly. - */ + Find a record in mysql.table_stat that has statistics for this table. + We search for record using a PK lookup. The lookup values are in the stat + table's record buffer, they were put there by Table_stat::set_key_fields. + + The result is stored in *read_stats. 
+ */ - void get_stat_values() + bool get_stat_values(Table_statistics *read_stats) { - Table_statistics *read_stats= table_share->stats_cb.table_stats; + bool res; read_stats->cardinality_is_null= TRUE; read_stats->cardinality= 0; - if (find_stat()) + if ((res= find_stat())) { Field *stat_field= stat_table->field[TABLE_STAT_CARDINALITY]; if (!stat_field->is_null()) @@ -857,8 +858,8 @@ class Table_stat: public Stat_table read_stats->cardinality= stat_field->val_int(); } } + return res; } - }; @@ -890,7 +891,8 @@ class Column_stat: public Stat_table column_name_field= stat_table->field[COLUMN_STAT_COLUMN_NAME]; } - void change_full_table_name(const LEX_CSTRING *db, const LEX_CSTRING *tab) + void change_full_table_name(const LEX_CSTRING *db, + const LEX_CSTRING *tab) override { db_name_field->store(db->str, db->length, system_charset_info); table_name_field->store(tab->str, tab->length, system_charset_info); @@ -906,7 +908,7 @@ class Column_stat: public Stat_table column_stats must be passed as a value for the parameter 'stat'. */ - Column_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab) + Column_stat(TABLE *stat, const TABLE *tab) :Stat_table(stat, tab) { common_init_column_stat_table(); } @@ -1019,7 +1021,7 @@ class Column_stat: public Stat_table length of the column. */ - void store_stat_fields() + void store_stat_fields() override { StringBuffer val; @@ -1085,120 +1087,107 @@ class Column_stat: public Stat_table Read statistical data from statistical fields of column_stats @details - This implementation of a purely virtual method first looks for a record - in the statistical table column_stats by its primary key set in the record - buffer with the help of Column_stat::set_key_fields. Then, if the row is + Find a record in mysql.column_stats that has statistics for this column. + We search for record using a PK lookup. The lookup values are in the stat + table's record buffer. 
Then, if the row is found, the function reads the values of the columns 'min_value', 'max_value', 'nulls_ratio', 'avg_length', 'avg_frequency', 'hist_size' and - 'hist_type" of the table column_stat and sets accordingly the value of - the bitmap read_stat.column_stat_nulls' and the values of the fields - min_value, max_value, nulls_ratio, avg_length, avg_frequency, hist_size and - hist_type of the structure read_stat from the Field structure for the field - 'table_field'. - */ + 'hist_type" of the table column_stat and sets the members of *read_stats + accordingly. + */ - void get_stat_values() + bool get_stat_values(Column_statistics *read_stats, MEM_ROOT *mem_root, + bool want_histograms) { - table_field->read_stats->set_all_nulls(); + bool res; + read_stats->set_all_nulls(); - if (table_field->read_stats->min_value) - table_field->read_stats->min_value->set_null(); - if (table_field->read_stats->max_value) - table_field->read_stats->max_value->set_null(); + if (read_stats->min_value) + read_stats->min_value->set_null(); + if (read_stats->max_value) + read_stats->max_value->set_null(); - if (find_stat()) + if ((res= find_stat())) { char buff[MAX_FIELD_WIDTH]; String val(buff, sizeof(buff), &my_charset_bin); for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HIST_TYPE; i++) - { + { Field *stat_field= stat_table->field[i]; if (!stat_field->is_null() && (i > COLUMN_STAT_MAX_VALUE || (i == COLUMN_STAT_MIN_VALUE && - table_field->read_stats->min_value) || + read_stats->min_value) || (i == COLUMN_STAT_MAX_VALUE && - table_field->read_stats->max_value))) + read_stats->max_value))) { - table_field->read_stats->set_not_null(i); + read_stats->set_not_null(i); switch (i) { case COLUMN_STAT_MIN_VALUE: { - Field *field= table_field->read_stats->min_value; + Field *field= read_stats->min_value; field->set_notnull(); if (table_field->type() == MYSQL_TYPE_BIT) field->store(stat_field->val_int(), true); else - field->store_from_statistical_minmax_field(stat_field, &val); + 
field->store_from_statistical_minmax_field(stat_field, &val, + mem_root); break; } case COLUMN_STAT_MAX_VALUE: { - Field *field= table_field->read_stats->max_value; + Field *field= read_stats->max_value; field->set_notnull(); if (table_field->type() == MYSQL_TYPE_BIT) field->store(stat_field->val_int(), true); else - field->store_from_statistical_minmax_field(stat_field, &val); + field->store_from_statistical_minmax_field(stat_field, &val, + mem_root); break; } case COLUMN_STAT_NULLS_RATIO: - table_field->read_stats->set_nulls_ratio(stat_field->val_real()); + read_stats->set_nulls_ratio(stat_field->val_real()); break; case COLUMN_STAT_AVG_LENGTH: - table_field->read_stats->set_avg_length(stat_field->val_real()); + read_stats->set_avg_length(stat_field->val_real()); break; case COLUMN_STAT_AVG_FREQUENCY: - table_field->read_stats->set_avg_frequency(stat_field->val_real()); + read_stats->set_avg_frequency(stat_field->val_real()); break; case COLUMN_STAT_HIST_SIZE: - table_field->read_stats->histogram.set_size(stat_field->val_int()); - break; + read_stats->histogram.set_size(stat_field->val_int()); + break; case COLUMN_STAT_HIST_TYPE: Histogram_type hist_type= (Histogram_type) (stat_field->val_int() - 1); - table_field->read_stats->histogram.set_type(hist_type); - break; + read_stats->histogram.set_type(hist_type); + break; } } } - } - } - - /** - @brief - Read histogram from of column_stats - - @details - This method first looks for a record in the statistical table column_stats - by its primary key set the record buffer with the help of - Column_stat::set_key_fields. Then, if the row is found, the function reads - the value of the column 'histogram' of the table column_stat and sets - accordingly the corresponding bit in the bitmap read_stat.column_stat_nulls. - The method assumes that the value of histogram size and the pointer to - the histogram location has been already set in the fields size and values - of read_stats->histogram. 
- */ - - void get_histogram_value() - { - if (find_stat()) - { - char buff[MAX_FIELD_WIDTH]; - String val(buff, sizeof(buff), &my_charset_bin); - uint fldno= COLUMN_STAT_HISTOGRAM; - Field *stat_field= stat_table->field[fldno]; - table_field->read_stats->set_not_null(fldno); - stat_field->val_str(&val); - memcpy(table_field->read_stats->histogram.get_values(), - val.ptr(), table_field->read_stats->histogram.get_size()); + if (want_histograms) + { + char buff[MAX_FIELD_WIDTH]; + String val(buff, sizeof(buff), &my_charset_bin), *result; + uint hist_size; + if ((hist_size= read_stats->histogram.get_size())) + { + uchar *histogram_buf= (uchar *) alloc_root(mem_root, hist_size); + if (!histogram_buf) + return false; /* purecov: inspected */ + read_stats->histogram.set_values(histogram_buf); + read_stats->set_not_null(COLUMN_STAT_HISTOGRAM); + result= stat_table->field[COLUMN_STAT_HISTOGRAM]->val_str(&val); + memcpy(histogram_buf, result->ptr(), hist_size); + } + } } + return res; } - }; @@ -1233,7 +1222,8 @@ class Index_stat: public Stat_table prefix_arity_field= stat_table->field[INDEX_STAT_PREFIX_ARITY]; } - void change_full_table_name(const LEX_CSTRING *db, const LEX_CSTRING *tab) + void change_full_table_name(const LEX_CSTRING *db, + const LEX_CSTRING *tab) override { db_name_field->store(db->str, db->length, system_charset_info); table_name_field->store(tab->str, tab->length, system_charset_info); @@ -1251,7 +1241,7 @@ class Index_stat: public Stat_table for the parameter 'stat'. */ - Index_stat(TABLE *stat, TABLE*tab) :Stat_table(stat, tab) + Index_stat(TABLE *stat, const TABLE *tab) :Stat_table(stat, tab) { common_init_index_stat_table(); } @@ -1355,7 +1345,7 @@ class Index_stat: public Stat_table equal to 0, the value of the column is set to NULL. 
*/ - void store_stat_fields() + void store_stat_fields() override { Field *stat_field= stat_table->field[INDEX_STAT_AVG_FREQUENCY]; double avg_frequency= @@ -1375,29 +1365,29 @@ class Index_stat: public Stat_table Read statistical data from statistical fields of index_stats @details - This implementation of a purely virtual method first looks for a record the - statistical table index_stats by its primary key set the record buffer with - the help of Index_stat::set_key_fields. If the row is found the function - reads the value of the column 'avg_freguency' of the table index_stat and - sets the value of read_stat.avg_frequency[Index_stat::prefix_arity] - from the KEY_INFO structure 'table_key_info' accordingly. If the value of - the column is NULL, read_stat.avg_frequency[Index_stat::prefix_arity] is - set to 0. Otherwise, read_stat.avg_frequency[Index_stat::prefix_arity] is - set to the value of the column. - */ + Find a record in mysql.index_stats that has statistics for the index prefix + of interest (the prefix length is in this->prefix_arity). + We search for record using a PK lookup. The lookup values are in the stat + table's record buffer. + + The result is stored in read_stats->avg_frequency[this->prefix_arity]. + If mysql.index_stats doesn't have the value or has SQL NULL, we store the + value of 0. 
+ */ - void get_stat_values() + bool get_stat_values(Index_statistics *read_stats) { double avg_frequency= 0; - if(find_stat()) + bool res; + if ((res= find_stat())) { Field *stat_field= stat_table->field[INDEX_STAT_AVG_FREQUENCY]; if (!stat_field->is_null()) avg_frequency= stat_field->val_real(); } - table_key_info->read_stats->set_avg_frequency(prefix_arity-1, avg_frequency); - } - + read_stats->set_avg_frequency(prefix_arity-1, avg_frequency); + return res; + } }; @@ -1727,7 +1717,6 @@ class Count_distinct_field: public Sql_alloc { return table_field->collected_stats->histogram.get_values(); } - }; @@ -1787,8 +1776,6 @@ class Index_prefix_calc: public Sql_alloc private: - /* Table containing index specified by index_info */ - TABLE *index_table; /* Info for the index i for whose prefix 'avg_frequency' is calculated */ KEY *index_info; /* The maximum number of the components in the prefixes of interest */ @@ -1825,7 +1812,7 @@ class Index_prefix_calc: public Sql_alloc bool is_partial_fields_present; Index_prefix_calc(THD *thd, TABLE *table, KEY *key_info) - : index_table(table), index_info(key_info), prefixes(0), empty(true), + : index_info(key_info), prefixes(0), empty(true), calc_state(NULL), is_single_comp_pk(false), is_partial_fields_present(false) { uint i; @@ -1859,8 +1846,8 @@ class Index_prefix_calc: public Sql_alloc } if (!(state->last_prefix= - new (thd->mem_root) Cached_item_field(thd, - key_info->key_part[i].field))) + new (thd->mem_root) + Cached_item_field(thd, key_info->key_part[i].field))) break; state->entry_count= state->prefix_count= 0; prefixes++; @@ -1971,12 +1958,12 @@ class Index_prefix_calc: public Sql_alloc */ static -void create_min_max_statistical_fields_for_table(TABLE *table) +void create_min_max_statistical_fields_for_table(THD *thd, TABLE *table) { uint rec_buff_length= table->s->rec_buff_length; if ((table->collected_stats->min_max_record_buffers= - (uchar *) alloc_root(&table->mem_root, 2*rec_buff_length))) + (uchar *) 
alloc_root(thd->mem_root, 2*rec_buff_length))) { uchar *record= table->collected_stats->min_max_record_buffers; memset(record, 0, 2*rec_buff_length); @@ -1990,7 +1977,7 @@ void create_min_max_statistical_fields_for_table(TABLE *table) my_ptrdiff_t diff= record-table->record[0]; if (!bitmap_is_set(table->read_set, table_field->field_index)) continue; - if (!(fld= table_field->clone(&table->mem_root, table, diff))) + if (!(fld= table_field->clone(thd->mem_root, table, diff))) continue; if (i == 0) table_field->collected_stats->min_value= fld; @@ -2007,22 +1994,20 @@ void create_min_max_statistical_fields_for_table(TABLE *table) Create fields for min/max values to read column statistics @param - thd Thread handler + thd Thread handler @param - table_share Table share the fields are created for + table_share Table share the fields are created for @param - is_safe TRUE <-> at any time only one thread can perform the function + stats_cb TABLE_STATISTICS_CB object whose mem_root is used for allocations @details - The function first allocates record buffers to store min/max values - for 'table_share's fields. Then for each field f it creates Field structures + The function first allocates record buffers to store min/max values for + fields in the table. For each field f it creates Field structures that points to these buffers rather that to the record buffer as the Field object for f does. The pointers of the created fields are placed in the read_stats structure of the Field object for f. - The function allocates the buffers for min/max values in the table share - memory. - If the parameter is_safe is TRUE then it is guaranteed that at any given time - only one thread is executed the code of the function. + The function allocates the buffers for min/max values in the stats_cb + memory. 
@note The buffers allocated when min/max values are used to collect statistics @@ -2030,14 +2015,14 @@ void create_min_max_statistical_fields_for_table(TABLE *table) are used when statistics on min/max values for column is read as they are allocated in different mem_roots. The same is true for the fields created for min/max values. -*/ +*/ -static -void create_min_max_statistical_fields_for_table_share(THD *thd, - TABLE_SHARE *table_share) +static void +create_min_max_statistical_fields(THD *thd, + const TABLE_SHARE *table_share, + TABLE_STATISTICS_CB *stats_cb) { - TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb; - Table_statistics *stats= stats_cb->table_stats; + Table_statistics *stats= stats_cb->table_stats; if (stats->min_max_record_buffers) return; @@ -2052,7 +2037,10 @@ void create_min_max_statistical_fields_for_table_share(THD *thd, for (uint i=0; i < 2; i++, record+= rec_buff_length) { - for (Field **field_ptr= table_share->field; *field_ptr; field_ptr++) + Column_statistics *column_stats= stats_cb->table_stats->column_stats; + for (Field **field_ptr= table_share->field; + *field_ptr; + field_ptr++, column_stats++) { Field *fld; Field *table_field= *field_ptr; @@ -2060,9 +2048,9 @@ void create_min_max_statistical_fields_for_table_share(THD *thd, if (!(fld= table_field->clone(&stats_cb->mem_root, NULL, diff))) continue; if (i == 0) - table_field->read_stats->min_value= fld; + column_stats->min_value= fld; else - table_field->read_stats->max_value= fld; + column_stats->max_value= fld; } } } @@ -2081,10 +2069,10 @@ void create_min_max_statistical_fields_for_table_share(THD *thd, The function allocates the memory for the statistical data on 'table' with the intention to collect the data there. The memory is allocated for the statistics on the table, on the table's columns, and on the table's - indexes. The memory is allocated in the table's mem_root. + indexes. The memory is allocated in the thd's mem_root. 
@retval - 0 If the memory for all statistical data has been successfully allocated + 0 If the memory for all statistical data has been successfully allocated @retval 1 Otherwise @@ -2097,56 +2085,40 @@ void create_min_max_statistical_fields_for_table_share(THD *thd, int alloc_statistics_for_table(THD* thd, TABLE *table) { Field **field_ptr; - - DBUG_ENTER("alloc_statistics_for_table"); - - uint columns= 0; - for (field_ptr= table->field; *field_ptr; field_ptr++) - { - if (bitmap_is_set(table->read_set, (*field_ptr)->field_index)) - columns++; - } - - Table_statistics *table_stats= - (Table_statistics *) alloc_root(&table->mem_root, - sizeof(Table_statistics)); - - Column_statistics_collected *column_stats= - (Column_statistics_collected *) alloc_root(&table->mem_root, - sizeof(Column_statistics_collected) * - columns); - + uint fields= bitmap_bits_set(table->read_set); uint keys= table->s->keys; - Index_statistics *index_stats= - (Index_statistics *) alloc_root(&table->mem_root, - sizeof(Index_statistics) * keys); - uint key_parts= table->s->ext_key_parts; - ulonglong *idx_avg_frequency= (ulonglong*) alloc_root(&table->mem_root, - sizeof(ulonglong) * key_parts); - uint hist_size= thd->variables.histogram_size; Histogram_type hist_type= (Histogram_type) (thd->variables.histogram_type); - uchar *histogram= NULL; - if (hist_size > 0) - { - if ((histogram= (uchar *) alloc_root(&table->mem_root, - hist_size * columns))) - bzero(histogram, hist_size * columns); - - } + Table_statistics *table_stats; + Column_statistics_collected *column_stats; + Index_statistics *index_stats; + ulonglong *idx_avg_frequency; + uchar *histogram; + DBUG_ENTER("alloc_statistics_for_table"); - if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency || - (hist_size && !histogram)) + if (!multi_alloc_root(thd->mem_root, + &table_stats, sizeof(*table_stats), + &column_stats, sizeof(*column_stats) * fields, + &index_stats, sizeof(*index_stats) * keys, + &idx_avg_frequency, + 
sizeof(idx_avg_frequency) * key_parts, + &histogram, hist_size * fields, + NullS)) DBUG_RETURN(1); + if (hist_size > 0) + bzero(histogram, hist_size * fields); + else + histogram= 0; + table->collected_stats= table_stats; table_stats->column_stats= column_stats; table_stats->index_stats= index_stats; table_stats->idx_avg_frequency= idx_avg_frequency; table_stats->histograms= histogram; - memset(column_stats, 0, sizeof(Column_statistics) * columns); + bzero(column_stats, sizeof(Column_statistics) * fields); for (field_ptr= table->field; *field_ptr; field_ptr++) { @@ -2158,6 +2130,8 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) histogram+= hist_size; (*field_ptr)->collected_stats= column_stats++; } + else + (*field_ptr)->collected_stats= 0; } memset(idx_avg_frequency, 0, sizeof(ulonglong) * key_parts); @@ -2172,7 +2146,7 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) idx_avg_frequency+= key_info->ext_key_parts; } - create_min_max_statistical_fields_for_table(table); + create_min_max_statistical_fields_for_table(thd, table); DBUG_RETURN(0); } @@ -2186,6 +2160,8 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) thd Thread handler @param table_share Table share for which the memory for statistical data is allocated + @param + stats_cb TABLE_STATISTICS_CB object for storing the statistical data @note The function allocates the memory for the statistical data on a table in the @@ -2214,87 +2190,49 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) guarantee the correctness of the allocation. 
*/ -static int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share) +static int +alloc_engine_independent_statistics(THD *thd, const TABLE_SHARE *table_share, + TABLE_STATISTICS_CB *stats_cb) { - Field **field_ptr; - KEY *key_info, *end; - TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb; - - DBUG_ENTER("alloc_statistics_for_table_share"); - Table_statistics *table_stats= stats_cb->table_stats; - if (!table_stats) - { - table_stats= (Table_statistics *) alloc_root(&stats_cb->mem_root, - sizeof(Table_statistics)); - if (!table_stats) - DBUG_RETURN(1); - memset(table_stats, 0, sizeof(Table_statistics)); - stats_cb->table_stats= table_stats; - } - uint fields= table_share->fields; - Column_statistics *column_stats= table_stats->column_stats; - if (!column_stats) - { - column_stats= (Column_statistics *) alloc_root(&stats_cb->mem_root, - sizeof(Column_statistics) * - (fields+1)); - if (column_stats) - { - memset(column_stats, 0, sizeof(Column_statistics) * (fields+1)); - table_stats->column_stats= column_stats; - for (field_ptr= table_share->field; - *field_ptr; - field_ptr++, column_stats++) - { - (*field_ptr)->read_stats= column_stats; - (*field_ptr)->read_stats->min_value= NULL; - (*field_ptr)->read_stats->max_value= NULL; - } - create_min_max_statistical_fields_for_table_share(thd, table_share); - } - } - uint keys= table_share->keys; - Index_statistics *index_stats= table_stats->index_stats; - if (!index_stats) - { - index_stats= (Index_statistics *) alloc_root(&stats_cb->mem_root, - sizeof(Index_statistics) * - keys); - if (index_stats) - { - table_stats->index_stats= index_stats; - for (key_info= table_share->key_info, end= key_info + keys; - key_info < end; - key_info++, index_stats++) - { - key_info->read_stats= index_stats; - } - } - } - uint key_parts= table_share->ext_key_parts; - ulonglong *idx_avg_frequency= table_stats->idx_avg_frequency; - if (!idx_avg_frequency) + Index_statistics *index_stats; + ulonglong *idx_avg_frequency; + 
DBUG_ENTER("alloc_engine_independent_statistics"); + + Column_statistics *column_stats; + if (!multi_alloc_root(&stats_cb->mem_root, + &table_stats, sizeof(Table_statistics), + &column_stats, sizeof(Column_statistics) * fields, + &index_stats, sizeof(Index_statistics) * keys, + &idx_avg_frequency, + sizeof(idx_avg_frequency) * key_parts, + NullS)) + DBUG_RETURN(1); + + /* Zero variables but not the gaps between them */ + bzero(table_stats, sizeof(Table_statistics)); + bzero(column_stats, sizeof(Column_statistics) * fields); + bzero(index_stats, sizeof(Index_statistics) * keys); + bzero(idx_avg_frequency, sizeof(idx_avg_frequency) * key_parts); + + stats_cb->table_stats= table_stats; + table_stats->column_stats= column_stats; + table_stats->index_stats= index_stats; + table_stats->idx_avg_frequency= idx_avg_frequency; + + create_min_max_statistical_fields(thd, table_share, stats_cb); + + for (KEY *key_info= table_share->key_info, *end= key_info + keys; + key_info < end; + key_info++, index_stats++) { - idx_avg_frequency= (ulonglong*) alloc_root(&stats_cb->mem_root, - sizeof(ulonglong) * key_parts); - if (idx_avg_frequency) - { - memset(idx_avg_frequency, 0, sizeof(ulonglong) * key_parts); - table_stats->idx_avg_frequency= idx_avg_frequency; - for (key_info= table_share->key_info, end= key_info + keys; - key_info < end; - key_info++) - { - key_info->read_stats->init_avg_frequency(idx_avg_frequency); - idx_avg_frequency+= key_info->ext_key_parts; - } - } + index_stats->init_avg_frequency(idx_avg_frequency); + idx_avg_frequency+= key_info->ext_key_parts; } - DBUG_RETURN(column_stats && index_stats && idx_avg_frequency ? 0 : 1); + DBUG_RETURN(0); } @@ -2516,7 +2454,6 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index) { int rc= 0; KEY *key_info= &table->key_info[index]; - DBUG_ENTER("collect_statistics_for_index"); /* No statistics for FULLTEXT indexes. 
*/ @@ -2626,7 +2563,6 @@ int collect_statistics_for_table(THD *thd, TABLE *table) handler *file=table->file; double sample_fraction= thd->variables.sample_percentage / 100; const ha_rows MIN_THRESHOLD_FOR_SAMPLING= 50000; - DBUG_ENTER("collect_statistics_for_table"); table->collected_stats->cardinality_is_null= TRUE; @@ -2791,6 +2727,12 @@ int update_statistics_for_table(THD *thd, TABLE *table) if (open_stat_tables(thd, tables, TRUE)) DBUG_RETURN(rc); + /* + Ensure that no one is reading satistics while we are writing them + This ensures that statistics is always read consistently + */ + mysql_mutex_lock(&table->s->LOCK_statistics); + save_binlog_format= thd->set_current_stmt_binlog_format_stmt(); /* Update the statistical table table_stats */ @@ -2842,6 +2784,7 @@ int update_statistics_for_table(THD *thd, TABLE *table) rc= 1; new_trans.restore_old_transaction(); + mysql_mutex_unlock(&table->s->LOCK_statistics); DBUG_RETURN(rc); } @@ -2851,11 +2794,14 @@ int update_statistics_for_table(THD *thd, TABLE *table) Read statistics for a table from the persistent statistical tables @param - thd The thread handle + thd The thread handle + @param + table The table to read statistics on. @param - table The table to read statistics on + stat_tables The array of TABLE_LIST objects for statistical tables @param - stat_tables The array of TABLE_LIST objects for statistical tables + force_reload Flag to require reloading the statistics from the tables + even if it has been already loaded @details For each statistical table the function looks for the rows from this @@ -2869,9 +2815,9 @@ int update_statistics_for_table(THD *thd, TABLE *table) The function is called by read_statistics_for_tables_if_needed(). 
@retval - 0 If data has been successfully read for the table + pointer to object If data has been successfully read for the table @retval - 1 Otherwise + 0 Otherwise @note Objects of the helper classes Table_stat, Column_stat and Index_stat @@ -2880,99 +2826,134 @@ int update_statistics_for_table(THD *thd, TABLE *table) */ static -int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables) +TABLE_STATISTICS_CB* +read_statistics_for_table(THD *thd, TABLE *table, + TABLE_LIST *stat_tables, bool force_reload, + bool want_histograms) { + bool found; uint i; TABLE *stat_table; Field *table_field; Field **field_ptr; KEY *key_info, *key_info_end; TABLE_SHARE *table_share= table->s; - DBUG_ENTER("read_statistics_for_table"); - DEBUG_SYNC(thd, "statistics_mem_alloc_start1"); - DEBUG_SYNC(thd, "statistics_mem_alloc_start2"); - if (!table_share->stats_cb.start_stats_load()) - DBUG_RETURN(table_share->stats_cb.stats_are_ready() ? 0 : 1); + if (!force_reload && table_share->stats_cb) + { + if (table->stats_cb == table_share->stats_cb) + DBUG_RETURN(table->stats_cb); // Use current + table->update_engine_independent_stats(); // Copy table_share->stats_cb + DBUG_RETURN(table->stats_cb); + } + + /* + Read data into a new TABLE_STATISTICS_CB object and replace + TABLE_SHARE::stats_cb with this new one once the reading is finished + */ + TABLE_STATISTICS_CB *new_stats_cb; + if (!(new_stats_cb= new TABLE_STATISTICS_CB)) + DBUG_RETURN(0); /* purecov: inspected */ - if (alloc_statistics_for_table_share(thd, table_share)) + if (alloc_engine_independent_statistics(thd, table_share, new_stats_cb)) { - table_share->stats_cb.abort_stats_load(); - DBUG_RETURN(1); + /* purecov: begin inspected */ + delete new_stats_cb; + DBUG_RETURN(0); + /* purecov: end */ } /* Don't write warnings for internal field conversions */ Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); /* Read statistics from the statistical table table_stats */ - Table_statistics *read_stats= 
table_share->stats_cb.table_stats; + Table_statistics *read_stats= new_stats_cb->table_stats; stat_table= stat_tables[TABLE_STAT].table; Table_stat table_stat(stat_table, table); table_stat.set_key_fields(); - table_stat.get_stat_values(); - + if (table_stat.get_stat_values(new_stats_cb->table_stats)) + new_stats_cb->stats_available|= TABLE_STAT_TABLE; + /* Read statistics from the statistical table column_stats */ stat_table= stat_tables[COLUMN_STAT].table; ulong total_hist_size= 0; Column_stat column_stat(stat_table, table); - for (field_ptr= table_share->field; *field_ptr; field_ptr++) + Column_statistics *column_statistics= new_stats_cb->table_stats->column_stats; + found= 0; + for (field_ptr= table_share->field; + *field_ptr; + field_ptr++, column_statistics++) { table_field= *field_ptr; column_stat.set_key_fields(table_field); - column_stat.get_stat_values(); - total_hist_size+= table_field->read_stats->histogram.get_size(); + found|= column_stat.get_stat_values(column_statistics, + &new_stats_cb->mem_root, + want_histograms); + total_hist_size+= column_statistics->histogram.get_size(); + } + if (found) + { + new_stats_cb->stats_available|= TABLE_STAT_COLUMN; + if (total_hist_size && want_histograms) + new_stats_cb->stats_available|= TABLE_STAT_HISTOGRAM; } - table_share->stats_cb.total_hist_size= total_hist_size; + + new_stats_cb->total_hist_size= total_hist_size; /* Read statistics from the statistical table index_stats */ stat_table= stat_tables[INDEX_STAT].table; Index_stat index_stat(stat_table, table); + Index_statistics *index_statistics= new_stats_cb->table_stats->index_stats; for (key_info= table_share->key_info, key_info_end= key_info + table_share->keys; - key_info < key_info_end; key_info++) + key_info < key_info_end; key_info++, index_statistics++) { uint key_parts= key_info->ext_key_parts; + found= 0; for (i= 0; i < key_parts; i++) { index_stat.set_key_fields(key_info, i+1); - index_stat.get_stat_values(); + found|= 
index_stat.get_stat_values(index_statistics); } - + if (found) + new_stats_cb->stats_available|= TABLE_STAT_INDEX; + key_part_map ext_key_part_map= key_info->ext_key_part_map; if (key_info->user_defined_key_parts != key_info->ext_key_parts && - key_info->read_stats->get_avg_frequency(key_info->user_defined_key_parts) == 0) + index_statistics->get_avg_frequency(key_info->user_defined_key_parts) == 0) { KEY *pk_key_info= table_share->key_info + table_share->primary_key; uint k= key_info->user_defined_key_parts; uint pk_parts= pk_key_info->user_defined_key_parts; ha_rows n_rows= read_stats->cardinality; - double k_dist= n_rows / key_info->read_stats->get_avg_frequency(k-1); + double k_dist= n_rows / index_statistics->get_avg_frequency(k-1); uint m= 0; + Index_statistics *pk_read_stats= (new_stats_cb->table_stats->index_stats + + table_share->primary_key); for (uint j= 0; j < pk_parts; j++) { if (!(ext_key_part_map & 1 << j)) { for (uint l= k; l < k + m; l++) { - double avg_frequency= - pk_key_info->read_stats->get_avg_frequency(j-1); + double avg_frequency= pk_read_stats->get_avg_frequency(j-1); set_if_smaller(avg_frequency, 1); - double val= pk_key_info->read_stats->get_avg_frequency(j) / - avg_frequency; - key_info->read_stats->set_avg_frequency (l, val); + double val= (pk_read_stats->get_avg_frequency(j) / + avg_frequency); + index_statistics->set_avg_frequency (l, val); } } else { - double avg_frequency= pk_key_info->read_stats->get_avg_frequency(j); - key_info->read_stats->set_avg_frequency(k + m, avg_frequency); + double avg_frequency= pk_read_stats->get_avg_frequency(j); + index_statistics->set_avg_frequency(k + m, avg_frequency); m++; } } for (uint l= k; l < k + m; l++) { - double avg_frequency= key_info->read_stats->get_avg_frequency(l); + double avg_frequency= index_statistics->get_avg_frequency(l); if (avg_frequency == 0 || read_stats->cardinality_is_null) avg_frequency= 1; else if (avg_frequency > 1) @@ -2980,116 +2961,14 @@ int 
read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables) avg_frequency/= k_dist; set_if_bigger(avg_frequency, 1); } - key_info->read_stats->set_avg_frequency(l, avg_frequency); + index_statistics->set_avg_frequency(l, avg_frequency); } } } - - table_share->stats_cb.end_stats_load(); - DBUG_RETURN(0); -} - - -/** - @breif - Cleanup of min/max statistical values for table share -*/ - -void delete_stat_values_for_table_share(TABLE_SHARE *table_share) -{ - TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb; - Table_statistics *table_stats= stats_cb->table_stats; - if (!table_stats) - return; - Column_statistics *column_stats= table_stats->column_stats; - if (!column_stats) - return; - - for (Field **field_ptr= table_share->field; - *field_ptr; - field_ptr++, column_stats++) - { - if (column_stats->min_value) - { - delete column_stats->min_value; - column_stats->min_value= NULL; - } - if (column_stats->max_value) - { - delete column_stats->max_value; - column_stats->max_value= NULL; - } - } + DBUG_RETURN(new_stats_cb); } -/** - @brief - Read histogram for a table from the persistent statistical tables - - @param - thd The thread handle - @param - table The table to read histograms for - @param - stat_tables The array of TABLE_LIST objects for statistical tables - - @details - For the statistical table columns_stats the function looks for the rows - from this table that contain statistical data on 'table'. If such rows - are found the histograms from them are read into the memory allocated - for histograms of 'table'. Later at the query processing these histogram - are supposed to be used by the optimizer. - The parameter stat_tables should point to an array of TABLE_LIST - objects for all statistical tables linked into a list. All statistical - tables are supposed to be opened. - The function is called by read_statistics_for_tables_if_needed(). 
- - @retval - 0 If data has been successfully read for the table - @retval - 1 Otherwise - - @note - Objects of the helper Column_stat are employed read histogram - from the statistical table column_stats now. -*/ - -static -int read_histograms_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables) -{ - TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb; - DBUG_ENTER("read_histograms_for_table"); - - if (stats_cb->start_histograms_load()) - { - uchar *histogram= (uchar *) alloc_root(&stats_cb->mem_root, - stats_cb->total_hist_size); - if (!histogram) - { - stats_cb->abort_histograms_load(); - DBUG_RETURN(1); - } - memset(histogram, 0, stats_cb->total_hist_size); - - Column_stat column_stat(stat_tables[COLUMN_STAT].table, table); - for (Field **field_ptr= table->s->field; *field_ptr; field_ptr++) - { - Field *table_field= *field_ptr; - if (uint hist_size= table_field->read_stats->histogram.get_size()) - { - column_stat.set_key_fields(table_field); - table_field->read_stats->histogram.set_values(histogram); - column_stat.get_histogram_value(); - histogram+= hist_size; - } - } - stats_cb->end_histograms_load(); - } - table->histograms_are_read= true; - DBUG_RETURN(0); -} - /** @brief Read statistics for tables from a table list if it is needed @@ -3127,65 +3006,97 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables) case SQLCOM_CREATE_TABLE: case SQLCOM_SET_OPTION: case SQLCOM_DO: - return read_statistics_for_tables(thd, tables); + return read_statistics_for_tables(thd, tables, 0); default: return 0; } } -static void dump_stats_from_share_to_table(TABLE *table) +/* + Update TABLE field and key objects with pointers to + the current statistical data in table->stats_cb +*/ + + +void TABLE_STATISTICS_CB::update_stats_in_table(TABLE *table) { - TABLE_SHARE *table_share= table->s; - KEY *key_info= table_share->key_info; - KEY *key_info_end= key_info + table_share->keys; - KEY *table_key_info= table->key_info; - for ( ; key_info < key_info_end; 
key_info++, table_key_info++) - table_key_info->read_stats= key_info->read_stats; - - Field **field_ptr= table_share->field; - Field **table_field_ptr= table->field; - for ( ; *field_ptr; field_ptr++, table_field_ptr++) - (*table_field_ptr)->read_stats= (*field_ptr)->read_stats; + DBUG_ASSERT(table->stats_cb == this); + + /* + Table_statistics doesn't need to be updated: set_statistics_for_table() + sets TABLE::used_stat_records from table->stats_cb.table_stats.cardinality + */ + + KEY *key_info= table->key_info; + KEY *key_info_end= key_info + table->s->keys; + Index_statistics *index_stats= table_stats->index_stats; + + for ( ; key_info < key_info_end; key_info++, index_stats++) + key_info->read_stats= index_stats; + + Field **field_ptr= table->field; + Column_statistics *column_stats= table_stats->column_stats; + + for ( ; *field_ptr; field_ptr++, column_stats++) + (*field_ptr)->read_stats= column_stats; + /* Mark that stats are now usable */ table->stats_is_read= true; } -int read_statistics_for_tables(THD *thd, TABLE_LIST *tables) +int +read_statistics_for_tables(THD *thd, TABLE_LIST *tables, bool force_reload) { TABLE_LIST stat_tables[STATISTICS_TABLES]; - + bool found_stat_table= false; + bool statistics_for_tables_is_needed= false; + bool want_histograms= thd->variables.optimizer_use_condition_selectivity > 3; DBUG_ENTER("read_statistics_for_tables"); - if (thd->bootstrap || thd->variables.use_stat_tables == NEVER) + if (thd->bootstrap || thd->variables.use_stat_tables == NEVER || !tables) DBUG_RETURN(0); - bool found_stat_table= false; - bool statistics_for_tables_is_needed= false; - for (TABLE_LIST *tl= tables; tl; tl= tl->next_global) { + TABLE *table= tl->table; TABLE_SHARE *table_share; - if (!tl->is_view_or_derived() && tl->table && (table_share= tl->table->s) && - table_share->tmp_table == NO_TMP_TABLE) + + /* Skip tables that can't have statistics. 
*/ + if (tl->is_view_or_derived() || !table || !(table_share= table->s)) + continue; + /* Skip temporary tables */ + if (table_share->tmp_table != NO_TMP_TABLE) + continue; + + if (table_share->table_category == TABLE_CATEGORY_USER) { - if (table_share->table_category == TABLE_CATEGORY_USER) + /* Force reloading means we always read all stats tables. */ + if (force_reload || !table_share->stats_cb) + { + statistics_for_tables_is_needed= true; + continue; + } + + /* Stats versions don't match, take a reference under a mutex. */ + if (table->stats_cb != table_share->stats_cb) + { + table->update_engine_independent_stats(); + table->stats_cb->update_stats_in_table(table); + } + /* + We need to read histograms if they exist but have not yet been + loaded into memory. + */ + if (want_histograms && + table->stats_cb->histograms_exists() && + !(table->stats_cb->stats_available & TABLE_STAT_HISTOGRAM)) { - if (table_share->stats_cb.stats_are_ready()) - { - if (!tl->table->stats_is_read) - dump_stats_from_share_to_table(tl->table); - tl->table->histograms_are_read= - table_share->stats_cb.histograms_are_ready(); - if (table_share->stats_cb.histograms_are_ready() || - thd->variables.optimizer_use_condition_selectivity <= 3) - continue; - } statistics_for_tables_is_needed= true; } - else if (is_stat_table(&tl->db, &tl->alias)) - found_stat_table= true; } + else if (is_stat_table(&tl->db, &tl->alias)) + found_stat_table= true; } DEBUG_SYNC(thd, "statistics_read_start"); @@ -3205,20 +3116,50 @@ int read_statistics_for_tables(THD *thd, TABLE_LIST *tables) for (TABLE_LIST *tl= tables; tl; tl= tl->next_global) { + TABLE *table= tl->table; TABLE_SHARE *table_share; - if (!tl->is_view_or_derived() && tl->table && (table_share= tl->table->s) && - table_share->tmp_table == NO_TMP_TABLE && - table_share->table_category == TABLE_CATEGORY_USER) + + /* Skip tables that can't have statistics. 
*/ + if (tl->is_view_or_derived() || !table || !(table_share= table->s) || + table_share->tmp_table != NO_TMP_TABLE || + table_share->table_category != TABLE_CATEGORY_USER) + continue; + + if (force_reload || !table_share->stats_cb || + table->stats_cb != table_share->stats_cb || + (want_histograms && table->stats_cb->histograms_exists() && + !(table->stats_cb->stats_available & TABLE_STAT_HISTOGRAM))) { - if (!tl->table->stats_is_read) + TABLE_STATISTICS_CB *stats_cb; + DEBUG_SYNC(thd, "read_statistics_for_table_start1"); + DEBUG_SYNC(thd, "read_statistics_for_table_start2"); + + /* + The following lock is here to ensure that if a lot of threads are + accessing the table at the same time after a ANALYZE TABLE, + only one thread is loading the data from the the stats tables + and the others threads are reusing the loaded data. + */ + mysql_mutex_lock(&table_share->LOCK_statistics); + if (!(stats_cb= read_statistics_for_table(thd, table, stat_tables, + force_reload, want_histograms))) { - if (!read_statistics_for_table(thd, tl->table, stat_tables)) - dump_stats_from_share_to_table(tl->table); - else - continue; + /* purecov: begin inspected */ + mysql_mutex_unlock(&table_share->LOCK_statistics); + continue; + /* purecov: end */ } - if (thd->variables.optimizer_use_condition_selectivity > 3) - (void) read_histograms_for_table(thd, tl->table, stat_tables); + + if (stats_cb->unused()) + { + /* New object created, update share to use it */ + table_share->update_engine_independent_stats(stats_cb); + table->update_engine_independent_stats(); + } + mysql_mutex_unlock(&table_share->LOCK_statistics); + table->stats_cb->update_stats_in_table(table); + table->stats_is_read= (stats_cb->stats_available != + TABLE_STAT_NO_STATS); } } @@ -3377,7 +3318,8 @@ int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col) @param thd The thread handle @param tab The table the index belongs to - @param key_info The descriptor of the index whose statistics is to be deleted + @param 
key_info The descriptor of the index whose statistics is to be + deleted @param ext_prefixes_only Delete statistics only on the index prefixes extended by the components of the primary key @@ -3385,7 +3327,8 @@ int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col) The function delete statistics on the index specified by 'key_info' defined on the table 'tab' from the statistical table index_stats. - @retval 0 If all deletions are successful or we couldn't open statistics table + @retval 0 If all deletions are successful or we couldn't open statistics + table @retval 1 Otherwise @note @@ -3625,11 +3568,12 @@ int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col, void set_statistics_for_table(THD *thd, TABLE *table) { - TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb; - Table_statistics *read_stats= stats_cb->table_stats; + TABLE_STATISTICS_CB *stats_cb= table->stats_cb; + + Table_statistics *read_stats= stats_cb ? stats_cb->table_stats : 0; table->used_stat_records= (!check_eits_preferred(thd) || - !table->stats_is_read || read_stats->cardinality_is_null) ? + !table->stats_is_read || !read_stats || read_stats->cardinality_is_null) ? 
table->file->stats.records : read_stats->cardinality; /* diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index 35b3aa33accf5..f5ccb26af7be2 100644 --- a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -116,9 +116,9 @@ bool check_eits_preferred(THD *thd) } int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables); -int read_statistics_for_tables(THD *thd, TABLE_LIST *tables); +int read_statistics_for_tables(THD *thd, TABLE_LIST *tables, + bool force_reload); int collect_statistics_for_table(THD *thd, TABLE *table); -void delete_stat_values_for_table_share(TABLE_SHARE *table_share); int alloc_statistics_for_table(THD *thd, TABLE *table); int update_statistics_for_table(THD *thd, TABLE *table); int delete_statistics_for_table(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *tab); @@ -308,7 +308,7 @@ class Table_statistics /* Array of records per key for index prefixes */ ulonglong *idx_avg_frequency; - uchar *histograms; /* Sequence of histograms */ + uchar *histograms; /* Sequence of histograms */ }; diff --git a/sql/table.cc b/sql/table.cc index 2a6b758a0272a..4badef7d5a19f 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -373,14 +373,13 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name, table_alias_charset->strnncoll(key, 6, "mysql", 6) == 0) share->not_usable_by_query_cache= 1; - init_sql_alloc(PSI_INSTRUMENT_ME, &share->stats_cb.mem_root, - TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0)); - memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root)); mysql_mutex_init(key_TABLE_SHARE_LOCK_share, &share->LOCK_share, MY_MUTEX_INIT_SLOW); mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data, &share->LOCK_ha_data, MY_MUTEX_INIT_FAST); + mysql_mutex_init(key_TABLE_SHARE_LOCK_statistics, + &share->LOCK_statistics, MY_MUTEX_INIT_SLOW); DBUG_EXECUTE_IF("simulate_big_table_id", if (last_table_id < UINT_MAX32) @@ -481,15 +480,19 @@ void TABLE_SHARE::destroy() ha_share= NULL; // Safety } - delete_stat_values_for_table_share(this); + if 
(stats_cb) + { + stats_cb->usage_count--; + delete stats_cb; + } delete sequence; - free_root(&stats_cb.mem_root, MYF(0)); /* The mutexes are initialized only for shares that are part of the TDC */ if (tmp_table == NO_TMP_TABLE) { mysql_mutex_destroy(&LOCK_share); mysql_mutex_destroy(&LOCK_ha_data); + mysql_mutex_destroy(&LOCK_statistics); } my_hash_free(&name_hash); @@ -4527,6 +4530,72 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, } +/** + Free engine stats + + This is only called from closefrm() when the TABLE object is destroyed +**/ + +void TABLE::free_engine_stats() +{ + bool free_stats= 0; + TABLE_STATISTICS_CB *stats= stats_cb; + mysql_mutex_lock(&s->LOCK_share); + free_stats= --stats->usage_count == 0; + mysql_mutex_unlock(&s->LOCK_share); + if (free_stats) + delete stats; +} + + +/* + Use engine stats from table_share if table_share has been updated +*/ + +void TABLE::update_engine_independent_stats() +{ + bool free_stats= 0; + TABLE_STATISTICS_CB *org_stats= stats_cb; + DBUG_ASSERT(stats_cb != s->stats_cb); + + if (stats_cb != s->stats_cb) + { + mysql_mutex_lock(&s->LOCK_share); + if (org_stats) + free_stats= --org_stats->usage_count == 0; + if ((stats_cb= s->stats_cb)) + stats_cb->usage_count++; + mysql_mutex_unlock(&s->LOCK_share); + if (free_stats) + delete org_stats; + } +} + + +/* + Update engine stats in table share to use new stats +*/ + +void +TABLE_SHARE::update_engine_independent_stats(TABLE_STATISTICS_CB *new_stats) +{ + TABLE_STATISTICS_CB *free_stats= 0; + DBUG_ASSERT(new_stats->usage_count == 0); + + mysql_mutex_lock(&LOCK_share); + if (stats_cb) + { + if (!--stats_cb->usage_count) + free_stats= stats_cb; + } + stats_cb= new_stats; + new_stats->usage_count++; + mysql_mutex_unlock(&LOCK_share); + if (free_stats) + delete free_stats; +} + + /* Free information allocated by openfrm @@ -4565,6 +4634,12 @@ int closefrm(TABLE *table) table->part_info= 0; } #endif + if (table->stats_cb) + { + 
DBUG_ASSERT(table->s->tmp_table == NO_TMP_TABLE); + table->free_engine_stats(); + } + free_root(&table->mem_root, MYF(0)); DBUG_RETURN(error); } diff --git a/sql/table.h b/sql/table.h index af09b03c9e0e2..382692cb7cade 100644 --- a/sql/table.h +++ b/sql/table.h @@ -624,94 +624,55 @@ enum open_frm_error { from persistent statistical tables */ -class TABLE_STATISTICS_CB -{ - class Statistics_state - { - enum state_codes - { - EMPTY, /** data is not loaded */ - LOADING, /** data is being loaded in some connection */ - READY /** data is loaded and available for use */ - }; - int32 state; - - public: - /** No state copy */ - Statistics_state &operator=(const Statistics_state &) { return *this; } - - /** Checks if data loading have been completed */ - bool is_ready() const - { - return my_atomic_load32_explicit(const_cast(&state), - MY_MEMORY_ORDER_ACQUIRE) == READY; - } - /** - Sets mutual exclusion for data loading +#define TABLE_STAT_NO_STATS 0 +#define TABLE_STAT_TABLE 1 +#define TABLE_STAT_COLUMN 2 +#define TABLE_STAT_INDEX 4 +#define TABLE_STAT_HISTOGRAM 8 - If stats are in LOADING state, waits until state change. +/* + EITS statistics information for a table. - @return - @retval true atomic EMPTY -> LOADING transfer completed, ok to load - @retval false stats are in READY state, no need to load - */ - bool start_load() - { - for (;;) - { - int32 expected= EMPTY; - if (my_atomic_cas32_weak_explicit(&state, &expected, LOADING, - MY_MEMORY_ORDER_RELAXED, - MY_MEMORY_ORDER_RELAXED)) - return true; - if (expected == READY) - return false; - (void) LF_BACKOFF(); - } - } + This data is loaded from mysql.{table|index|column}_stats tables and + then most of the time is owned by table's TABLE_SHARE object. 
- /** Marks data available for subsequent use */ - void end_load() - { - DBUG_ASSERT(my_atomic_load32_explicit(&state, MY_MEMORY_ORDER_RELAXED) == - LOADING); - my_atomic_store32_explicit(&state, READY, MY_MEMORY_ORDER_RELEASE); - } + Individual TABLE objects also have pointer to this object, and we do + reference counting to know when to free it. See + TABLE::update_engine_stats(), TABLE::free_engine_stats(), + TABLE_SHARE::update_engine_stats(), TABLE_SHARE::destroy(). + These implement a "shared pointer"-like functionality. - /** Restores empty state on error (e.g. OOM) */ - void abort_load() - { - DBUG_ASSERT(my_atomic_load32_explicit(&state, MY_MEMORY_ORDER_RELAXED) == - LOADING); - my_atomic_store32_explicit(&state, EMPTY, MY_MEMORY_ORDER_RELAXED); - } - }; + When new statistics is loaded, we create new TABLE_STATISTICS_CB and make + the TABLE_SHARE point to it. Some TABLE object may still be using older + TABLE_STATISTICS_CB objects. Reference counting allows to free + TABLE_STATISTICS_CB when it is no longer used. 
+*/ - class Statistics_state stats_state; - class Statistics_state hist_state; +class TABLE_STATISTICS_CB +{ + uint usage_count; // Instances of this stat public: + TABLE_STATISTICS_CB(); + ~TABLE_STATISTICS_CB(); MEM_ROOT mem_root; /* MEM_ROOT to allocate statistical data for the table */ Table_statistics *table_stats; /* Structure to access the statistical data */ ulong total_hist_size; /* Total size of all histograms */ + uint stats_available; - bool histograms_are_ready() const + bool histograms_exists() const { - return !total_hist_size || hist_state.is_ready(); + return total_hist_size != 0; } - - bool start_histograms_load() + bool unused() { - return total_hist_size && hist_state.start_load(); + return usage_count == 0; } - - void end_histograms_load() { hist_state.end_load(); } - void abort_histograms_load() { hist_state.abort_load(); } - bool stats_are_ready() const { return stats_state.is_ready(); } - bool start_stats_load() { return stats_state.start_load(); } - void end_stats_load() { stats_state.end_load(); } - void abort_stats_load() { stats_state.abort_load(); } + /* Copy (latest) state from TABLE_SHARE to TABLE */ + void update_stats_in_table(TABLE *table); + friend struct TABLE; + friend struct TABLE_SHARE; }; /** @@ -734,6 +695,7 @@ struct TABLE_SHARE TYPELIB *intervals; /* pointer to interval info */ mysql_mutex_t LOCK_ha_data; /* To protect access to ha_data */ mysql_mutex_t LOCK_share; /* To protect TABLE_SHARE */ + mysql_mutex_t LOCK_statistics; /* To protect against concurrent load */ TDC_element *tdc; @@ -750,7 +712,17 @@ struct TABLE_SHARE uint *blob_field; /* Index to blobs in Field arrray*/ LEX_CUSTRING vcol_defs; /* definitions of generated columns */ - TABLE_STATISTICS_CB stats_cb; + /* + EITS statistics data from the last time the table was opened or ANALYZE + table was run. + This is typically same as any related TABLE::stats_cb until ANALYZE + table is run. 
+ This pointer is only to be de-referenced under LOCK_share as the + pointer can change by another thread running ANALYZE TABLE. + Without using a LOCK_share one can check if the statistics has been + updated by checking if TABLE::stats_cb != TABLE_SHARE::stats_cb. + */ + TABLE_STATISTICS_CB *stats_cb; uchar *default_values; /* row with default values */ LEX_CSTRING comment; /* Comment about table */ @@ -1174,7 +1146,6 @@ struct TABLE_SHARE void set_overlapped_keys(); void set_ignored_indexes(); key_map usable_indexes(THD *thd); - bool old_long_hash_function() const { return mysql_version < 100428 || @@ -1189,6 +1160,7 @@ struct TABLE_SHARE Item_func_hash *make_long_hash_func(THD *thd, MEM_ROOT *mem_root, List *field_list) const; + void update_engine_independent_stats(TABLE_STATISTICS_CB *stat); }; /* not NULL, but cannot be dereferenced */ @@ -1577,6 +1549,7 @@ struct TABLE and can be useful for range optimizer. */ Item *notnull_cond; + TABLE_STATISTICS_CB *stats_cb; inline void reset() { bzero((void*)this, sizeof(*this)); } void init(THD *thd, TABLE_LIST *tl); @@ -1606,6 +1579,8 @@ struct TABLE void mark_columns_used_by_virtual_fields(void); void mark_check_constraint_columns_for_read(void); int verify_constraints(bool ignore_failure); + void free_engine_stats(); + void update_engine_independent_stats(); inline void column_bitmaps_set(MY_BITMAP *read_set_arg) { read_set= read_set_arg; diff --git a/storage/maria/ma_locking.c b/storage/maria/ma_locking.c index c8e6394179c12..9084be1d29df1 100644 --- a/storage/maria/ma_locking.c +++ b/storage/maria/ma_locking.c @@ -395,7 +395,15 @@ int _ma_mark_file_changed(register MARIA_SHARE *share) if (!share->base.born_transactional) { if (!_MA_ALREADY_MARKED_FILE_CHANGED) - return _ma_mark_file_changed_now(share); + { + int res= _ma_mark_file_changed_now(share); + /* + Ensure that STATE_NOT_ANALYZED is reset on table changes + */ + share->state.changed|= (STATE_CHANGED | STATE_NOT_ANALYZED | + STATE_NOT_OPTIMIZED_KEYS); + 
return res; + } } else { @@ -409,10 +417,10 @@ int _ma_mark_file_changed(register MARIA_SHARE *share) (STATE_CHANGED | STATE_NOT_ANALYZED | STATE_NOT_OPTIMIZED_KEYS))) { - mysql_mutex_lock(&share->intern_lock); + mysql_mutex_lock(&share->intern_lock); share->state.changed|=(STATE_CHANGED | STATE_NOT_ANALYZED | - STATE_NOT_OPTIMIZED_KEYS); - mysql_mutex_unlock(&share->intern_lock); + STATE_NOT_OPTIMIZED_KEYS); + mysql_mutex_unlock(&share->intern_lock); } } return 0; @@ -430,7 +438,7 @@ int _ma_mark_file_changed_now(register MARIA_SHARE *share) if (! _MA_ALREADY_MARKED_FILE_CHANGED) { share->state.changed|=(STATE_CHANGED | STATE_NOT_ANALYZED | - STATE_NOT_OPTIMIZED_KEYS); + STATE_NOT_OPTIMIZED_KEYS); if (!share->global_changed) { share->changed= share->global_changed= 1; diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index 33a1c86c0d72f..cee1c326b3ef7 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -603,12 +603,15 @@ int _mi_mark_file_changed(MI_INFO *info) { uchar buff[3]; register MYISAM_SHARE *share=info->s; + uint32 state; DBUG_ENTER("_mi_mark_file_changed"); - if (!(share->state.changed & STATE_CHANGED) || ! share->global_changed) + state= share->state.changed; + share->state.changed|= (STATE_CHANGED | STATE_NOT_ANALYZED | + STATE_NOT_OPTIMIZED_KEYS); + + if (!(state & STATE_CHANGED) || ! 
share->global_changed) { - share->state.changed|=(STATE_CHANGED | STATE_NOT_ANALYZED | - STATE_NOT_OPTIMIZED_KEYS); if (!share->global_changed) { share->global_changed=1; From be5fd3ec3587668d7c1730949a1b6f3504b137d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 21 Aug 2023 13:28:12 +0300 Subject: [PATCH 006/165] Remove a stale comment buf_LRU_block_remove_hashed(): Remove a comment that had been added in mysql/mysql-server@aad1c7d0dd8a152ef6bb685356c68ad9978d686a and apparently referring to buf_LRU_invalidate_tablespace(), which was later replaced with buf_LRU_flush_or_remove_pages() and ultimately with buf_flush_remove_pages() and buf_flush_list_space(). All that code is covered by buf_pool.mutex. The note about releasing the hash_lock for the buf_pool.page_hash slice would actually apply to the last reference to hash_lock in buf_LRU_free_page(), for the case zip=false (retaining a ROW_FORMAT=COMPRESSED page while discarding the uncompressed one). --- storage/innobase/buf/buf0lru.cc | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 19a0b5a3eb52c..0e9f834f12723 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1186,25 +1186,6 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, return true; } - /* Question: If we release hash_lock here - then what protects us against: - 1) Some other thread buffer fixing this page - 2) Some other thread trying to read this page and - not finding it in buffer pool attempting to read it - from the disk. - Answer: - 1) Cannot happen because the page is no longer in the - page_hash. Only possibility is when while invalidating - a tablespace we buffer fix the prev_page in LRU to - avoid relocation during the scan. But that is not - possible because we are holding buf_pool mutex. 
- - 2) Not possible because in buf_page_init_for_read() - we do a look up of page_hash while holding buf_pool - mutex and since we are holding buf_pool mutex here - and by the time we'll release it in the caller we'd - have inserted the compressed only descriptor in the - page_hash. */ hash_lock->write_unlock(); if (bpage->zip.data) { From c062b351f0133998639199713fc658f6e43e6821 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 21 Aug 2023 13:00:34 +0200 Subject: [PATCH 007/165] Make vgdb call more universal. --- mysql-test/lib/My/Debugger.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/lib/My/Debugger.pm b/mysql-test/lib/My/Debugger.pm index b1a287d660de1..3dcbf1dd73576 100644 --- a/mysql-test/lib/My/Debugger.pm +++ b/mysql-test/lib/My/Debugger.pm @@ -91,7 +91,7 @@ py import subprocess,shlex,time valg=subprocess.Popen(shlex.split("""valgrind --tool=memcheck --show-reachable=yes --leak-check=yes --num-callers=16 --quiet --suppressions=valgrind.supp --vgdb-error=0 {exe} {args} --loose-wait-for-pos-timeout=1500""")) time.sleep(2) -gdb.execute("target remote | /usr/lib64/valgrind/../../bin/vgdb --pid=" + str(valg.pid)) +gdb.execute("target remote | vgdb --pid=" + str(valg.pid)) EEE pre => sub { my $debug_libraries_path= "/usr/lib/debug"; From 6cc88c3db155eedcd2b76651e9e37d227ff6003e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 21 Aug 2023 15:51:10 +0300 Subject: [PATCH 008/165] Clean up buf0buf.inl Let us move some #include directives from buf0buf.inl to the compilation units where they are really used. 
--- storage/innobase/btr/btr0cur.cc | 1 + storage/innobase/buf/buf0buf.cc | 7 +++++-- storage/innobase/buf/buf0dblwr.cc | 2 +- storage/innobase/buf/buf0dump.cc | 2 +- storage/innobase/fil/fil0crypt.cc | 2 +- storage/innobase/include/buf0buf.inl | 5 ----- storage/innobase/include/dict0mem.h | 1 + storage/innobase/row/row0import.cc | 1 + storage/innobase/row/row0mysql.cc | 1 + storage/innobase/row/row0quiesce.cc | 1 + storage/innobase/srv/srv0mon.cc | 2 +- 11 files changed, 14 insertions(+), 11 deletions(-) diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index dde9e1ce1d277..6dc04de2e33e1 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -51,6 +51,7 @@ Created 10/16/1994 Heikki Tuuri #include "rem0rec.h" #include "rem0cmp.h" #include "buf0lru.h" +#include "buf0rea.h" #include "btr0btr.h" #include "btr0sea.h" #include "row0log.h" diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 14b5f0e682bcc..df25800ad06bf 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -34,18 +34,21 @@ Created 11/5/1995 Heikki Tuuri #include "assume_aligned.h" #include "mtr0types.h" #include "mach0data.h" -#include "buf0buf.h" #include "buf0checksum.h" #include "ut0crc32.h" #include "mariadb_stats.h" #include -#ifndef UNIV_INNOCHECKSUM +#ifdef UNIV_INNOCHECKSUM +# include "buf0buf.h" +#else #include "my_cpu.h" #include "mem0mem.h" #include "btr0btr.h" #include "fil0fil.h" #include "fil0crypt.h" +#include "buf0rea.h" +#include "buf0flu.h" #include "buf0buddy.h" #include "buf0dblwr.h" #include "lock0lock.h" diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 8ed061ececc27..8be28a34de4ea 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -25,7 +25,7 @@ Created 2011/12/19 *******************************************************/ #include "buf0dblwr.h" -#include "buf0buf.h" +#include 
"buf0flu.h" #include "buf0checksum.h" #include "srv0start.h" #include "srv0srv.h" diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index 03876666f1d51..ef3be719e565a 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -31,7 +31,7 @@ Created April 08, 2011 Vasil Dimov #include "mysql/psi/mysql_stage.h" #include "mysql/psi/psi.h" -#include "buf0buf.h" +#include "buf0rea.h" #include "buf0dump.h" #include "dict0dict.h" #include "os0file.h" diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index fe75b7d58fa71..c1f1157b0d3c4 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -24,13 +24,13 @@ Modified Jan Lindström jan.lindstrom@mariadb.com *******************************************************/ #include "fil0crypt.h" -#include "mtr0types.h" #include "mach0data.h" #include "page0zip.h" #include "buf0checksum.h" #ifdef UNIV_INNOCHECKSUM # include "buf0buf.h" #else +#include "buf0flu.h" #include "buf0dblwr.h" #include "srv0srv.h" #include "srv0start.h" diff --git a/storage/innobase/include/buf0buf.inl b/storage/innobase/include/buf0buf.inl index 3c4da98f83b87..d7ccc65dca86b 100644 --- a/storage/innobase/include/buf0buf.inl +++ b/storage/innobase/include/buf0buf.inl @@ -31,11 +31,7 @@ The database buffer buf_pool Created 11/5/1995 Heikki Tuuri *******************************************************/ -#include "mtr0mtr.h" -#include "buf0flu.h" #include "buf0lru.h" -#include "buf0rea.h" -#include "fsp0types.h" /** Determine if a block is still close enough to the MRU end of the LRU list meaning that it is not in danger of getting evicted and also implying @@ -141,4 +137,3 @@ buf_block_get_modify_clock( ut_ad(block->page.lock.have_any()); return(block->modify_clock); } - diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index c469b9da1c298..48b5ee5bfda3b 100644 --- a/storage/innobase/include/dict0mem.h 
+++ b/storage/innobase/include/dict0mem.h @@ -43,6 +43,7 @@ Created 1/8/1996 Heikki Tuuri #include "trx0types.h" #include "fts0fts.h" #include "buf0buf.h" +#include "mtr0mtr.h" #include "gis0type.h" #include "fil0fil.h" #include "fil0crypt.h" diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 4afe9e874bb49..a61c18b57eefa 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -29,6 +29,7 @@ Created 2012-02-08 by Sunny Bains. #ifdef BTR_CUR_HASH_ADAPT # include "btr0sea.h" #endif +#include "buf0flu.h" #include "que0que.h" #include "dict0boot.h" #include "dict0load.h" diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 8a2eee38b9f65..f86cd0e18e2b0 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -31,6 +31,7 @@ Created 9/17/2000 Heikki Tuuri #include #include "row0mysql.h" +#include "buf0flu.h" #include "btr0sea.h" #include "dict0boot.h" #include "dict0crea.h" diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc index eadb30bfcfa0e..e927096f2967b 100644 --- a/storage/innobase/row/row0quiesce.cc +++ b/storage/innobase/row/row0quiesce.cc @@ -26,6 +26,7 @@ Created 2012-02-08 by Sunny Bains. 
#include "row0quiesce.h" #include "row0mysql.h" +#include "buf0flu.h" #include "ibuf0ibuf.h" #include "srv0start.h" #include "trx0purge.h" diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index aa14bc009760f..cd0d4475914a5 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -25,7 +25,7 @@ Database monitor counter interfaces Created 12/9/2009 Jimmy Yang *******************************************************/ -#include "buf0buf.h" +#include "buf0flu.h" #include "dict0mem.h" #include "ibuf0ibuf.h" #include "lock0lock.h" From a60462d93e9ae753de620853a9ca86b6d20b4d43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 21 Aug 2023 15:51:16 +0300 Subject: [PATCH 009/165] Remove bogus references to replaced Google contributions In commit 03ca6495df31313c96e38834b9a235245e2ae2a8 and commit ff5d306e296350e7489dd3decb01bad18d135411 we forgot to remove some Google copyright notices related to a contribution of using atomic memory access in the old InnoDB mutex_t and rw_lock_t implementation. The copyright notices had been mostly added in commit c6232c06faff4bf7381958925e1f9c5521a6b8b9 due to commit a1bb700fd2c29b8cbc7f1eeac03f16e0e5887e25. 
The following Google contributions remain: * some logic related to the parameter innodb_io_capacity * innodb_encrypt_tables, added in MariaDB Server 10.1 --- storage/innobase/btr/btr0cur.cc | 7 ------- storage/innobase/btr/btr0sea.cc | 7 ------- storage/innobase/buf/buf0buf.cc | 7 ------- storage/innobase/handler/ha_innodb.cc | 7 ------- storage/innobase/include/buf0buf.inl | 7 ------- storage/innobase/include/log0log.h | 7 ------- storage/innobase/include/log0types.h | 6 ------ storage/innobase/include/univ.i | 7 ------- storage/innobase/log/log0log.cc | 7 ------- storage/innobase/row/row0sel.cc | 7 ------- storage/innobase/srv/srv0start.cc | 7 ------- 11 files changed, 76 deletions(-) diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 6dc04de2e33e1..6f71ecee5e47c 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -1,16 +1,9 @@ /***************************************************************************** Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. Copyright (c) 2015, 2023, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. 
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index a6a05bf3a936c..6905e631e5d8c 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -1,15 +1,8 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2008, Google Inc. Copyright (c) 2017, 2022, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index df25800ad06bf..893c505ab97b1 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1,15 +1,8 @@ /***************************************************************************** Copyright (c) 1995, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2008, Google Inc. Copyright (c) 2013, 2022, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. 
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 03bcdf3363e4f..bbade7ff52951 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1,17 +1,10 @@ /***************************************************************************** Copyright (c) 2000, 2020, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, 2023, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. - Portions of this file contain modifications contributed and copyrighted by Percona Inc.. Those modifications are gratefully acknowledged and are described briefly in the InnoDB diff --git a/storage/innobase/include/buf0buf.inl b/storage/innobase/include/buf0buf.inl index d7ccc65dca86b..f1af9963ab0aa 100644 --- a/storage/innobase/include/buf0buf.inl +++ b/storage/innobase/include/buf0buf.inl @@ -1,15 +1,8 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2008, Google Inc. Copyright (c) 2014, 2021, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. 
- This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index 0996b66ef52af..ba1cdb0f0cb65 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -1,15 +1,8 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2009, Google Inc. Copyright (c) 2017, 2022, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. diff --git a/storage/innobase/include/log0types.h b/storage/innobase/include/log0types.h index 337fcd3179355..df87968d8f258 100644 --- a/storage/innobase/include/log0types.h +++ b/storage/innobase/include/log0types.h @@ -2,12 +2,6 @@ Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. 
- This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index c5f62f6cf57cb..66544842d26ad 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -2,13 +2,6 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2022, MariaDB Corporation. -Copyright (c) 2008, Google Inc. - -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index f65e812f40f9e..2e63958d4e406 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -1,15 +1,8 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2009, Google Inc. Copyright (c) 2014, 2022, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. 
- This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index d1d264a7e8ae3..5c9e5e6354d18 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -1,15 +1,8 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2008, Google Inc. Copyright (c) 2015, 2023, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index c111120ea0e06..78802c8ab254b 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1,16 +1,9 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2022, MariaDB Corporation. -Portions of this file contain modifications contributed and copyrighted by -Google, Inc. Those modifications are gratefully acknowledged and are described -briefly in the InnoDB documentation. The contributions by Google are -incorporated with their permission, and subject to the conditions contained in -the file COPYING.Google. 
- Portions of this file contain modifications contributed and copyrighted by Percona Inc.. Those modifications are gratefully acknowledged and are described briefly in the InnoDB From ff682eada8a9648a2e40fc56c45d45f84cfb39f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 22 Aug 2023 09:00:51 +0300 Subject: [PATCH 010/165] MDEV-20194 test adjustment for s390x The test innodb.row_size_error_log_warnings_3 that was added in commit 372b0e6355fbb6b7dc490b64da13e784c09aeec8 (MDEV-20194) failed to take into account the earlier adjustment in commit cf574cf53b168992b911d5fc32c590a6ee03a56a (MDEV-27634) that is specific to many GNU/Linux distributions for the s390x. --- .../suite/innodb/r/row_size_error_log_warnings_3.result | 7 +++---- .../suite/innodb/t/row_size_error_log_warnings_3.test | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/innodb/r/row_size_error_log_warnings_3.result b/mysql-test/suite/innodb/r/row_size_error_log_warnings_3.result index d00ac93831086..21085e4cd7e71 100644 --- a/mysql-test/suite/innodb/r/row_size_error_log_warnings_3.result +++ b/mysql-test/suite/innodb/r/row_size_error_log_warnings_3.result @@ -51,14 +51,13 @@ set global innodb_compression_level=1; CREATE TABLE t1( f1 INT, f2 CHAR(200), f3 CHAR(200), f4 CHAR(200), f5 CHAR(200), f6 CHAR(200), -f7 CHAR(200), f8 CHAR(200), f9 CHAR(200), -f10 CHAR(200), PRIMARY KEY(f1, f2(20), f3(20), f4(20)) +f7 CHAR(200), f8 CHAR(200), f9 CHAR(106), +PRIMARY KEY(f1, f2(20), f3(20), f4(20)) ) ROW_FORMAT=COMPRESSED, ENGINE=InnoDB; INSERT INTO t1 SELECT seq, repeat('a', 200), repeat('b', 200), repeat('c', 200), repeat('d', 200), repeat('d', 200), repeat('e', 200), -repeat('e', 200), repeat('f', 200), -repeat('g', 200) FROM seq_1_to_20; +repeat('f', 200), repeat('g', 106) FROM seq_1_to_20; DROP TABLE t1; set global innodb_compression_level=default; CREATE TABLE t1(f1 char(200), f2 char(200), f3 char(200), diff --git 
a/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test b/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test
index 209c86a29b5da..39694c05e0f38 100644
--- a/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test
+++ b/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test
@@ -49,15 +49,14 @@ set global innodb_compression_level=1;
 CREATE TABLE t1(
   f1 INT, f2 CHAR(200), f3 CHAR(200),
   f4 CHAR(200), f5 CHAR(200), f6 CHAR(200),
-  f7 CHAR(200), f8 CHAR(200), f9 CHAR(200),
-  f10 CHAR(200), PRIMARY KEY(f1, f2(20), f3(20), f4(20))
+  f7 CHAR(200), f8 CHAR(200), f9 CHAR(106),
+  PRIMARY KEY(f1, f2(20), f3(20), f4(20))
 ) ROW_FORMAT=COMPRESSED, ENGINE=InnoDB;
 
 INSERT INTO t1 SELECT seq, repeat('a', 200),
   repeat('b', 200), repeat('c', 200),
   repeat('d', 200), repeat('d', 200), repeat('e', 200),
-  repeat('e', 200), repeat('f', 200),
-  repeat('g', 200) FROM seq_1_to_20;
+  repeat('f', 200), repeat('g', 106) FROM seq_1_to_20;
 DROP TABLE t1;
 
 set global innodb_compression_level=default;

From e9f3ca612528c5f917e27ef6113fd1deda2aef26 Mon Sep 17 00:00:00 2001
From: Yuchen Pei
Date: Wed, 5 Jul 2023 17:55:30 +1000
Subject: [PATCH 011/165] MDEV-31117 Fix spider connection info parsing

Spider connection string is a comma-separated list of parameter
definitions, where each definition is of the form
"<param_title> <param_value>", where <param_value> is quote delimited
on both ends, with backslashes acting as an escaping prefix.

Despite the simple syntax, the existing spider connection string parser
was poorly-written, complex, hard to reason about and error-prone,
causing issues like the one described in MDEV-31117. For example it
treated param title the same way as param value when assigning, and had
nonsensical fields like delim_title_len and delim_title.
Thus as part of the bugfix, we clean up the spider comment connection string parsing, including: - Factoring out some code from the parsing function - Simplify the struct `st_spider_param_string_parse` - And any necessary changes caused by the above changes --- .../spider/bugfix/r/mdev_31117.result | 19 + .../spider/bugfix/t/mdev_31117.test | 30 + storage/spider/spd_copy_tables.cc | 213 ++--- storage/spider/spd_direct_sql.cc | 239 ++--- storage/spider/spd_table.cc | 831 ++++++++---------- storage/spider/spd_table.h | 305 +------ 6 files changed, 592 insertions(+), 1045 deletions(-) create mode 100644 storage/spider/mysql-test/spider/bugfix/r/mdev_31117.result create mode 100644 storage/spider/mysql-test/spider/bugfix/t/mdev_31117.test diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_31117.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_31117.result new file mode 100644 index 0000000000000..2b09addac0efb --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_31117.result @@ -0,0 +1,19 @@ +# +# MDEV-31117 Spider UBSAN runtime error: applying non-zero offset x to null pointer in st_spider_param_string_parse::restore_delims +# +for master_1 +for child2 +for child3 +CREATE TABLE t (c INT) ENGINE=Spider COMMENT='abc'; +ERROR HY000: The connect info 'abc' is invalid +ALTER TABLE mysql.help_topic ENGINE=Spider; +ERROR HY000: The connect info 'help topics' is invalid +CREATE SERVER srv FOREIGN DATA WRAPPER MYSQL OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root'); +create table t2 (c int); +create table t1 (c int) ENGINE=Spider COMMENT='WRAPPER "mysql", "srv" "srv",TABLE "t2"'; +ERROR HY000: The connect info '"srv" "srv",TABLE "t2"' is invalid +create table t1 (c int) ENGINE=Spider CONNECTION='WRAPPER "mysql", srv \'srv\',TABLE "t2", password "say \\"hello\\ world!\\""'; +drop table t1, t2; +for master_1 +for child2 +for child3 diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_31117.test 
b/storage/spider/mysql-test/spider/bugfix/t/mdev_31117.test new file mode 100644 index 0000000000000..e03bc87425483 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_31117.test @@ -0,0 +1,30 @@ +--echo # +--echo # MDEV-31117 Spider UBSAN runtime error: applying non-zero offset x to null pointer in st_spider_param_string_parse::restore_delims +--echo # + +--disable_query_log +--disable_result_log +--source ../../t/test_init.inc +--enable_result_log +--enable_query_log + +--error 12501 +CREATE TABLE t (c INT) ENGINE=Spider COMMENT='abc'; + +--error 12501 +ALTER TABLE mysql.help_topic ENGINE=Spider; + +evalp CREATE SERVER srv FOREIGN DATA WRAPPER MYSQL OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root'); +create table t2 (c int); +# param title should not have delimiter +--error 12501 +create table t1 (c int) ENGINE=Spider COMMENT='WRAPPER "mysql", "srv" "srv",TABLE "t2"'; +# test escaping +create table t1 (c int) ENGINE=Spider CONNECTION='WRAPPER "mysql", srv \'srv\',TABLE "t2", password "say \\"hello\\ world!\\""'; +drop table t1, t2; + +--disable_query_log +--disable_result_log +--source ../t/test_deinit.inc +--enable_query_log +--enable_result_log diff --git a/storage/spider/spd_copy_tables.cc b/storage/spider/spd_copy_tables.cc index e00e06087764c..f898bf5d1cce1 100644 --- a/storage/spider/spd_copy_tables.cc +++ b/storage/spider/spd_copy_tables.cc @@ -75,146 +75,69 @@ int spider_udf_set_copy_tables_param_default( #define SPIDER_PARAM_STR_LEN(name) name ## _length #define SPIDER_PARAM_STR(title_name, param_name) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (!copy_tables->param_name) \ { \ - if ((copy_tables->param_name = spider_get_string_between_quote( \ - start_ptr, TRUE, ¶m_string_parse))) \ + if ((copy_tables->param_name = spider_create_string(parse.start_value, \ + value_length))) \ 
copy_tables->SPIDER_PARAM_STR_LEN(param_name) = \ strlen(copy_tables->param_name); \ else { \ - error_num = param_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%s", copy_tables->param_name)); \ } \ break; \ } -#define SPIDER_PARAM_HINT_WITH_MAX(title_name, param_name, check_length, max_size, min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, check_length)) \ - { \ - DBUG_PRINT("info",("spider " title_name " start")); \ - DBUG_PRINT("info",("spider max_size=%d", max_size)); \ - int hint_num = atoi(tmp_ptr + check_length) - 1; \ - DBUG_PRINT("info",("spider hint_num=%d", hint_num)); \ - DBUG_PRINT("info",("spider copy_tables->param_name=%x", \ - copy_tables->param_name)); \ - if (copy_tables->param_name) \ - { \ - if (hint_num < 0 || hint_num >= max_size) \ - { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } else if (copy_tables->param_name[hint_num] != -1) \ - break; \ - char *hint_str = spider_get_string_between_quote(start_ptr, FALSE); \ - if (hint_str) \ - { \ - copy_tables->param_name[hint_num] = atoi(hint_str); \ - if (copy_tables->param_name[hint_num] < min_val) \ - copy_tables->param_name[hint_num] = min_val; \ - else if (copy_tables->param_name[hint_num] > max_val) \ - copy_tables->param_name[hint_num] = max_val; \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ - DBUG_PRINT("info",("spider " title_name "[%d]=%d", hint_num, \ - copy_tables->param_name[hint_num])); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ - break; \ - } #define SPIDER_PARAM_INT_WITH_MAX(title_name, param_name, min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (copy_tables->param_name == -1) \ { \ - if ((tmp_ptr2 = 
spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - copy_tables->param_name = atoi(tmp_ptr2); \ - if (copy_tables->param_name < min_val) \ - copy_tables->param_name = min_val; \ - else if (copy_tables->param_name > max_val) \ - copy_tables->param_name = max_val; \ - param_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ + copy_tables->param_name = atoi(parse.start_value); \ + if (copy_tables->param_name < min_val) \ + copy_tables->param_name = min_val; \ + else if (copy_tables->param_name > max_val) \ + copy_tables->param_name = max_val; \ DBUG_PRINT("info",("spider " title_name "=%d", copy_tables->param_name)); \ } \ break; \ } #define SPIDER_PARAM_INT(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (copy_tables->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - copy_tables->param_name = atoi(tmp_ptr2); \ - if (copy_tables->param_name < min_val) \ - copy_tables->param_name = min_val; \ - param_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ + copy_tables->param_name = atoi(parse.start_value); \ + if (copy_tables->param_name < min_val) \ + copy_tables->param_name = min_val; \ DBUG_PRINT("info",("spider " title_name "=%d", copy_tables->param_name)); \ } \ break; \ } #define SPIDER_PARAM_LONGLONG(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (copy_tables->param_name == -1) \ { \ - if ((tmp_ptr2 = 
spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - copy_tables->param_name = \ - my_strtoll10(tmp_ptr2, (char**) NULL, &error_num); \ - if (copy_tables->param_name < min_val) \ - copy_tables->param_name = min_val; \ - param_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ + copy_tables->param_name = \ + my_strtoll10(parse.start_value, (char**) NULL, &error_num); \ + if (copy_tables->param_name < min_val) \ + copy_tables->param_name = min_val; \ DBUG_PRINT("info",("spider " title_name "=%lld", \ - copy_tables->param_name)); \ + copy_tables->param_name)); \ } \ break; \ } -int spider_udf_parse_copy_tables_param( - SPIDER_COPY_TABLES *copy_tables, - char *param, - int param_length -) { - int error_num = 0; - char *param_string = NULL; - char *sprit_ptr; - char *tmp_ptr, *tmp_ptr2, *start_ptr; - int title_length; - SPIDER_PARAM_STRING_PARSE param_string_parse; - DBUG_ENTER("spider_udf_parse_copy_tables_param"); +static void spider_minus_1(SPIDER_COPY_TABLES *copy_tables) +{ copy_tables->bulk_insert_interval = -1; copy_tables->bulk_insert_rows = -1; copy_tables->use_table_charset = -1; @@ -222,57 +145,49 @@ int spider_udf_parse_copy_tables_param( #ifndef WITHOUT_SPIDER_BG_SEARCH copy_tables->bg_mode = -1; #endif +} +int spider_udf_parse_copy_tables_param( + SPIDER_COPY_TABLES *copy_tables, + char *param, + int param_length +) { + int error_num = 0; + char *param_string = NULL; + char *start_param; + int title_length, value_length; + SPIDER_PARAM_STRING_PARSE parse; + DBUG_ENTER("spider_udf_parse_copy_tables_param"); + spider_minus_1(copy_tables); if (param_length == 0) goto set_default; DBUG_PRINT("info",("spider create param_string string")); - if ( - !(param_string = spider_create_string( - param, - param_length)) - ) { + if (!(param_string = spider_create_string(param, param_length))) + { error_num = HA_ERR_OUT_OF_MEM; 
my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error_alloc_param_string; } DBUG_PRINT("info",("spider param_string=%s", param_string)); - sprit_ptr = param_string; - param_string_parse.init(param_string, ER_SPIDER_INVALID_UDF_PARAM_NUM); - while (sprit_ptr) + start_param = param_string; + parse.error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; + while (*start_param != '\0') { - tmp_ptr = sprit_ptr; - while (*tmp_ptr == ' ' || *tmp_ptr == '\r' || - *tmp_ptr == '\n' || *tmp_ptr == '\t') - tmp_ptr++; - - if (*tmp_ptr == '\0') - break; - - title_length = 0; - start_ptr = tmp_ptr; - while (*start_ptr != ' ' && *start_ptr != '\'' && - *start_ptr != '"' && *start_ptr != '\0' && - *start_ptr != '\r' && *start_ptr != '\n' && - *start_ptr != '\t') - { - title_length++; - start_ptr++; - } - param_string_parse.set_param_title(tmp_ptr, tmp_ptr + title_length); - if ((error_num = param_string_parse.get_next_parameter_head( - start_ptr, &sprit_ptr))) + if (parse.locate_param_def(start_param)) { + error_num= parse.fail(false); goto error; } + /* Null the end of the parameter value. 
*/ + *parse.end_value= '\0'; + value_length= (int) (parse.end_value - parse.start_value); - switch (title_length) + switch (title_length = (int) (parse.end_title - parse.start_title)) { case 0: - error_num = param_string_parse.print_param_error(); - if (error_num) - goto error; - continue; + error_num= parse.fail(true); + goto error; case 3: #ifndef WITHOUT_SPIDER_BG_SEARCH SPIDER_PARAM_INT_WITH_MAX("bgm", bg_mode, 0, 1); @@ -282,55 +197,45 @@ int spider_udf_parse_copy_tables_param( SPIDER_PARAM_STR("dtb", database); SPIDER_PARAM_INT_WITH_MAX("utc", use_table_charset, 0, 1); SPIDER_PARAM_INT_WITH_MAX("utr", use_transaction, 0, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; #ifndef WITHOUT_SPIDER_BG_SEARCH case 7: SPIDER_PARAM_INT_WITH_MAX("bg_mode", bg_mode, 0, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; #endif case 8: SPIDER_PARAM_STR("database", database); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 15: SPIDER_PARAM_INT_WITH_MAX("use_transaction", use_transaction, 0, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 16: SPIDER_PARAM_LONGLONG("bulk_insert_rows", bulk_insert_rows, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 17: SPIDER_PARAM_INT_WITH_MAX( "use_table_charset", use_table_charset, 0, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 20: SPIDER_PARAM_INT("bulk_insert_interval", bulk_insert_interval, 0); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; default: - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; } - - /* Verify that the remainder of the parameter value is whitespace */ - if ((error_num = 
param_string_parse.has_extra_parameter_values())) - goto error; + /* Restore delim */ + *parse.end_value= parse.delim_value; } set_default: - if ((error_num = spider_udf_set_copy_tables_param_default( - copy_tables - ))) - goto error; - - if (param_string) - spider_free(spider_current_trx, param_string, MYF(0)); - DBUG_RETURN(0); - + error_num = spider_udf_set_copy_tables_param_default(copy_tables); error: if (param_string) spider_free(spider_current_trx, param_string, MYF(0)); diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc index 6a04488c50f62..dfbb0e04a81a1 100644 --- a/storage/spider/spd_direct_sql.cc +++ b/storage/spider/spd_direct_sql.cc @@ -978,17 +978,16 @@ int spider_udf_direct_sql_get_server( #define SPIDER_PARAM_STR_LEN(name) name ## _length #define SPIDER_PARAM_STR(title_name, param_name) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (!direct_sql->param_name) \ { \ - if ((direct_sql->param_name = spider_get_string_between_quote( \ - start_ptr, TRUE, ¶m_string_parse))) \ - direct_sql->SPIDER_PARAM_STR_LEN(param_name) = \ - strlen(direct_sql->param_name); \ + if ((direct_sql->param_name = spider_create_string(parse.start_value, \ + value_length))) \ + direct_sql->SPIDER_PARAM_STR_LEN(param_name) = strlen(direct_sql->param_name); \ else { \ - error_num = param_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%s", direct_sql->param_name)); \ @@ -996,130 +995,81 @@ int spider_udf_direct_sql_get_server( break; \ } #define SPIDER_PARAM_HINT_WITH_MAX(title_name, param_name, check_length, max_size, min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, check_length)) \ + if (!strncasecmp(parse.start_title, title_name, check_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ 
DBUG_PRINT("info",("spider max_size=%d", max_size)); \ - int hint_num = atoi(tmp_ptr + check_length) - 1; \ + int hint_num = atoi(parse.start_title + check_length) - 1; \ DBUG_PRINT("info",("spider hint_num=%d", hint_num)); \ DBUG_PRINT("info",("spider direct_sql->param_name=%p", \ - direct_sql->param_name)); \ + direct_sql->param_name)); \ if (direct_sql->param_name) \ { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = param_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } else if (direct_sql->param_name[hint_num] != -1) \ break; \ - char *hint_str = spider_get_string_between_quote(start_ptr, FALSE); \ - if (hint_str) \ - { \ - direct_sql->param_name[hint_num] = atoi(hint_str); \ - if (direct_sql->param_name[hint_num] < min_val) \ - direct_sql->param_name[hint_num] = min_val; \ - else if (direct_sql->param_name[hint_num] > max_val) \ - direct_sql->param_name[hint_num] = max_val; \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ + direct_sql->param_name[hint_num] = atoi(parse.start_value); \ + if (direct_sql->param_name[hint_num] < min_val) \ + direct_sql->param_name[hint_num] = min_val; \ + else if (direct_sql->param_name[hint_num] > max_val) \ + direct_sql->param_name[hint_num] = max_val; \ DBUG_PRINT("info",("spider " title_name "[%d]=%d", hint_num, \ - direct_sql->param_name[hint_num])); \ + direct_sql->param_name[hint_num])); \ } else { \ - error_num = param_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } \ break; \ } #define SPIDER_PARAM_INT_WITH_MAX(title_name, param_name, min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (direct_sql->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - direct_sql->param_name = 
atoi(tmp_ptr2); \ - if (direct_sql->param_name < min_val) \ - direct_sql->param_name = min_val; \ - else if (direct_sql->param_name > max_val) \ - direct_sql->param_name = max_val; \ - param_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ + direct_sql->param_name = atoi(parse.start_value); \ + if (direct_sql->param_name < min_val) \ + direct_sql->param_name = min_val; \ + else if (direct_sql->param_name > max_val) \ + direct_sql->param_name = max_val; \ DBUG_PRINT("info",("spider " title_name "=%d", \ - (int) direct_sql->param_name)); \ + (int) direct_sql->param_name)); \ } \ break; \ } #define SPIDER_PARAM_INT(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (direct_sql->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - direct_sql->param_name = atoi(tmp_ptr2); \ - if (direct_sql->param_name < min_val) \ - direct_sql->param_name = min_val; \ - param_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ + direct_sql->param_name = atoi(parse.start_value); \ + if (direct_sql->param_name < min_val) \ + direct_sql->param_name = min_val; \ DBUG_PRINT("info",("spider " title_name "=%d", direct_sql->param_name)); \ } \ break; \ } #define SPIDER_PARAM_LONGLONG(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (direct_sql->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - 
direct_sql->param_name = \ - my_strtoll10(tmp_ptr2, (char**) NULL, &error_num); \ - if (direct_sql->param_name < min_val) \ - direct_sql->param_name = min_val; \ - param_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = param_string_parse.print_param_error(); \ - goto error; \ - } \ - DBUG_PRINT("info",("spider " title_name "=%lld", \ - direct_sql->param_name)); \ + direct_sql->param_name = my_strtoll10(parse.start_value, (char**) NULL, \ + &error_num); \ + if (direct_sql->param_name < min_val) \ + direct_sql->param_name = min_val; \ + DBUG_PRINT("info",("spider " title_name "=%lld", direct_sql->param_name)); \ } \ break; \ } -int spider_udf_parse_direct_sql_param( - SPIDER_TRX *trx, - SPIDER_DIRECT_SQL *direct_sql, - const char *param, - int param_length -) { - int error_num = 0, roop_count; - char *param_string = NULL; - char *sprit_ptr; - char *tmp_ptr, *tmp_ptr2, *start_ptr; - int title_length; - SPIDER_PARAM_STRING_PARSE param_string_parse; - DBUG_ENTER("spider_udf_parse_direct_sql_param"); +static void spider_minus_1(SPIDER_DIRECT_SQL *direct_sql) +{ direct_sql->tgt_port = -1; direct_sql->tgt_ssl_vsc = -1; direct_sql->table_loop_mode = -1; @@ -1137,59 +1087,53 @@ int spider_udf_parse_direct_sql_param( direct_sql->use_real_table = -1; #endif direct_sql->error_rw_mode = -1; - for (roop_count = 0; roop_count < direct_sql->table_count; roop_count++) - direct_sql->iop[roop_count] = -1; + for (int i = 0; i < direct_sql->table_count; i++) + direct_sql->iop[i] = -1; +} +int spider_udf_parse_direct_sql_param( + SPIDER_TRX *trx, + SPIDER_DIRECT_SQL *direct_sql, + const char *param, + int param_length +) { + int error_num = 0; + char *param_string = NULL; + char *start_param; + int title_length, value_length; + SPIDER_PARAM_STRING_PARSE parse; + DBUG_ENTER("spider_udf_parse_direct_sql_param"); + + spider_minus_1(direct_sql); if (param_length == 0) goto set_default; DBUG_PRINT("info",("spider create param_string 
string")); - if ( - !(param_string = spider_create_string( - param, - param_length)) - ) { + if (!(param_string = spider_create_string(param, param_length))) + { error_num = HA_ERR_OUT_OF_MEM; my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error_alloc_param_string; } DBUG_PRINT("info",("spider param_string=%s", param_string)); - sprit_ptr = param_string; - param_string_parse.init(param_string, ER_SPIDER_INVALID_UDF_PARAM_NUM); - while (sprit_ptr) + start_param = param_string; + parse.error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; + while (*start_param != '\0') { - tmp_ptr = sprit_ptr; - while (*tmp_ptr == ' ' || *tmp_ptr == '\r' || - *tmp_ptr == '\n' || *tmp_ptr == '\t') - tmp_ptr++; - - if (*tmp_ptr == '\0') - break; - - title_length = 0; - start_ptr = tmp_ptr; - while (*start_ptr != ' ' && *start_ptr != '\'' && - *start_ptr != '"' && *start_ptr != '\0' && - *start_ptr != '\r' && *start_ptr != '\n' && - *start_ptr != '\t') - { - title_length++; - start_ptr++; - } - param_string_parse.set_param_title(tmp_ptr, tmp_ptr + title_length); - if ((error_num = param_string_parse.get_next_parameter_head( - start_ptr, &sprit_ptr))) + if (parse.locate_param_def(start_param)) { + error_num= parse.fail(false); goto error; } + /* Null the end of the parameter value. 
*/ + *parse.end_value= '\0'; + value_length= (int) (parse.end_value - parse.start_value); - switch (title_length) + switch (title_length = (int) (parse.end_title - parse.start_title)) { case 0: - error_num = param_string_parse.print_param_error(); - if (error_num) - goto error; - continue; + error_num= parse.fail(true); + goto error; case 3: #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) SPIDER_PARAM_INT_WITH_MAX("acm", access_mode, 0, 2); @@ -1214,112 +1158,97 @@ int spider_udf_parse_direct_sql_param( SPIDER_PARAM_INT_WITH_MAX("urt", use_real_table, 0, 1); #endif SPIDER_PARAM_INT("wto", net_write_timeout, 0); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 4: SPIDER_PARAM_INT_WITH_MAX("erwm", error_rw_mode, 0, 1); SPIDER_PARAM_STR("host", tgt_host); SPIDER_PARAM_INT_WITH_MAX("port", tgt_port, 0, 65535); SPIDER_PARAM_STR("user", tgt_username); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 6: SPIDER_PARAM_STR("server", server_name); SPIDER_PARAM_STR("socket", tgt_socket); SPIDER_PARAM_HINT_WITH_MAX("iop", iop, 3, direct_sql->table_count, 0, 2); SPIDER_PARAM_STR("ssl_ca", tgt_ssl_ca); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 7: SPIDER_PARAM_STR("wrapper", tgt_wrapper); SPIDER_PARAM_STR("ssl_key", tgt_ssl_key); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 8: SPIDER_PARAM_STR("database", tgt_default_db_name); SPIDER_PARAM_STR("password", tgt_password); SPIDER_PARAM_LONGLONG("priority", priority, 0); SPIDER_PARAM_STR("ssl_cert", tgt_ssl_cert); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 10: SPIDER_PARAM_STR("ssl_cipher", tgt_ssl_cipher); SPIDER_PARAM_STR("ssl_capath", tgt_ssl_capath); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto 
error; case 11: #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) SPIDER_PARAM_INT_WITH_MAX("access_mode", access_mode, 0, 2); #endif - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 12: SPIDER_PARAM_STR("default_file", tgt_default_file); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 13: SPIDER_PARAM_STR("default_group", tgt_default_group); SPIDER_PARAM_INT_WITH_MAX("error_rw_mode", error_rw_mode, 0, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 14: #if MYSQL_VERSION_ID < 50500 #else SPIDER_PARAM_INT_WITH_MAX("use_real_table", use_real_table, 0, 1); #endif - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 15: SPIDER_PARAM_INT_WITH_MAX("table_loop_mode", table_loop_mode, 0, 2); SPIDER_PARAM_INT("connect_timeout", connect_timeout, 0); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 16: SPIDER_PARAM_LONGLONG("bulk_insert_rows", bulk_insert_rows, 1); SPIDER_PARAM_INT("net_read_timeout", net_read_timeout, 0); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 17: SPIDER_PARAM_INT("net_write_timeout", net_write_timeout, 0); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 18: SPIDER_PARAM_INT_WITH_MAX( "connection_channel", connection_channel, 0, 63); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; case 22: SPIDER_PARAM_INT_WITH_MAX("ssl_verify_server_cert", tgt_ssl_vsc, 0, 1); - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; default: - error_num = param_string_parse.print_param_error(); + error_num= parse.fail(true); goto error; } - - /* Verify that the remainder of the parameter value is 
whitespace */ - if ((error_num = param_string_parse.has_extra_parameter_values())) - goto error; + /* Restore delim */ + *parse.end_value= parse.delim_value; } set_default: - if ((error_num = spider_udf_set_direct_sql_param_default( - trx, - direct_sql - ))) - goto error; - - if (param_string) - { - spider_free(spider_current_trx, param_string, MYF(0)); - } - DBUG_RETURN(0); - + error_num = spider_udf_set_direct_sql_param_default(trx, direct_sql); error: if (param_string) - { spider_free(spider_current_trx, param_string, MYF(0)); - } error_alloc_param_string: DBUG_RETURN(error_num); } diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index a07df994e4607..30dc049213d20 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -1046,123 +1046,12 @@ void spider_free_tmp_share_alloc( DBUG_VOID_RETURN; } -char *spider_get_string_between_quote( - char *ptr, - bool alloc, - SPIDER_PARAM_STRING_PARSE *param_string_parse -) { - char *start_ptr, *end_ptr, *tmp_ptr, *esc_ptr; - bool find_flg = FALSE, esc_flg = FALSE; - DBUG_ENTER("spider_get_string_between_quote"); - - start_ptr = strchr(ptr, '\''); - end_ptr = strchr(ptr, '"'); - if (start_ptr && (!end_ptr || start_ptr < end_ptr)) - { - tmp_ptr = ++start_ptr; - while (!find_flg) - { - if (!(end_ptr = strchr(tmp_ptr, '\''))) - DBUG_RETURN(NULL); - esc_ptr = tmp_ptr; - while (!find_flg) - { - esc_ptr = strchr(esc_ptr, '\\'); - if (!esc_ptr || esc_ptr > end_ptr) - find_flg = TRUE; - else if (esc_ptr == end_ptr - 1) - { - esc_flg = TRUE; - tmp_ptr = end_ptr + 1; - break; - } else { - esc_flg = TRUE; - esc_ptr += 2; - } - } - } - } else if (end_ptr) - { - start_ptr = end_ptr; - tmp_ptr = ++start_ptr; - while (!find_flg) - { - if (!(end_ptr = strchr(tmp_ptr, '"'))) - DBUG_RETURN(NULL); - esc_ptr = tmp_ptr; - while (!find_flg) - { - esc_ptr = strchr(esc_ptr, '\\'); - if (!esc_ptr || esc_ptr > end_ptr) - find_flg = TRUE; - else if (esc_ptr == end_ptr - 1) - { - esc_flg = TRUE; - tmp_ptr = 
end_ptr + 1; - break; - } else { - esc_flg = TRUE; - esc_ptr += 2; - } - } - } - } else - DBUG_RETURN(NULL); - - *end_ptr = '\0'; - if (esc_flg) - { - esc_ptr = start_ptr; - while (TRUE) - { - esc_ptr = strchr(esc_ptr, '\\'); - if (!esc_ptr) - break; - switch(*(esc_ptr + 1)) - { - case 'b': - *esc_ptr = '\b'; - break; - case 'n': - *esc_ptr = '\n'; - break; - case 'r': - *esc_ptr = '\r'; - break; - case 't': - *esc_ptr = '\t'; - break; - default: - *esc_ptr = *(esc_ptr + 1); - break; - } - esc_ptr++; - strcpy(esc_ptr, esc_ptr + 1); - } - } - - if (param_string_parse) - param_string_parse->set_param_value(start_ptr, start_ptr + strlen(start_ptr) + 1); - - if (alloc) - { - DBUG_RETURN( - spider_create_string( - start_ptr, - strlen(start_ptr)) - ); - } else { - DBUG_RETURN(start_ptr); - } -} - int spider_create_string_list( char ***string_list, uint **string_length_list, uint *list_length, char *str, - uint length, - SPIDER_PARAM_STRING_PARSE *param_string_parse + uint length ) { int roop_count; char *tmp_ptr, *tmp_ptr2, *tmp_ptr3, *esc_ptr; @@ -1170,7 +1059,6 @@ int spider_create_string_list( DBUG_ENTER("spider_create_string_list"); *list_length = 0; - param_string_parse->init_param_value(); if (!str) { *string_list = NULL; @@ -1282,9 +1170,6 @@ int spider_create_string_list( DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - param_string_parse->set_param_value(tmp_ptr3, - tmp_ptr3 + strlen(tmp_ptr3) + 1); - DBUG_PRINT("info",("spider string_list[%d]=%s", roop_count, (*string_list)[roop_count])); @@ -1297,15 +1182,13 @@ int spider_create_long_list( char *str, uint length, long min_val, - long max_val, - SPIDER_PARAM_STRING_PARSE *param_string_parse + long max_val ) { int roop_count; char *tmp_ptr; DBUG_ENTER("spider_create_long_list"); *list_length = 0; - param_string_parse->init_param_value(); if (!str) { *long_list = NULL; @@ -1361,9 +1244,6 @@ int spider_create_long_list( (*long_list)[roop_count] = max_val; } - param_string_parse->set_param_value(tmp_ptr, - tmp_ptr + 
strlen(tmp_ptr) + 1); - #ifndef DBUG_OFF for (roop_count = 0; roop_count < (int) *list_length; roop_count++) { @@ -1381,15 +1261,13 @@ int spider_create_longlong_list( char *str, uint length, longlong min_val, - longlong max_val, - SPIDER_PARAM_STRING_PARSE *param_string_parse + longlong max_val ) { int error_num, roop_count; char *tmp_ptr; DBUG_ENTER("spider_create_longlong_list"); *list_length = 0; - param_string_parse->init_param_value(); if (!str) { *longlong_list = NULL; @@ -1446,9 +1324,6 @@ int spider_create_longlong_list( (*longlong_list)[roop_count] = max_val; } - param_string_parse->set_param_value(tmp_ptr, - tmp_ptr + strlen(tmp_ptr) + 1); - #ifndef DBUG_OFF for (roop_count = 0; roop_count < (int) *list_length; roop_count++) { @@ -1688,50 +1563,18 @@ static int spider_set_ll_value( DBUG_RETURN(error_num); } -/** - Print a parameter string error message. - - @return Error code. -*/ - -int st_spider_param_string_parse::print_param_error() -{ - if (start_title_ptr) - { - /* Restore the input delimiter characters */ - restore_delims(); - - /* Print the error message */ - switch (error_num) - { - case ER_SPIDER_INVALID_UDF_PARAM_NUM: - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), start_title_ptr); - break; - case ER_SPIDER_INVALID_CONNECT_INFO_NUM: - default: - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), start_title_ptr); - } - - return error_num; - } - else - return 0; -} - #define SPIDER_PARAM_STR_LEN(name) name ## _length #define SPIDER_PARAM_STR(title_name, param_name) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (!share->param_name) \ { \ - if ((share->param_name = spider_get_string_between_quote( \ - start_ptr, TRUE, &connect_string_parse))) \ + if ((share->param_name = spider_create_string(parse.start_value, \ + value_length))) \ 
share->SPIDER_PARAM_STR_LEN(param_name) = strlen(share->param_name); \ else { \ - error_num = connect_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%s", share->param_name)); \ @@ -1741,272 +1584,171 @@ int st_spider_param_string_parse::print_param_error() #define SPIDER_PARAM_STR_LENS(name) name ## _lengths #define SPIDER_PARAM_STR_CHARLEN(name) name ## _charlen #define SPIDER_PARAM_STR_LIST(title_name, param_name) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ - DBUG_PRINT("info",("spider " title_name " start")); \ + DBUG_PRINT("info", ("spider " title_name " start")); \ if (!share->param_name) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - share->SPIDER_PARAM_STR_CHARLEN(param_name) = strlen(tmp_ptr2); \ - if ((error_num = spider_create_string_list( \ - &share->param_name, \ - &share->SPIDER_PARAM_STR_LENS(param_name), \ - &share->SPIDER_PARAM_STR_LEN(param_name), \ - tmp_ptr2, \ - share->SPIDER_PARAM_STR_CHARLEN(param_name), \ - &connect_string_parse))) \ - goto error; \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ + share->SPIDER_PARAM_STR_CHARLEN(param_name)= value_length; \ + if ((error_num= spider_create_string_list( \ + &share->param_name, \ + &share->SPIDER_PARAM_STR_LENS(param_name), \ + &share->SPIDER_PARAM_STR_LEN(param_name), \ + parse.start_value, \ + share->SPIDER_PARAM_STR_CHARLEN(param_name)))) \ goto error; \ - } \ } \ break; \ } #define SPIDER_PARAM_HINT(title_name, param_name, check_length, max_size, append_method) \ - if (!strncasecmp(tmp_ptr, title_name, check_length)) \ + if (!strncasecmp(parse.start_title, title_name, check_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ DBUG_PRINT("info",("spider max_size=%d", max_size)); \ - int hint_num = atoi(tmp_ptr + check_length); \ + int hint_num = 
atoi(parse.start_title + check_length); \ DBUG_PRINT("info",("spider hint_num=%d", hint_num)); \ - DBUG_PRINT("info",("spider share->param_name=%p", share->param_name)); \ + DBUG_PRINT("info",("spider share->param_name=%p", \ + share->param_name)); \ if (share->param_name) \ { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = connect_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } else if (share->param_name[hint_num].length() > 0) \ break; \ - char *hint_str = spider_get_string_between_quote(start_ptr, FALSE); \ - if ((error_num = \ - append_method(&share->param_name[hint_num], hint_str))) \ + if ((error_num= append_method(&share->param_name[hint_num], \ + parse.start_value))) \ goto error; \ DBUG_PRINT("info",("spider " title_name "[%d]=%s", hint_num, \ - share->param_name[hint_num].ptr())); \ + share->param_name[hint_num].ptr())); \ } else { \ - error_num = connect_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } \ break; \ } #define SPIDER_PARAM_NUMHINT(title_name, param_name, check_length, max_size, append_method) \ - if (!strncasecmp(tmp_ptr, title_name, check_length)) \ + if (!strncasecmp(parse.start_title, title_name, check_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ DBUG_PRINT("info",("spider max_size=%d", max_size)); \ - int hint_num = atoi(tmp_ptr + check_length); \ + int hint_num = atoi(parse.start_title + check_length); \ DBUG_PRINT("info",("spider hint_num=%d", hint_num)); \ DBUG_PRINT("info",("spider share->param_name=%p", share->param_name)); \ if (share->param_name) \ { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = connect_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } else if (share->param_name[hint_num] != -1) \ break; \ - char *hint_str = spider_get_string_between_quote(start_ptr, FALSE); \ if ((error_num = \ - append_method(&share->param_name[hint_num], hint_str))) \ + 
append_method(&share->param_name[hint_num], parse.start_value))) \ goto error; \ DBUG_PRINT("info",("spider " title_name "[%d]=%lld", hint_num, \ - share->param_name[hint_num])); \ + share->param_name[hint_num])); \ } else { \ - error_num = connect_string_parse.print_param_error(); \ + error_num= parse.fail(true); \ goto error; \ } \ break; \ } #define SPIDER_PARAM_LONG_LEN(name) name ## _length #define SPIDER_PARAM_LONG_LIST_WITH_MAX(title_name, param_name, \ - min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + min_val, max_val) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (!share->param_name) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - if ((error_num = spider_create_long_list( \ - &share->param_name, \ - &share->SPIDER_PARAM_LONG_LEN(param_name), \ - tmp_ptr2, \ - strlen(tmp_ptr2), \ - min_val, max_val, \ - &connect_string_parse))) \ - goto error; \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ + if ((error_num = spider_create_long_list( \ + &share->param_name, \ + &share->SPIDER_PARAM_LONG_LEN(param_name), \ + parse.start_value, \ + value_length, \ + min_val, max_val))) \ goto error; \ - } \ } \ break; \ } #define SPIDER_PARAM_LONGLONG_LEN(name) name ## _length #define SPIDER_PARAM_LONGLONG_LIST_WITH_MAX(title_name, param_name, \ - min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + min_val, max_val) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (!share->param_name) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - if ((error_num = spider_create_longlong_list( \ - &share->param_name, \ - &share->SPIDER_PARAM_LONGLONG_LEN(param_name), \ - tmp_ptr2, \ - strlen(tmp_ptr2), \ - min_val, max_val, \ - &connect_string_parse))) \ - goto 
error; \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ + if ((error_num = spider_create_longlong_list( \ + &share->param_name, \ + &share->SPIDER_PARAM_LONGLONG_LEN(param_name), \ + parse.start_value, \ + value_length, \ + min_val, max_val))) \ goto error; \ - } \ } \ break; \ } #define SPIDER_PARAM_INT_WITH_MAX(title_name, param_name, min_val, max_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (share->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - share->param_name = atoi(tmp_ptr2); \ - if (share->param_name < min_val) \ - share->param_name = min_val; \ - else if (share->param_name > max_val) \ - share->param_name = max_val; \ - connect_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ - goto error; \ - } \ + share->param_name = atoi(parse.start_value); \ + if (share->param_name < min_val) \ + share->param_name = min_val; \ + else if (share->param_name > max_val) \ + share->param_name = max_val; \ DBUG_PRINT("info",("spider " title_name "=%d", share->param_name)); \ } \ break; \ } #define SPIDER_PARAM_INT(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (share->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - share->param_name = atoi(tmp_ptr2); \ - if (share->param_name < min_val) \ - share->param_name = min_val; \ - connect_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ - goto error; \ - } \ + 
share->param_name = atoi(parse.start_value); \ + if (share->param_name < min_val) \ + share->param_name = min_val; \ DBUG_PRINT("info",("spider " title_name "=%d", share->param_name)); \ } \ break; \ } #define SPIDER_PARAM_DOUBLE(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (share->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - share->param_name = my_atof(tmp_ptr2); \ - if (share->param_name < min_val) \ - share->param_name = min_val; \ - connect_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ - goto error; \ - } \ + share->param_name = my_atof(parse.start_value); \ + if (share->param_name < min_val) \ + share->param_name = min_val; \ DBUG_PRINT("info",("spider " title_name "=%f", share->param_name)); \ } \ break; \ } #define SPIDER_PARAM_LONGLONG(title_name, param_name, min_val) \ - if (!strncasecmp(tmp_ptr, title_name, title_length)) \ + if (!strncasecmp(parse.start_title, title_name, title_length)) \ { \ DBUG_PRINT("info",("spider " title_name " start")); \ if (share->param_name == -1) \ { \ - if ((tmp_ptr2 = spider_get_string_between_quote( \ - start_ptr, FALSE))) \ - { \ - share->param_name = my_strtoll10(tmp_ptr2, (char**) NULL, &error_num); \ - if (share->param_name < min_val) \ - share->param_name = min_val; \ - connect_string_parse.set_param_value(tmp_ptr2, \ - tmp_ptr2 + \ - strlen(tmp_ptr2) + 1); \ - } else { \ - error_num = connect_string_parse.print_param_error(); \ - goto error; \ - } \ + share->param_name = my_strtoll10(parse.start_value, (char**) NULL, \ + &error_num); \ + if (share->param_name < min_val) \ + share->param_name = min_val; \ DBUG_PRINT("info",("spider " title_name "=%lld", share->param_name)); \ } 
\ break; \ } -int spider_parse_connect_info( - SPIDER_SHARE *share, - TABLE_SHARE *table_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE - partition_info *part_info, -#endif - uint create_table -) { - int error_num = 0; - char *connect_string = NULL; - char *sprit_ptr; - char *tmp_ptr, *tmp_ptr2, *start_ptr; - int roop_count; - int title_length; - SPIDER_PARAM_STRING_PARSE connect_string_parse; - SPIDER_ALTER_TABLE *share_alter; -#ifdef WITH_PARTITION_STORAGE_ENGINE - partition_element *part_elem; - partition_element *sub_elem; -#endif - DBUG_ENTER("spider_parse_connect_info"); -#ifdef WITH_PARTITION_STORAGE_ENGINE -#if MYSQL_VERSION_ID < 50500 - DBUG_PRINT("info",("spider partition_info=%s", table_share->partition_info)); -#else - DBUG_PRINT("info",("spider partition_info=%s", - table_share->partition_info_str)); -#endif - DBUG_PRINT("info",("spider part_info=%p", part_info)); -#endif - DBUG_PRINT("info",("spider s->db=%s", table_share->db.str)); - DBUG_PRINT("info",("spider s->table_name=%s", table_share->table_name.str)); - DBUG_PRINT("info",("spider s->path=%s", table_share->path.str)); - DBUG_PRINT("info", - ("spider s->normalized_path=%s", table_share->normalized_path.str)); -#ifdef WITH_PARTITION_STORAGE_ENGINE - spider_get_partition_info(share->table_name, share->table_name_length, - table_share, part_info, &part_elem, &sub_elem); -#endif +static void spider_minus_1(SPIDER_SHARE *share, TABLE_SHARE *table_share) +{ #ifndef WITHOUT_SPIDER_BG_SEARCH share->sts_bg_mode = -1; #endif @@ -2092,13 +1834,275 @@ int spider_parse_connect_info( share->delete_all_rows_type = -1; share->static_records_for_status = -1; share->static_mean_rec_length = -1; - for (roop_count = 0; roop_count < (int) table_share->keys; roop_count++) + for (uint i = 0; i < table_share->keys; i++) { - share->static_key_cardinality[roop_count] = -1; + share->static_key_cardinality[i] = -1; } +} + +/** + Get the connect info of a certain type. + + @param type The type of the connect info. 
+                4: subpartition; 3: partition; 2: comment;
+                1: connect_string
+  @retval 0                  Success
+  @retval 1                  Not applicable. That is, the info with the
+                             type is missing
+  @retval HA_ERR_OUT_OF_MEM  Failure
+*/
+static int spider_get_connect_info(const int type,
+                                   const partition_element *part_elem,
+                                   const partition_element *sub_elem,
+                                   const TABLE_SHARE* table_share,
+                                   char*& out)
+{
+  switch (type)
+  {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  case 4:
+    if (!sub_elem || !sub_elem->part_comment)
+      return 1;
+    if (!(out = spider_create_string(
+            sub_elem->part_comment, strlen(sub_elem->part_comment))))
+      return HA_ERR_OUT_OF_MEM;
+    break;
+  case 3:
+    if (!part_elem || !part_elem->part_comment)
+      return 1;
+    if (!(out = spider_create_string(
+            part_elem->part_comment, strlen(part_elem->part_comment))))
+      return HA_ERR_OUT_OF_MEM;
+    break;
+#endif
+  case 2:
+    if (table_share->comment.length == 0)
+      return 1;
+    if (!(out = spider_create_string(
+            table_share->comment.str, table_share->comment.length)))
+      return HA_ERR_OUT_OF_MEM;
+    break;
+  default:
+    if (table_share->connect_string.length == 0)
+      return 1;
+    DBUG_PRINT("info",("spider create out string"));
+    if (!(out = spider_create_string(
+            table_share->connect_string.str, table_share->connect_string.length)))
+      return HA_ERR_OUT_OF_MEM;
+    break;
+  }
+  return 0;
+}
+
+/**
+  Find the beginning and end of a parameter title
+
+  Skip over whitespace to find the beginning of the parameter
+  title. Then skip over non-whitespace/quote/nul chars to find the end
+  of the parameter title
+
+  @param  start_title    The start of the param definition. Will be
+                         moved to the start of the param title
+  @param  end_title      Will be moved to the end of the param title
+  @retval false          Success
+  @retval true           Failure
+*/
+static bool spider_parse_find_title(char*& start_title, char*& end_title)
+{
+  /* Skip leading whitespaces.
 */
+  while (*start_title == ' ' || *start_title == '\r' ||
+         *start_title == '\n' || *start_title == '\t')
+    start_title++;
+
+  if (*start_title == '\0')
+    return true;
+
+  end_title = start_title;
+  /* Move over non-whitespace/comma/nul/quote chars (parameter title). */
+  while (*end_title != ' ' && *end_title != '\r' &&
+         *end_title != '\n' && *end_title != '\t' &&
+         *end_title != '\0' && *end_title != ',' &&
+         *end_title != '\'' && *end_title != '"')
+    end_title++;
+
+  /* Fail on invalid end: there should be at least one space between
+  title and value, and the value should be non-empty. */
+  if (*end_title == '\'' || *end_title == '"' ||
+      *end_title == '\0' || *end_title == ',')
+    return true;
+
+  return false;
+}
+
+/**
+  Find the beginning and the end of a parameter value, and the value
+  delimiter
+
+  Skip over whitespaces to find the start delimiter, then skip over
+  the param value to find the end delimiter
+
+  @param  start_value    The end of the param title. Will be moved to
+                         the start of the param value, just after the
+                         delimiter
+  @param  end_value      Will be moved to the end of the param value, at
+                         the delimiter
+  @param  delim          Will be assigned the param value delimiter,
+                         either the single or double quote
+  @retval false          Success
+  @retval true           Failure
+*/
+static bool spider_parse_find_value(char*& start_value, char*& end_value,
+                                    char& delim)
+{
+  /* Skip over whitespaces */
+  while (*start_value == ' ' || *start_value == '\r' ||
+         *start_value == '\n' || *start_value == '\t')
+    start_value++;
+  if (*start_value != '"' && *start_value != '\'')
+    return true;
+  delim= *start_value;
+  end_value= start_value++;
+
+  while (1)
+  {
+    end_value++;
+    /* Escaping */
+    if (*end_value == '\\')
+    {
+      end_value++;
+      /* The backslash cannot be at the end */
+      if (*end_value == '\0')
+        return true;
+    }
+    else if (*end_value == delim)
+      return false;
+    else if (*end_value == '\0')
+      return true;
+  }
+}
+
+/**
+  Find the beginning of the next parameter
+
+  Skip over
 whitespaces, then check that the first non-whitespace char
+  is a comma or the end of string
+
+  @param  start_param    The end of the param value. Will be moved to
+                         the start of the next param definition, just
+                         after the comma, if there's one; otherwise will
+                         be moved to the end of the string
+  @retval false          Success
+  @retval true           Failure
+*/
+static bool spider_parse_find_next(char*& start_param)
+{
+  /* Skip over whitespaces */
+  while (*start_param == ' ' || *start_param == '\r' ||
+         *start_param == '\n' || *start_param == '\t')
+    start_param++;
+  /* No more param definitions */
+  if (*start_param == '\0')
+    return false;
+  else if (*start_param == ',')
+  {
+    start_param++;
+    return false;
+  }
+  else
+    return true;
+}
+
+/**
+  Find the start and end of the current param title and value and the
+  value delimiter.
+
+  @param  start_param    The beginning of the current param
+                         definition. Will be mutated to the beginning
+                         of the next param definition.
+  @retval false          success
+  @retval true           failure
+*/
+bool st_spider_param_string_parse::locate_param_def(char*& start_param)
+{
+  DBUG_ENTER("parse::locate_param_def");
+  start_title= start_param;
+  if (spider_parse_find_title(start_title, end_title))
+    DBUG_RETURN(TRUE);
+  start_value= end_title;
+  if (spider_parse_find_value(start_value, end_value, delim_value))
+    DBUG_RETURN(TRUE);
+  /* skip the delim */
+  start_param= end_value + 1;
+  if (spider_parse_find_next(start_param))
+    DBUG_RETURN(TRUE);
+  DBUG_RETURN(FALSE);
+}
+
+/**
+  Handle parsing failure.
+
+  Print error and optionally restore param value end delimiter that
+  has been nulled before.
+ + @param restore_delim If true, restore the end value delimiter + @return The error number +*/ +int st_spider_param_string_parse::fail(bool restore_delim) +{ + DBUG_ENTER("spider_parse_print_param_error"); + DBUG_ASSERT(error_num != 0); + /* Print the error message */ + switch (error_num) + { + case ER_SPIDER_INVALID_UDF_PARAM_NUM: + my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, + MYF(0), start_title); + break; + case ER_SPIDER_INVALID_CONNECT_INFO_NUM: + default: + my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, + MYF(0), start_title); + } + if (restore_delim) + *end_value = delim_value; + DBUG_RETURN(error_num); +} + +/* + Parse connection information specified by COMMENT, CONNECT, or engine-defined + options. + TODO: Deprecate the connection specification by COMMENT and CONNECT, + and then solely utilize engine-defined options. +*/ +int spider_parse_connect_info( + SPIDER_SHARE *share, + TABLE_SHARE *table_share, + partition_info *part_info, + uint create_table +) { + int error_num = 0; + char *connect_string = NULL; + char *start_param; + int title_length, value_length; + SPIDER_PARAM_STRING_PARSE parse; + SPIDER_ALTER_TABLE *share_alter; + partition_element *part_elem; + partition_element *sub_elem; + DBUG_ENTER("spider_parse_connect_info"); + DBUG_PRINT("info",("spider partition_info=%s", + table_share->partition_info_str)); + DBUG_PRINT("info",("spider part_info=%p", part_info)); + DBUG_PRINT("info",("spider s->db=%s", table_share->db.str)); + DBUG_PRINT("info",("spider s->table_name=%s", table_share->table_name.str)); + DBUG_PRINT("info",("spider s->path=%s", table_share->path.str)); + DBUG_PRINT("info", + ("spider s->normalized_path=%s", table_share->normalized_path.str)); + spider_get_partition_info(share->table_name, share->table_name_length, + table_share, part_info, &part_elem, &sub_elem); + spider_minus_1(share, table_share); #ifdef WITH_PARTITION_STORAGE_ENGINE - for (roop_count = 4; roop_count > 0; roop_count--) + for 
(int i = 4; i > 0; i--) #else for (roop_count = 2; roop_count > 0; roop_count--) #endif @@ -2108,104 +2112,35 @@ int spider_parse_connect_info( spider_free(spider_current_trx, connect_string, MYF(0)); connect_string = NULL; } - switch (roop_count) + + int error_num_1 = spider_get_connect_info(i, part_elem, sub_elem, + table_share, connect_string); + if (error_num_1 == 1) + continue; + if (error_num_1 == HA_ERR_OUT_OF_MEM) { -#ifdef WITH_PARTITION_STORAGE_ENGINE - case 4: - if (!sub_elem || !sub_elem->part_comment) - continue; - DBUG_PRINT("info",("spider create sub comment string")); - if ( - !(connect_string = spider_create_string( - sub_elem->part_comment, - strlen(sub_elem->part_comment))) - ) { - error_num = HA_ERR_OUT_OF_MEM; - goto error_alloc_conn_string; - } - DBUG_PRINT("info",("spider sub comment string=%s", connect_string)); - break; - case 3: - if (!part_elem || !part_elem->part_comment) - continue; - DBUG_PRINT("info",("spider create part comment string")); - if ( - !(connect_string = spider_create_string( - part_elem->part_comment, - strlen(part_elem->part_comment))) - ) { - error_num = HA_ERR_OUT_OF_MEM; - goto error_alloc_conn_string; - } - DBUG_PRINT("info",("spider part comment string=%s", connect_string)); - break; -#endif - case 2: - if (table_share->comment.length == 0) - continue; - DBUG_PRINT("info",("spider create comment string")); - if ( - !(connect_string = spider_create_string( - table_share->comment.str, - table_share->comment.length)) - ) { - error_num = HA_ERR_OUT_OF_MEM; - goto error_alloc_conn_string; - } - DBUG_PRINT("info",("spider comment string=%s", connect_string)); - break; - default: - if (table_share->connect_string.length == 0) - continue; - DBUG_PRINT("info",("spider create connect_string string")); - if ( - !(connect_string = spider_create_string( - table_share->connect_string.str, - table_share->connect_string.length)) - ) { - error_num = HA_ERR_OUT_OF_MEM; - goto error_alloc_conn_string; - } - DBUG_PRINT("info",("spider 
connect_string=%s", connect_string)); - break; + error_num= HA_ERR_OUT_OF_MEM; + goto error_alloc_conn_string; } + DBUG_ASSERT(error_num_1 == 0); - sprit_ptr = connect_string; - connect_string_parse.init(connect_string, ER_SPIDER_INVALID_CONNECT_INFO_NUM); - while (sprit_ptr) + start_param = connect_string; + parse.error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; + while (*start_param != '\0') { - tmp_ptr = sprit_ptr; - while (*tmp_ptr == ' ' || *tmp_ptr == '\r' || - *tmp_ptr == '\n' || *tmp_ptr == '\t') - tmp_ptr++; - - if (*tmp_ptr == '\0') - break; - - title_length = 0; - start_ptr = tmp_ptr; - while (*start_ptr != ' ' && *start_ptr != '\'' && - *start_ptr != '"' && *start_ptr != '\0' && - *start_ptr != '\r' && *start_ptr != '\n' && - *start_ptr != '\t') - { - title_length++; - start_ptr++; - } - connect_string_parse.set_param_title(tmp_ptr, tmp_ptr + title_length); - if ((error_num = connect_string_parse.get_next_parameter_head( - start_ptr, &sprit_ptr))) + if (parse.locate_param_def(start_param)) { + error_num= parse.fail(false); goto error; } - - switch (title_length) + /* Null the end of the parameter value. 
*/ + *parse.end_value= '\0'; + value_length= (int) (parse.end_value - parse.start_value); + switch (title_length = (int) (parse.end_title - parse.start_title)) { case 0: - error_num = connect_string_parse.print_param_error(); - if (error_num) - goto error; - continue; + error_num= parse.fail(true); + goto error; case 3: SPIDER_PARAM_LONG_LIST_WITH_MAX("abl", access_balances, 0, 2147483647); @@ -2345,17 +2280,17 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_INT_WITH_MAX("upu", use_pushdown_udf, 0, 1); SPIDER_PARAM_INT_WITH_MAX("utc", use_table_charset, 0, 1); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 4: SPIDER_PARAM_STR_LIST("host", tgt_hosts); SPIDER_PARAM_STR_LIST("user", tgt_usernames); SPIDER_PARAM_LONG_LIST_WITH_MAX("port", tgt_ports, 0, 65535); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 5: SPIDER_PARAM_STR_LIST("table", tgt_table_names); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 6: SPIDER_PARAM_STR_LIST("server", server_names); @@ -2365,13 +2300,13 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("ssl_ca", tgt_ssl_cas); SPIDER_PARAM_NUMHINT("skc", static_key_cardinality, 3, (int) table_share->keys, spider_set_ll_value); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 7: SPIDER_PARAM_STR_LIST("wrapper", tgt_wrappers); SPIDER_PARAM_STR_LIST("ssl_key", tgt_ssl_keys); SPIDER_PARAM_STR_LIST("pk_name", tgt_pk_names); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 8: SPIDER_PARAM_STR_LIST("database", tgt_dbs); @@ -2391,14 +2326,14 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_STR_LIST("ssl_cert", tgt_ssl_certs); SPIDER_PARAM_INT_WITH_MAX("bka_mode", bka_mode, 0, 2); - error_num = connect_string_parse.print_param_error(); + error_num = 
parse.fail(true); goto error; case 9: SPIDER_PARAM_INT("max_order", max_order, 0); SPIDER_PARAM_INT("bulk_size", bulk_size, 0); SPIDER_PARAM_DOUBLE("scan_rate", scan_rate, 0); SPIDER_PARAM_DOUBLE("read_rate", read_rate, 0); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 10: SPIDER_PARAM_DOUBLE("crd_weight", crd_weight, 1); @@ -2408,7 +2343,7 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("ssl_capath", tgt_ssl_capaths); SPIDER_PARAM_STR("bka_engine", bka_engine); SPIDER_PARAM_LONGLONG("first_read", first_read, 0); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 11: SPIDER_PARAM_INT_WITH_MAX("query_cache", query_cache, 0, 2); @@ -2422,7 +2357,7 @@ int spider_parse_connect_info( SPIDER_PARAM_LONG_LIST_WITH_MAX("use_hs_read", use_hs_reads, 0, 1); #endif SPIDER_PARAM_INT_WITH_MAX("casual_read", casual_read, 0, 63); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 12: SPIDER_PARAM_DOUBLE("sts_interval", sts_interval, 0); @@ -2435,7 +2370,7 @@ int spider_parse_connect_info( SPIDER_PARAM_LONG_LIST_WITH_MAX( "hs_read_port", hs_read_ports, 0, 65535); #endif - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 13: SPIDER_PARAM_STR_LIST("default_group", tgt_default_groups); @@ -2444,7 +2379,7 @@ int spider_parse_connect_info( "hs_write_port", hs_write_ports, 0, 65535); #endif SPIDER_PARAM_STR_LIST("sequence_name", tgt_sequence_names); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 14: SPIDER_PARAM_LONGLONG("internal_limit", internal_limit, 0); @@ -2461,7 +2396,7 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("static_link_id", static_link_ids); SPIDER_PARAM_INT_WITH_MAX("store_last_crd", store_last_crd, 0, 1); SPIDER_PARAM_INT_WITH_MAX("store_last_sts", store_last_sts, 0, 1); - 
error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 15: SPIDER_PARAM_LONGLONG("internal_offset", internal_offset, 0); @@ -2482,7 +2417,7 @@ int spider_parse_connect_info( SPIDER_PARAM_LONG_LIST_WITH_MAX("connect_timeout", connect_timeouts, 0, 2147483647); SPIDER_PARAM_INT_WITH_MAX("error_read_mode", error_read_mode, 0, 1); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 16: SPIDER_PARAM_INT_WITH_MAX( @@ -2512,7 +2447,7 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_INT_WITH_MAX( "query_cache_sync", query_cache_sync, 0, 3); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 17: SPIDER_PARAM_INT_WITH_MAX( @@ -2532,7 +2467,7 @@ int spider_parse_connect_info( SPIDER_PARAM_INT_WITH_MAX( "force_bulk_update", force_bulk_update, 0, 1); #endif - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 18: SPIDER_PARAM_INT_WITH_MAX( @@ -2545,7 +2480,7 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_LONGLONG( "direct_order_limit", direct_order_limit, 0); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 19: SPIDER_PARAM_INT("init_sql_alloc_size", init_sql_alloc_size, 0); @@ -2560,7 +2495,7 @@ int spider_parse_connect_info( "load_crd_at_startup", load_crd_at_startup, 0, 1); SPIDER_PARAM_INT_WITH_MAX( "load_sts_at_startup", load_sts_at_startup, 0, 1); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 20: SPIDER_PARAM_LONGLONG_LIST_WITH_MAX( @@ -2569,12 +2504,12 @@ int spider_parse_connect_info( "delete_all_rows_type", delete_all_rows_type, 0, 1); SPIDER_PARAM_INT_WITH_MAX( "skip_parallel_search", skip_parallel_search, 0, 3); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 21: 
SPIDER_PARAM_LONGLONG( "semi_split_read_limit", semi_split_read_limit, 0); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 22: SPIDER_PARAM_LONG_LIST_WITH_MAX( @@ -2587,38 +2522,36 @@ int spider_parse_connect_info( "skip_default_condition", skip_default_condition, 0, 1); SPIDER_PARAM_LONGLONG( "static_mean_rec_length", static_mean_rec_length, 0); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 23: SPIDER_PARAM_INT_WITH_MAX( "internal_optimize_local", internal_optimize_local, 0, 1); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 25: SPIDER_PARAM_LONGLONG("static_records_for_status", static_records_for_status, 0); SPIDER_PARAM_NUMHINT("static_key_cardinality", static_key_cardinality, 3, (int) table_share->keys, spider_set_ll_value); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 26: SPIDER_PARAM_INT_WITH_MAX( "semi_table_lock_connection", semi_table_lock_conn, 0, 1); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; case 32: SPIDER_PARAM_LONG_LIST_WITH_MAX("monitoring_binlog_pos_at_failing", monitoring_binlog_pos_at_failing, 0, 2); - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; default: - error_num = connect_string_parse.print_param_error(); + error_num = parse.fail(true); goto error; } - - /* Verify that the remainder of the parameter value is whitespace */ - if ((error_num = connect_string_parse.has_extra_parameter_values())) - goto error; + /* Restore delim */ + *parse.end_value= parse.delim_value; } } @@ -3199,8 +3132,8 @@ int spider_parse_connect_info( if (create_table) { - for (roop_count = 0; roop_count < (int) share->all_link_count; - roop_count++) + for (int roop_count = 0; roop_count < (int) share->all_link_count; + roop_count++) 
{ int roop_count2; for (roop_count2 = 0; roop_count2 < SPIDER_DBTON_SIZE; roop_count2++) diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h index be3d526e4a2bc..b56360e8acd64 100644 --- a/storage/spider/spd_table.h +++ b/storage/spider/spd_table.h @@ -30,281 +30,21 @@ typedef struct st_spider_param_string_parse { - char *start_ptr; /* Pointer to the start of the parameter string */ - char *end_ptr; /* Pointer to the end of the parameter string */ - char *start_title_ptr; /* Pointer to the start of the current parameter - title */ - char *end_title_ptr; /* Pointer to the end of the current parameter - title */ - char *start_value_ptr; /* Pointer to the start of the current parameter - value */ - char *end_value_ptr; /* Pointer to the end of the current parameter - value */ - int error_num; /* Error code of the error message to print when - an error is detected */ - uint delim_title_len; /* Length of the paramater title's delimiter */ - uint delim_value_len; /* Length of the paramater value's delimiter */ - char delim_title; /* Current parameter title's delimiter character */ - char delim_value; /* Current parameter value's delimiter character */ - - /** - Initialize the parameter string parse information. - - @param param_string Pointer to the parameter string being parsed. - @param error_code Error code of the error message to print when - an error is detected. - */ - - inline void init(char *param_string, int error_code) - { - start_ptr = param_string; - end_ptr = start_ptr + strlen(start_ptr); - - init_param_title(); - init_param_value(); - - error_num = error_code; - } - - /** - Initialize the current parameter title. - */ - - inline void init_param_title() - { - start_title_ptr = end_title_ptr = NULL; - delim_title_len = 0; - delim_title = '\0'; - } - - /** - Save pointers to the start and end positions of the current parameter - title in the parameter string. Also save the parameter title's - delimiter character. 
- - @param start_value Pointer to the start position of the current - parameter title. - @param end_value Pointer to the end position of the current - parameter title. - */ - - inline void set_param_title(char *start_title, char *end_title) - { - start_title_ptr = start_title; - end_title_ptr = end_title; - - if (*start_title == '"' || - *start_title == '\'') - { - delim_title = *start_title; - - if (start_title >= start_ptr && *--start_title == '\\') - delim_title_len = 2; - else - delim_title_len = 1; - } - } - - /** - Initialize the current parameter value. - */ - - inline void init_param_value() - { - start_value_ptr = end_value_ptr = NULL; - delim_value_len = 0; - delim_value = '\0'; - } - - /** - Save pointers to the start and end positions of the current parameter - value in the parameter string. Also save the parameter value's - delimiter character. - - @param start_value Pointer to the start position of the current - parameter value. - @param end_value Pointer to the end position of the current - parameter value. - */ - - inline void set_param_value(char *start_value, char *end_value) - { - start_value_ptr = start_value--; - end_value_ptr = end_value; - - if (*start_value == '"' || - *start_value == '\'') - { - delim_value = *start_value; - - if (*--start_value == '\\') - delim_value_len = 2; - else - delim_value_len = 1; - } - } - - /** - Determine whether the current parameter in the parameter string has - extra parameter values. - - @return 0 Current parameter value in the parameter string - does not have extra parameter values. - <> 0 Error code indicating that the current parameter - value in the parameter string has extra - parameter values. 
- */ - - inline int has_extra_parameter_values() - { - int error_num = 0; - DBUG_ENTER("has_extra_parameter_values"); - - if (end_value_ptr) - { - /* There is a current parameter value */ - char *end_param_ptr = end_value_ptr; - - while (end_param_ptr < end_ptr && - (*end_param_ptr == ' ' || *end_param_ptr == '\r' || - *end_param_ptr == '\n' || *end_param_ptr == '\t')) - end_param_ptr++; - - if (end_param_ptr < end_ptr && *end_param_ptr != '\0') - { - /* Extra values in parameter definition */ - error_num = print_param_error(); - } - } - - DBUG_RETURN(error_num); - } - - inline int get_next_parameter_head(char *st, char **nx) - { - DBUG_ENTER("get_next_parameter_head"); - char *sq = strchr(st, '\''); - char *dq = strchr(st, '"'); - if (!sq && !dq) - { - DBUG_RETURN(print_param_error()); - } - - if (dq && (!sq || sq > dq)) - { - while (1) - { - ++dq; - if (*dq == '\\') - { - ++dq; - } - else if (*dq == '"') - { - break; - } - else if (*dq == '\0') - { - DBUG_RETURN(print_param_error()); - } - } - while (1) - { - ++dq; - if (*dq == '\0') - { - *nx = dq; - break; - } - else if (*dq == ',') - { - *dq = '\0'; - *nx = dq + 1; - break; - } - else if (*dq != ' ' && *dq != '\r' && *dq != '\n' && *dq != '\t') - { - DBUG_RETURN(print_param_error()); - } - } - } - else /* sq && (!dq || sq <= dq) */ - { - while (1) - { - ++sq; - if (*sq == '\\') - { - ++sq; - } - else if (*sq == '\'') - { - break; - } - else if (*sq == '\0') - { - DBUG_RETURN(print_param_error()); - } - } - while (1) - { - ++sq; - if (*sq == '\0') - { - *nx = sq; - break; - } - else if (*sq == ',') - { - *sq = '\0'; - *nx = sq + 1; - break; - } - else if (*sq != ' ' && *sq != '\r' && *sq != '\n' && *sq != '\t') - { - DBUG_RETURN(print_param_error()); - } - } - } - DBUG_RETURN(0); - } - - /** - Restore the current parameter's input delimiter characters in the - parameter string. They were NULLed during parameter parsing. 
- */ - - inline void restore_delims() - { - char *end = end_title_ptr - 1; - - switch (delim_title_len) - { - case 2: - *end++ = '\\'; - /* Fall through */ - case 1: - *end = delim_title; - } - - end = end_value_ptr - 1; - switch (delim_value_len) - { - case 2: - *end++ = '\\'; - /* Fall through */ - case 1: - *end = delim_value; - } - } - - /** - Print a parameter string error message. - - @return Error code. - */ - - int print_param_error(); + char *start_title; /* Pointer to the start of the current parameter + title */ + char *end_title; /* Pointer to the end of the current + parameter value */ + char *start_value; /* Pointer to the start of the current parameter + value */ + char *end_value; /* Pointer to the end of the current parameter + value */ + char delim_value; /* Current parameter value's delimiter + character, either a single or a double quote */ + int error_num; /* Error code of the error message to print when + an error is detected */ + + int fail(bool restore_delim); + bool locate_param_def(char*& start_param); } SPIDER_PARAM_STRING_PARSE; uchar *spider_tbl_get_key( @@ -352,19 +92,12 @@ void spider_free_tmp_share_alloc( SPIDER_SHARE *share ); -char *spider_get_string_between_quote( - char *ptr, - bool alloc, - SPIDER_PARAM_STRING_PARSE *param_string_parse = NULL -); - int spider_create_string_list( char ***string_list, uint **string_length_list, uint *list_length, char *str, - uint length, - SPIDER_PARAM_STRING_PARSE *param_string_parse + uint length ); int spider_create_long_list( @@ -373,8 +106,7 @@ int spider_create_long_list( char *str, uint length, long min_val, - long max_val, - SPIDER_PARAM_STRING_PARSE *param_string_parse + long max_val ); int spider_create_longlong_list( @@ -383,8 +115,7 @@ int spider_create_longlong_list( char *str, uint length, longlong min_val, - longlong max_val, - SPIDER_PARAM_STRING_PARSE *param_string_parse + longlong max_val ); int spider_increase_string_list( From 02878f128e12448f995efd2551be65dc13c458a5 Mon Sep 
17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 24 Aug 2023 10:08:51 +0300 Subject: [PATCH 012/165] MDEV-31813 SET GLOBAL innodb_max_purge_lag_wait hangs if innodb_read_only innodb_max_purge_lag_wait_update(): Return immediately if we are in high_level_read_only mode. srv_wake_purge_thread_if_not_active(): Relax a debug assertion. If srv_read_only_mode holds, purge_sys.enabled() will not hold and this function will do nothing. trx_t::commit_in_memory(): Remove a redundant condition before invoking srv_wake_purge_thread_if_not_active(). --- mysql-test/suite/innodb/r/read_only_recovery.result | 1 + mysql-test/suite/innodb/t/read_only_recovery.test | 1 + storage/innobase/handler/ha_innodb.cc | 2 ++ storage/innobase/srv/srv0srv.cc | 3 +-- storage/innobase/trx/trx0trx.cc | 3 +-- 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/innodb/r/read_only_recovery.result b/mysql-test/suite/innodb/r/read_only_recovery.result index e83bf66432e42..7f9854983ef97 100644 --- a/mysql-test/suite/innodb/r/read_only_recovery.result +++ b/mysql-test/suite/innodb/r/read_only_recovery.result @@ -35,6 +35,7 @@ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT * FROM t; a 3 +SET GLOBAL innodb_max_purge_lag_wait=0; # restart SELECT * FROM t; a diff --git a/mysql-test/suite/innodb/t/read_only_recovery.test b/mysql-test/suite/innodb/t/read_only_recovery.test index 7da012efb749f..7cb6335a1bb0f 100644 --- a/mysql-test/suite/innodb/t/read_only_recovery.test +++ b/mysql-test/suite/innodb/t/read_only_recovery.test @@ -37,6 +37,7 @@ UPDATE t SET a=3 WHERE a=1; SELECT * FROM t; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT * FROM t; +SET GLOBAL innodb_max_purge_lag_wait=0; --let $restart_parameters= --source include/restart_mysqld.inc SELECT * FROM t; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 3a8d562f70599..1bbc7af2520cc 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ 
b/storage/innobase/handler/ha_innodb.cc @@ -238,6 +238,8 @@ static uint innodb_max_purge_lag_wait; static void innodb_max_purge_lag_wait_update(THD *thd, st_mysql_sys_var *, void *, const void *limit) { + if (high_level_read_only) + return; const uint l= *static_cast(limit); if (trx_sys.rseg_history_len <= l) return; diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 47e0fe1305350..a83b327faf47f 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1902,8 +1902,7 @@ srv_active_wake_master_thread_low() void srv_wake_purge_thread_if_not_active() { - ut_ad(!srv_read_only_mode); - ut_ad(!mutex_own(&srv_sys.mutex)); + ut_ad(srv_read_only_mode || !mutex_own(&srv_sys.mutex)); if (purge_sys.enabled() && !purge_sys.paused() && !srv_sys.n_threads_active[SRV_PURGE] diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index b742d1c3686d8..00d8506c5e73e 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1477,8 +1477,7 @@ inline void trx_t::commit_in_memory(const mtr_t *mtr) trx_mutex_exit(this); ut_a(error_state == DB_SUCCESS); - if (!srv_read_only_mode) - srv_wake_purge_thread_if_not_active(); + srv_wake_purge_thread_if_not_active(); } /** Commit the transaction in a mini-transaction. From f4bbea90f11d16930255784d7efe1f09416ee6bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 25 Aug 2023 13:16:54 +0300 Subject: [PATCH 013/165] MDEV-30100 preparation: Simplify InnoDB transaction commit trx_commit_cleanup(): Clean up any temporary undo log. Replaces trx_undo_commit_cleanup() and trx_undo_seg_free(). trx_write_serialisation_history(): Commit the mini-transaction. Do not touch temporary undo logs. Assume that a persistent rollback segment has been assigned. trx_serialise(): Merged into trx_write_serialisation_history(). trx_t::commit_low(): Correct some comments and assertions. 
trx_t::commit_persist(): Only invoke commit_low() on a mini-transaction if the persistent state needs to change. --- storage/innobase/handler/handler0alter.cc | 2 +- storage/innobase/include/trx0undo.h | 6 - storage/innobase/trx/trx0trx.cc | 208 +++++++++++----------- storage/innobase/trx/trx0undo.cc | 62 ------- 4 files changed, 104 insertions(+), 174 deletions(-) diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index d62fea9584758..fcc6f46c87486 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -11498,7 +11498,7 @@ ha_innobase::commit_inplace_alter_table( DEBUG_SYNC(m_user_thd, "innodb_alter_inplace_before_commit"); if (new_clustered) { - ut_ad(trx->has_logged()); + ut_ad(trx->has_logged_persistent()); for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) { auto ctx= static_cast(*pctx); diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index c143593055161..1a5dfbd874f02 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -226,12 +226,6 @@ void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback, mtr_t *mtr) MY_ATTRIBUTE((nonnull)); -/** Free temporary undo log after commit or rollback. -The information is not needed after a commit or rollback, therefore -the data can be discarded. -@param undo temporary undo log */ -void trx_undo_commit_cleanup(trx_undo_t *undo); - /** At shutdown, frees the undo logs of a transaction. */ void trx_undo_free_at_shutdown(trx_t *trx); diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index b96ababb1f3cf..a35a035a7f832 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -969,93 +969,50 @@ trx_start_low( ut_a(trx->error_state == DB_SUCCESS); } -/** Set the serialisation number for a persistent committed transaction. 
-@param[in,out] trx committed transaction with persistent changes */ -static -void -trx_serialise(trx_t* trx) -{ - trx_rseg_t *rseg = trx->rsegs.m_redo.rseg; - ut_ad(rseg); - - if (rseg->last_page_no == FIL_NULL) { - mysql_mutex_lock(&purge_sys.pq_mutex); - } - - trx_sys.assign_new_trx_no(trx); - - /* If the rollback segment is not empty then the - new trx_t::no can't be less than any trx_t::no - already in the rollback segment. User threads only - produce events when a rollback segment is empty. */ - if (rseg->last_page_no == FIL_NULL) { - purge_sys.purge_queue.push(TrxUndoRsegs(trx->rw_trx_hash_element->no, - *rseg)); - mysql_mutex_unlock(&purge_sys.pq_mutex); - } -} - -/****************************************************************//** -Assign the transaction its history serialisation number and write the -update UNDO log record to the assigned rollback segment. */ -static -void -trx_write_serialisation_history( -/*============================*/ - trx_t* trx, /*!< in/out: transaction */ - mtr_t* mtr) /*!< in/out: mini-transaction */ +/** Assign the transaction its history serialisation number and write the +UNDO log to the assigned rollback segment. +@param trx persistent transaction +@param mtr mini-transaction */ +static void trx_write_serialisation_history(trx_t *trx, mtr_t *mtr) { - /* Change the undo log segment states from TRX_UNDO_ACTIVE to some - other state: these modifications to the file data structure define - the transaction as committed in the file based domain, at the - serialization point of the log sequence number lsn obtained below. */ - - /* We have to hold the rseg mutex because update log headers have - to be put to the history list in the (serialisation) order of the - UNDO trx number. This is required for the purge in-memory data - structures too. */ - - if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) { - /* Undo log for temporary tables is discarded at transaction - commit. 
There is no purge for temporary tables, and also no - MVCC, because they are private to a session. */ - - mtr_t temp_mtr; - temp_mtr.start(); - temp_mtr.set_log_mode(MTR_LOG_NO_REDO); - buf_block_t* block= buf_page_get(page_id_t(SRV_TMP_SPACE_ID, - undo->hdr_page_no), - 0, RW_X_LATCH, mtr); - ut_a(block); - temp_mtr.write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE - + block->page.frame, TRX_UNDO_TO_PURGE); - undo->state = TRX_UNDO_TO_PURGE; - temp_mtr.commit(); - } - - trx_rseg_t* rseg = trx->rsegs.m_redo.rseg; - if (!rseg) { - ut_ad(!trx->rsegs.m_redo.undo); - return; - } - - trx_undo_t*& undo = trx->rsegs.m_redo.undo; - - ut_ad(!trx->read_only); - - /* Assign the transaction serialisation number and add any - undo log to the purge queue. */ - if (undo) { - rseg->latch.wr_lock(SRW_LOCK_CALL); - ut_ad(undo->rseg == rseg); - trx_serialise(trx); - UT_LIST_REMOVE(rseg->undo_list, undo); - trx_purge_add_undo_to_history(trx, undo, mtr); - MONITOR_INC(MONITOR_TRX_COMMIT_UNDO); - rseg->latch.wr_unlock(); - } - - rseg->release(); + ut_ad(!trx->read_only); + trx_rseg_t *rseg= trx->rsegs.m_redo.rseg; + trx_undo_t *&undo= trx->rsegs.m_redo.undo; + if (UNIV_LIKELY(undo != nullptr)) + { + MONITOR_INC(MONITOR_TRX_COMMIT_UNDO); + + /* We have to hold exclusive rseg->latch because undo log headers have + to be put to the history list in the (serialisation) order of the + UNDO trx number. This is required for purge_sys too. */ + rseg->latch.wr_lock(SRW_LOCK_CALL); + ut_ad(undo->rseg == rseg); + /* Assign the transaction serialisation number and add any + undo log to the purge queue. */ + if (rseg->last_page_no == FIL_NULL) + { + mysql_mutex_lock(&purge_sys.pq_mutex); + trx_sys.assign_new_trx_no(trx); + /* If the rollback segment is not empty, trx->no cannot be less + than any trx_t::no already in rseg. User threads only produce + events when a rollback segment is empty. 
*/ + purge_sys.purge_queue.push(TrxUndoRsegs(trx->rw_trx_hash_element->no, + *rseg)); + mysql_mutex_unlock(&purge_sys.pq_mutex); + } + else + trx_sys.assign_new_trx_no(trx); + UT_LIST_REMOVE(rseg->undo_list, undo); + /* Change the undo log segment state from TRX_UNDO_ACTIVE, to + define the transaction as committed in the file based domain, + at mtr->commit_lsn() obtained in mtr->commit() below. */ + trx_purge_add_undo_to_history(trx, undo, mtr); + rseg->release(); + rseg->latch.wr_unlock(); + } + else + rseg->release(); + mtr->commit(); } /******************************************************************** @@ -1229,6 +1186,55 @@ void trx_t::evict_table(table_id_t table_id, bool reset_only) } } +/** Free temporary undo log after commit or rollback. +@param undo temporary undo log */ +ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo) +{ + trx_rseg_t *const rseg= undo->rseg; + ut_ad(rseg->space == fil_system.temp_space); + rseg->latch.wr_lock(SRW_LOCK_CALL); + UT_LIST_REMOVE(rseg->undo_list, undo); + ut_ad(undo->state == TRX_UNDO_ACTIVE || undo->state == TRX_UNDO_PREPARED); + ut_ad(undo->id < TRX_RSEG_N_SLOTS); + /* Delete first the undo log segment in the file */ + bool finished; + mtr_t mtr; + do + { + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); + + finished= true; + + if (buf_block_t *block= + buf_page_get(page_id_t(SRV_TMP_SPACE_ID, undo->hdr_page_no), 0, + RW_X_LATCH, &mtr)) + { + fseg_header_t *file_seg= TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + + block->page.frame; + + finished= fseg_free_step(file_seg, &mtr); + + if (!finished); + else if (buf_block_t *rseg_header= rseg->get(&mtr, nullptr)) + { + static_assert(FIL_NULL == 0xffffffff, "compatibility"); + memset(rseg_header->page.frame + TRX_RSEG + TRX_RSEG_UNDO_SLOTS + + undo->id * TRX_RSEG_SLOT_SIZE, 0xff, 4); + } + } + + mtr.commit(); + } + while (!finished); + + ut_ad(rseg->curr_size > undo->size); + rseg->curr_size-= undo->size; + rseg->latch.wr_unlock(); + ut_free(undo); + undo= 
nullptr; +} + TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr) { /* We already detached from rseg in trx_write_serialisation_history() */ @@ -1300,15 +1306,14 @@ TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr) release_locks(); } - if (mtr) + if (trx_undo_t *&undo= rsegs.m_noredo.undo) { - if (trx_undo_t *&undo= rsegs.m_noredo.undo) - { - ut_ad(undo->rseg == rsegs.m_noredo.rseg); - trx_undo_commit_cleanup(undo); - undo= nullptr; - } + ut_ad(undo->rseg == rsegs.m_noredo.rseg); + trx_commit_cleanup(undo); + } + if (mtr) + { /* NOTE that we could possibly make a group commit more efficient here: call std::this_thread::yield() here to allow also other trxs to come to commit! */ @@ -1346,8 +1351,6 @@ TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr) trx_flush_log_if_needed(commit_lsn, this); } - ut_ad(!rsegs.m_noredo.undo); - savepoints_discard(); if (fts_trx) @@ -1390,7 +1393,7 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr) { ut_ad(!mtr || mtr->is_active()); ut_d(bool aborted= in_rollback && error_state == DB_DEADLOCK); - ut_ad(!mtr == (aborted || !has_logged())); + ut_ad(!mtr == (aborted || !has_logged_persistent())); ut_ad(!mtr || !aborted); if (fts_trx && undo_no) @@ -1416,7 +1419,6 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr) { if (UNIV_UNLIKELY(apply_online_log)) apply_log(); - trx_write_serialisation_history(this, mtr); /* The following call commits the mini-transaction, making the whole transaction committed in the file-based world, at this log @@ -1424,16 +1426,12 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr) the log to disk, but in the logical sense the commit in the file-based data structures (undo logs etc.) happens here. 
- NOTE that transaction numbers, which are assigned only to - transactions with an update undo log, do not necessarily come in + NOTE that transaction numbers do not necessarily come in exactly the same order as commit lsn's, if the transactions have - different rollback segments. To get exactly the same order we - should hold the kernel mutex up to this point, adding to the - contention of the kernel mutex. However, if a transaction T2 is + different rollback segments. However, if a transaction T2 is able to see modifications made by a transaction T1, T2 will always get a bigger transaction number and a bigger commit lsn than T1. */ - - mtr->commit(); + trx_write_serialisation_history(this, mtr); } else if (trx_rseg_t *rseg= rsegs.m_redo.rseg) { @@ -1456,7 +1454,7 @@ void trx_t::commit_persist() mtr_t *mtr= nullptr; mtr_t local_mtr; - if (has_logged()) + if (has_logged_persistent()) { mtr= &local_mtr; local_mtr.start(); diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index 4811d2380aad3..8354d04edc6c5 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -961,47 +961,6 @@ trx_undo_truncate_start( goto loop; } -/** Frees an undo log segment which is not in the history list. 
-@param undo temporary undo log */ -static void trx_undo_seg_free(const trx_undo_t *undo) -{ - ut_ad(undo->id < TRX_RSEG_N_SLOTS); - - trx_rseg_t *const rseg= undo->rseg; - bool finished; - mtr_t mtr; - ut_ad(rseg->space == fil_system.temp_space); - - do - { - mtr.start(); - mtr.set_log_mode(MTR_LOG_NO_REDO); - - finished= true; - - if (buf_block_t *block= - buf_page_get(page_id_t(SRV_TMP_SPACE_ID, undo->hdr_page_no), 0, - RW_X_LATCH, &mtr)) - { - fseg_header_t *file_seg= TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + - block->page.frame; - - finished= fseg_free_step(file_seg, &mtr); - - if (!finished); - else if (buf_block_t* rseg_header = rseg->get(&mtr, nullptr)) - { - static_assert(FIL_NULL == 0xffffffff, "compatibility"); - mtr.memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS + - undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff); - } - } - - mtr.commit(); - } - while (!finished); -} - /*========== UNDO LOG MEMORY COPY INITIALIZATION =====================*/ /** Read an undo log when starting up the database. @@ -1508,27 +1467,6 @@ void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback, trx_undo_write_xid(block, offset, undo->xid, mtr); } -/** Free temporary undo log after commit or rollback. -The information is not needed after a commit or rollback, therefore -the data can be discarded. -@param undo temporary undo log */ -void trx_undo_commit_cleanup(trx_undo_t *undo) -{ - trx_rseg_t *rseg= undo->rseg; - ut_ad(rseg->space == fil_system.temp_space); - rseg->latch.wr_lock(SRW_LOCK_CALL); - - UT_LIST_REMOVE(rseg->undo_list, undo); - ut_ad(undo->state == TRX_UNDO_TO_PURGE); - /* Delete first the undo log segment in the file */ - trx_undo_seg_free(undo); - ut_ad(rseg->curr_size > undo->size); - rseg->curr_size-= undo->size; - - rseg->latch.wr_unlock(); - ut_free(undo); -} - /** At shutdown, frees the undo logs of a transaction. 
*/ void trx_undo_free_at_shutdown(trx_t *trx) { From 4ff5311decd8c239bc5c2b8aabd0f44004aa2fdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 25 Aug 2023 13:23:21 +0300 Subject: [PATCH 014/165] MDEV-30100 preparation: Simplify InnoDB transaction commit further trx_commit_complete_for_mysql(): Remove some conditions. We will rely on trx_t::commit_lsn. trx_t::must_flush_log_later: Remove. trx_commit_complete_for_mysql() can simply check for trx_t::flush_log_later. trx_t::commit_in_memory(): Set commit_lsn=0 if the log was written. trx_flush_log_if_needed_low(): Renamed to trx_flush_log_if_needed(). Assert that innodb_flush_log_at_trx_commit!=0 was checked by the caller and that the transaction is not in XA PREPARE state. Unconditionally flush the log for data dictionary transactions, to ensure the correct processing of ddl_recovery.log. trx_write_serialisation_history(): Move some code from trx_purge_add_undo_to_history(). trx_prepare(): Invoke log_write_up_to() directly if needed. innobase_commit_ordered_2(): Simplify some conditions. A read-write transaction will always carry nonzero trx_t::id. Let us unconditionally reset mysql_log_file_name, flush_log_later after trx_t::commit() was invoked. --- storage/innobase/handler/ha_innodb.cc | 11 +-- storage/innobase/include/trx0trx.h | 16 +---- storage/innobase/trx/trx0purge.cc | 7 +- storage/innobase/trx/trx0trx.cc | 99 ++++++++++----------------- 4 files changed, 45 insertions(+), 88 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 9dc602f84e93b..0db27a1e2d04b 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4462,9 +4462,7 @@ innobase_commit_ordered_2( { DBUG_ENTER("innobase_commit_ordered_2"); - const bool read_only = trx->read_only || trx->id == 0; - - if (!read_only) { + if (trx->id) { /* The following call reads the binary log position of the transaction being committed. 
@@ -4494,11 +4492,8 @@ innobase_commit_ordered_2( #endif /* WITH_WSREP */ innobase_commit_low(trx); - - if (!read_only) { - trx->mysql_log_file_name = NULL; - trx->flush_log_later = false; - } + trx->mysql_log_file_name = NULL; + trx->flush_log_later = false; DBUG_VOID_RETURN; } diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 5b2b2264a468d..388958836079a 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -181,13 +181,9 @@ note that the trx may have been committed before the caller acquires trx_t::mutex @retval NULL if no match */ trx_t* trx_get_trx_by_xid(const XID* xid); -/**********************************************************************//** -If required, flushes the log to disk if we called trx_commit_for_mysql() -with trx->flush_log_later == TRUE. */ -void -trx_commit_complete_for_mysql( -/*==========================*/ - trx_t* trx); /*!< in/out: transaction */ +/** Durably write log until trx->commit_lsn +(if trx_t::commit_in_memory() was invoked with flush_log_later=true). */ +void trx_commit_complete_for_mysql(trx_t *trx); /**********************************************************************//** Marks the latest SQL statement ended. */ void @@ -772,12 +768,6 @@ struct trx_t : ilist_node<> defer flush of the logs to disk until after we release the mutex. 
*/ - bool must_flush_log_later;/*!< set in commit() - if flush_log_later was - set and redo log was written; - in that case we will - flush the log in - trx_commit_complete_for_mysql() */ ulint duplicates; /*!< TRX_DUP_IGNORE | TRX_DUP_REPLACE */ /** whether this modifies InnoDB dictionary tables */ bool dict_operation; diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index c2fdda33090ac..9c464901e1482 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -268,12 +268,7 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1); ut_ad(rseg->needs_purge > trx->id); - - if (rseg->last_page_no == FIL_NULL) - { - rseg->last_page_no= undo->hdr_page_no; - rseg->set_last_commit(undo->hdr_offset, trx->rw_trx_hash_element->no); - } + ut_ad(rseg->last_page_no != FIL_NULL); rseg->history_size++; diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index a35a035a7f832..ebffd87647de2 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -419,7 +419,6 @@ void trx_t::free() MEM_NOACCESS(&active_commit_ordered, sizeof active_commit_ordered); MEM_NOACCESS(&check_unique_secondary, sizeof check_unique_secondary); MEM_NOACCESS(&flush_log_later, sizeof flush_log_later); - MEM_NOACCESS(&must_flush_log_later, sizeof must_flush_log_later); MEM_NOACCESS(&duplicates, sizeof duplicates); MEM_NOACCESS(&dict_operation, sizeof dict_operation); MEM_NOACCESS(&dict_operation_lock_mode, sizeof dict_operation_lock_mode); @@ -993,12 +992,14 @@ static void trx_write_serialisation_history(trx_t *trx, mtr_t *mtr) { mysql_mutex_lock(&purge_sys.pq_mutex); trx_sys.assign_new_trx_no(trx); + const trx_id_t end{trx->rw_trx_hash_element->no}; /* If the rollback segment is not empty, trx->no cannot be less than any trx_t::no already in rseg. 
User threads only produce events when a rollback segment is empty. */ - purge_sys.purge_queue.push(TrxUndoRsegs(trx->rw_trx_hash_element->no, - *rseg)); + purge_sys.purge_queue.push(TrxUndoRsegs{end, *rseg}); mysql_mutex_unlock(&purge_sys.pq_mutex); + rseg->last_page_no= undo->hdr_page_no; + rseg->set_last_commit(undo->hdr_offset, end); } else trx_sys.assign_new_trx_no(trx); @@ -1086,50 +1087,30 @@ extern "C" void thd_decrement_pending_ops(MYSQL_THD); @param trx transaction; if trx->state is PREPARED, the function will also wait for the flush to complete. */ -static void trx_flush_log_if_needed_low(lsn_t lsn, const trx_t *trx) +static void trx_flush_log_if_needed(lsn_t lsn, trx_t *trx) { - if (!srv_flush_log_at_trx_commit) - return; + ut_ad(srv_flush_log_at_trx_commit); + ut_ad(trx->state != TRX_STATE_PREPARED); if (log_sys.get_flushed_lsn() > lsn) return; - const bool flush= srv_file_flush_method != SRV_NOSYNC && - (srv_flush_log_at_trx_commit & 1); - - if (trx->state == TRX_STATE_PREPARED) - { - /* XA, which is used with binlog as well. - Be conservative, use synchronous wait.*/ -sync: - log_write_up_to(lsn, flush); - return; - } + const bool flush= trx->dict_operation || + (srv_file_flush_method != SRV_NOSYNC && + (srv_flush_log_at_trx_commit & 1)); completion_callback cb; - if ((cb.m_param = thd_increment_pending_ops(trx->mysql_thd))) + if ((cb.m_param= thd_increment_pending_ops(trx->mysql_thd))) { cb.m_callback = (void (*)(void *)) thd_decrement_pending_ops; log_write_up_to(lsn, flush, false, &cb); } else - goto sync; -} - -/**********************************************************************//** -If required, flushes the log to disk based on the value of -innodb_flush_log_at_trx_commit. */ -static -void -trx_flush_log_if_needed( -/*====================*/ - lsn_t lsn, /*!< in: lsn up to which logs are to be - flushed. 
*/ - trx_t* trx) /*!< in/out: transaction */ -{ - trx->op_info = "flushing log"; - trx_flush_log_if_needed_low(lsn, trx); - trx->op_info = ""; + { + trx->op_info= "flushing log"; + log_write_up_to(lsn, flush); + trx->op_info= ""; + } } /** Process tables that were modified by the committing transaction. */ @@ -1239,7 +1220,6 @@ TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr) { /* We already detached from rseg in trx_write_serialisation_history() */ ut_ad(!rsegs.m_redo.undo); - must_flush_log_later= false; read_view.close(); if (is_autocommit_non_locking()) @@ -1342,13 +1322,11 @@ TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr) gathering. */ commit_lsn= undo_no || !xid.is_null() ? mtr->commit_lsn() : 0; - if (!commit_lsn) - /* Nothing to be done. */; - else if (flush_log_later) - /* Do nothing yet */ - must_flush_log_later= true; - else if (srv_flush_log_at_trx_commit) + if (commit_lsn && !flush_log_later && srv_flush_log_at_trx_commit) + { trx_flush_log_if_needed(commit_lsn, this); + commit_lsn= 0; + } } savepoints_discard(); @@ -1595,24 +1573,21 @@ trx_commit_for_mysql( return(DB_CORRUPTION); } -/**********************************************************************//** -If required, flushes the log to disk if we called trx_commit_for_mysql() -with trx->flush_log_later == TRUE. */ -void -trx_commit_complete_for_mysql( -/*==========================*/ - trx_t* trx) /*!< in/out: transaction */ +/** Durably write log until trx->commit_lsn +(if trx_t::commit_in_memory() was invoked with flush_log_later=true). 
*/ +void trx_commit_complete_for_mysql(trx_t *trx) { - if (trx->id != 0 - || !trx->must_flush_log_later - || (srv_flush_log_at_trx_commit == 1 && trx->active_commit_ordered)) { - - return; - } - - trx_flush_log_if_needed(trx->commit_lsn, trx); - - trx->must_flush_log_later = false; + const lsn_t lsn= trx->commit_lsn; + if (!lsn) + return; + switch (srv_flush_log_at_trx_commit) { + case 0: + return; + case 1: + if (trx->active_commit_ordered) + return; + } + trx_flush_log_if_needed(lsn, trx); } /**********************************************************************//** @@ -1873,8 +1848,10 @@ trx_prepare( gather behind one doing the physical log write to disk. We must not be holding any mutexes or latches here. */ - - trx_flush_log_if_needed(lsn, trx); + if (auto f = srv_flush_log_at_trx_commit) { + log_write_up_to(lsn, (f & 1) && srv_file_flush_method + != SRV_NOSYNC); + } if (!UT_LIST_GET_LEN(trx->lock.trx_locks) || trx->isolation_level == TRX_ISO_SERIALIZABLE) { From f7780a8eb8aaa7e9ea9b1bb5b3b214b07eb4190f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 25 Aug 2023 13:41:54 +0300 Subject: [PATCH 015/165] MDEV-30100: Assertion purge_sys.tail.trx_no <= purge_sys.rseg->last_trx_no() trx_t::commit_empty(): A special case of transaction "commit" when the transaction was actually rolled back or the persistent undo log is empty. In this case, we need to change the undo log header state to TRX_UNDO_CACHED and move the undo log from rseg->undo_list to rseg->undo_cached for fast reuse. Furthermore, unless this is the only undo log record in the page, we will remove the record and rewind TRX_UNDO_PAGE_START, TRX_UNDO_PAGE_FREE, TRX_UNDO_LAST_LOG. We must also ensure that the system-wide transaction identifier will be persisted up to this->id, so that there will not be warnings or errors due to a PAGE_MAX_TRX_ID being too large. 
We might have modified secondary index pages before being rolled back, and any changes of PAGE_MAX_TRX_ID are never rolled back. Even though it is not going to be written persistently anywhere, we will invoke trx_sys.assign_new_trx_no(this), so that in the test innodb.instant_alter everything will be purged as expected. trx_t::write_serialisation_history(): Renamed from trx_write_serialisation_history(). If there is no undo log, invoke commit_empty(). trx_purge_add_undo_to_history(): Simplify an assertion and remove a comment. This function will not be invoked on an empty undo log anymore. trx_undo_header_create(): Add a debug assertion. trx_undo_mem_create_at_db_start(): Remove a duplicated assignment. Reviewed by: Vladislav Lesin Tested by: Matthias Leich --- .../suite/innodb/r/instant_alter_debug.result | 2 +- .../suite/innodb/t/instant_alter_debug.test | 2 +- storage/innobase/include/trx0trx.h | 10 +- storage/innobase/trx/trx0purge.cc | 12 +- storage/innobase/trx/trx0trx.cc | 171 ++++++++++++++++-- storage/innobase/trx/trx0undo.cc | 5 +- 6 files changed, 172 insertions(+), 30 deletions(-) diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result index 0b6d44aedc3ec..7c378aa62e3a4 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug.result +++ b/mysql-test/suite/innodb/r/instant_alter_debug.result @@ -182,7 +182,7 @@ ROLLBACK; connection stop_purge; COMMIT; connection default; -InnoDB 2 transactions not purged +InnoDB 1 transactions not purged SET DEBUG_SYNC='now SIGNAL logged'; connection ddl; connection default; diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test index 917226a6c26af..11d6961f918a1 100644 --- a/mysql-test/suite/innodb/t/instant_alter_debug.test +++ b/mysql-test/suite/innodb/t/instant_alter_debug.test @@ -200,7 +200,7 @@ COMMIT; connection default; # Wait for purge to empty the table. 
-let $wait_all_purged=2; +let $wait_all_purged=1; --source include/wait_all_purged.inc let $wait_all_purged=0; diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 388958836079a..36dbde99c68e7 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -959,11 +959,19 @@ struct trx_t : ilist_node<> /** Commit the transaction in a mini-transaction. @param mtr mini-transaction (if there are any persistent modifications) */ void commit_low(mtr_t *mtr= nullptr); + /** Commit an empty transaction. + @param mtr mini-transaction */ + void commit_empty(mtr_t *mtr); + /** Commit an empty transaction. + @param mtr mini-transaction */ + /** Assign the transaction its history serialisation number and write the + UNDO log to the assigned rollback segment. + @param mtr mini-transaction */ + inline void write_serialisation_history(mtr_t *mtr); public: /** Commit the transaction. */ void commit(); - /** Try to drop a persistent table. @param table persistent table @param fk whether to drop FOREIGN KEY metadata diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 9c464901e1482..b1960e5892447 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -307,11 +307,7 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) undo= nullptr; - /* After the purge thread has been given permission to exit, - we may roll back transactions (trx->undo_no==0) - in THD::cleanup() invoked from unlink_thd() in fast shutdown, - or in trx_rollback_recovered() in slow shutdown. - + /* Before any transaction-generating background threads or the purge have been started, we can start transactions in row_merge_drop_temp_indexes(), and roll back recovered transactions. @@ -323,12 +319,10 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) During fast shutdown, we may also continue to execute user transactions. 
*/ - ut_ad(srv_undo_sources || trx->undo_no == 0 || + ut_ad(srv_undo_sources || srv_fast_shutdown || (!purge_sys.enabled() && (srv_is_being_started || - trx_rollback_is_active || - srv_force_recovery >= SRV_FORCE_NO_BACKGROUND)) || - srv_fast_shutdown); + srv_force_recovery >= SRV_FORCE_NO_BACKGROUND))); #ifdef WITH_WSREP if (wsrep_is_wsrep_xid(&trx->xid)) diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index ebffd87647de2..75bbd9889c1aa 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -968,15 +968,150 @@ trx_start_low( ut_a(trx->error_state == DB_SUCCESS); } +/** Release an empty undo log that was associated with a transaction. */ +ATTRIBUTE_COLD +void trx_t::commit_empty(mtr_t *mtr) +{ + trx_rseg_t *rseg= rsegs.m_redo.rseg; + trx_undo_t *&undo= rsegs.m_redo.undo; + + ut_ad(undo->state == TRX_UNDO_ACTIVE || undo->state == TRX_UNDO_PREPARED); + ut_ad(undo->size == 1); + + if (buf_block_t *u= + buf_page_get(page_id_t(rseg->space->id, undo->hdr_page_no), 0, + RW_X_LATCH, mtr)) + { + ut_d(const uint16_t state= + mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + u->page.frame)); + ut_ad(state == undo->state || state == TRX_UNDO_ACTIVE); + static_assert(TRX_UNDO_PAGE_START + 2 == TRX_UNDO_PAGE_FREE, + "compatibility"); + ut_ad(!memcmp(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START + u->page.frame, + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + u->page.frame, 2)); + ut_ad(mach_read_from_4(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + FLST_PREV + + FIL_ADDR_PAGE + u->page.frame) == FIL_NULL); + ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + FLST_PREV + + FIL_ADDR_BYTE + u->page.frame) == 0); + ut_ad(!memcmp(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + FLST_PREV + + u->page.frame, + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + FLST_NEXT + + u->page.frame, FIL_ADDR_SIZE)); + + /* Delete the last undo log header, which must be for this transaction. 
+ + An undo segment can be reused (TRX_UNDO_CACHED) only if it + comprises of one page and that single page contains enough space + for the undo log header of a subsequent transaction. See + trx_purge_add_undo_to_history(), which is executed when committing + a nonempty transaction. + + If we simply changed the undo page state to TRX_UNDO_CACHED, + then trx_undo_reuse_cached() could run out of space. We will + release the space consumed by our empty undo log to avoid that. */ + for (byte *last= &u->page.frame[TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE], + *prev= nullptr;;) + { + /* TRX_UNDO_PREV_LOG is only being read in debug assertions, and + written in trx_undo_header_create(). To remain compatible with + possibly corrupted old data files, we will not read the field + TRX_UNDO_PREV_LOG but instead rely on TRX_UNDO_NEXT_LOG. */ + ut_ad(mach_read_from_2(TRX_UNDO_PREV_LOG + last) == + (reinterpret_cast(prev) & (srv_page_size - 1))); + + if (uint16_t next= mach_read_from_2(TRX_UNDO_NEXT_LOG + last)) + { + ut_ad(ulint{next} + TRX_UNDO_LOG_XA_HDR_SIZE < srv_page_size - 100); + ut_ad(&u->page.frame[next] > last); + ut_ad(mach_read_from_2(TRX_UNDO_LOG_START + last) <= next); + prev= last; + last= &u->page.frame[next]; + continue; + } + + ut_ad(mach_read_from_8(TRX_UNDO_TRX_ID + last) == id); + ut_ad(!mach_read_from_8(TRX_UNDO_TRX_NO + last)); + ut_ad(!memcmp(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START + u->page.frame, + TRX_UNDO_LOG_START + last, 2)); + + if (prev) + { + mtr->memcpy(*u, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START + + u->page.frame, prev + TRX_UNDO_LOG_START, 2); + const ulint free= page_offset(last); + mtr->write<2>(*u, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + u->page.frame, free); + mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + u->page.frame, + TRX_UNDO_CACHED); + mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG + u->page.frame, + page_offset(prev)); + mtr->write<2>(*u, prev + TRX_UNDO_NEXT_LOG, 0U); + mtr->memset(u, free, srv_page_size - 
FIL_PAGE_DATA_END - free, 0); + + /* We may have updated PAGE_MAX_TRX_ID on secondary index pages + to this->id. Ensure that trx_sys.m_max_trx_id will be recovered + correctly, even though we removed our undo log record along + with the TRX_UNDO_TRX_ID above. */ + + /* Below, we are acquiring rseg_header->page.lock after + u->page.lock (the opposite of trx_purge_add_undo_to_history()). + This is fine, because both functions are holding exclusive + rseg->latch. */ + + if (mach_read_from_8(prev + TRX_UNDO_TRX_NO) >= id); + else if (buf_block_t *rseg_header= rseg->get(mtr, nullptr)) + { + byte *m= TRX_RSEG + TRX_RSEG_MAX_TRX_ID + rseg_header->page.frame; + + do + { + if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT + + rseg_header->page.frame))) + /* This must have been upgraded from before MariaDB 10.3.5. */ + trx_rseg_format_upgrade(rseg_header, mtr); + else if (mach_read_from_8(m) >= id) + continue; + mtr->write<8>(*rseg_header, m, id); + } + while (0); + } + } + else + /* Our undo log header was right after the undo log segment header. + This page should have been created by trx_undo_create(), not + returned by trx_undo_reuse_cached(). + + We retain the dummy empty log in order to remain compatible with + trx_undo_mem_create_at_db_start(). This page will remain available + to trx_undo_reuse_cached(), and it will eventually be freed by + trx_purge_truncate_rseg_history(). */ + mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + u->page.frame, + TRX_UNDO_CACHED); + break; + } + } + else + ut_ad("undo log page was not found" == 0); + + UT_LIST_REMOVE(rseg->undo_list, undo); + UT_LIST_ADD_FIRST(rseg->undo_cached, undo); + undo->state= TRX_UNDO_CACHED; + undo= nullptr; + + /* We must assign an "end" identifier even though we are not going + to persistently write it anywhere, to make sure that the purge of + history will not be stuck. 
*/ + trx_sys.assign_new_trx_no(this); +} + /** Assign the transaction its history serialisation number and write the UNDO log to the assigned rollback segment. -@param trx persistent transaction @param mtr mini-transaction */ -static void trx_write_serialisation_history(trx_t *trx, mtr_t *mtr) +inline void trx_t::write_serialisation_history(mtr_t *mtr) { - ut_ad(!trx->read_only); - trx_rseg_t *rseg= trx->rsegs.m_redo.rseg; - trx_undo_t *&undo= trx->rsegs.m_redo.undo; + ut_ad(!read_only); + trx_rseg_t *rseg= rsegs.m_redo.rseg; + trx_undo_t *&undo= rsegs.m_redo.undo; if (UNIV_LIKELY(undo != nullptr)) { MONITOR_INC(MONITOR_TRX_COMMIT_UNDO); @@ -988,26 +1123,32 @@ static void trx_write_serialisation_history(trx_t *trx, mtr_t *mtr) ut_ad(undo->rseg == rseg); /* Assign the transaction serialisation number and add any undo log to the purge queue. */ - if (rseg->last_page_no == FIL_NULL) + if (UNIV_UNLIKELY(!undo_no)) + { + /* The transaction was rolled back. */ + commit_empty(mtr); + goto done; + } + else if (rseg->last_page_no == FIL_NULL) { mysql_mutex_lock(&purge_sys.pq_mutex); - trx_sys.assign_new_trx_no(trx); - const trx_id_t end{trx->rw_trx_hash_element->no}; - /* If the rollback segment is not empty, trx->no cannot be less - than any trx_t::no already in rseg. User threads only produce - events when a rollback segment is empty. */ + trx_sys.assign_new_trx_no(this); + const trx_id_t end{rw_trx_hash_element->no}; + /* end cannot be less than anything in rseg. User threads only + produce events when a rollback segment is empty. 
*/ purge_sys.purge_queue.push(TrxUndoRsegs{end, *rseg}); mysql_mutex_unlock(&purge_sys.pq_mutex); rseg->last_page_no= undo->hdr_page_no; rseg->set_last_commit(undo->hdr_offset, end); } else - trx_sys.assign_new_trx_no(trx); + trx_sys.assign_new_trx_no(this); UT_LIST_REMOVE(rseg->undo_list, undo); /* Change the undo log segment state from TRX_UNDO_ACTIVE, to define the transaction as committed in the file based domain, at mtr->commit_lsn() obtained in mtr->commit() below. */ - trx_purge_add_undo_to_history(trx, undo, mtr); + trx_purge_add_undo_to_history(this, undo, mtr); + done: rseg->release(); rseg->latch.wr_unlock(); } @@ -1218,7 +1359,7 @@ ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo) TRANSACTIONAL_INLINE inline void trx_t::commit_in_memory(const mtr_t *mtr) { - /* We already detached from rseg in trx_write_serialisation_history() */ + /* We already detached from rseg in write_serialisation_history() */ ut_ad(!rsegs.m_redo.undo); read_view.close(); @@ -1409,7 +1550,7 @@ TRANSACTIONAL_TARGET void trx_t::commit_low(mtr_t *mtr) different rollback segments. However, if a transaction T2 is able to see modifications made by a transaction T1, T2 will always get a bigger transaction number and a bigger commit lsn than T1. 
*/ - trx_write_serialisation_history(this, mtr); + write_serialisation_history(mtr); } else if (trx_rseg_t *rseg= rsegs.m_redo.rseg) { diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index 8354d04edc6c5..657a7c48ff194 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -497,8 +497,7 @@ trx_undo_seg_create(fil_space_t *space, buf_block_t *rseg_hdr, ulint *id, ut_ad(slot_no < TRX_RSEG_N_SLOTS); - *err = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_UNDO, - mtr); + *err = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_UNDO, mtr); if (UNIV_UNLIKELY(*err != DB_SUCCESS)) { return NULL; } @@ -569,6 +568,7 @@ static uint16_t trx_undo_header_create(buf_block_t *undo_page, trx_id_t trx_id, start, 2); uint16_t prev_log= mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG + undo_page->page.frame); + ut_ad(prev_log < free); alignas(4) byte buf[4]; mach_write_to_2(buf, TRX_UNDO_ACTIVE); mach_write_to_2(buf + 2, free); @@ -1022,7 +1022,6 @@ trx_undo_mem_create_at_db_start(trx_rseg_t *rseg, ulint id, uint32_t page_no) case TRX_UNDO_ACTIVE: case TRX_UNDO_PREPARED: if (UNIV_LIKELY(type != 1)) { - trx_no = trx_id + 1; break; } sql_print_error("InnoDB: upgrade from older version than" From 08a549c33d0530280b77e42bd5b13dbee1ca6d85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 25 Aug 2023 13:44:59 +0300 Subject: [PATCH 016/165] Clean up buf_LRU_remove_hashed() buf_LRU_block_remove_hashed(): Test for "not ROW_FORMAT=COMPRESSED" first, because in that case we can assume that an uncompressed page exists. This removes a condition from the likely code branch. 
--- storage/innobase/buf/buf0lru.cc | 92 ++++++++++++++++----------------- 1 file changed, 45 insertions(+), 47 deletions(-) diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 9aa5a7d73dcf0..6c86114a8d07f 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1092,59 +1092,57 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, buf_pool.freed_page_clock += 1; - if (UNIV_LIKELY(bpage->frame != nullptr)) { + if (UNIV_LIKELY(!bpage->zip.data)) { MEM_CHECK_ADDRESSABLE(bpage, sizeof(buf_block_t)); MEM_CHECK_ADDRESSABLE(bpage->frame, srv_page_size); buf_block_modify_clock_inc((buf_block_t*) bpage); - if (UNIV_LIKELY_NULL(bpage->zip.data)) { - const page_t* page = bpage->frame; - - ut_a(!zip || !bpage->oldest_modification()); - ut_ad(bpage->zip_size()); - /* Skip consistency checks if the page was freed. - In recovery, we could get a sole FREE_PAGE record - and nothing else, for a ROW_FORMAT=COMPRESSED page. - Its contents would be garbage. */ - if (!bpage->is_freed()) - switch (fil_page_get_type(page)) { - case FIL_PAGE_TYPE_ALLOCATED: - case FIL_PAGE_INODE: - case FIL_PAGE_IBUF_BITMAP: - case FIL_PAGE_TYPE_FSP_HDR: - case FIL_PAGE_TYPE_XDES: - /* These are essentially uncompressed pages. */ - if (!zip) { - /* InnoDB writes the data to the - uncompressed page frame. Copy it - to the compressed page, which will - be preserved. 
*/ - memcpy(bpage->zip.data, page, - bpage->zip_size()); - } - break; - case FIL_PAGE_TYPE_ZBLOB: - case FIL_PAGE_TYPE_ZBLOB2: - case FIL_PAGE_INDEX: - case FIL_PAGE_RTREE: - break; - default: - ib::error() << "The compressed page to be" - " evicted seems corrupt:"; - ut_print_buf(stderr, page, srv_page_size); - - ib::error() << "Possibly older version of" - " the page:"; - - ut_print_buf(stderr, bpage->zip.data, - bpage->zip_size()); - putc('\n', stderr); - ut_error; + } else if (const page_t *page = bpage->frame) { + MEM_CHECK_ADDRESSABLE(bpage, sizeof(buf_block_t)); + MEM_CHECK_ADDRESSABLE(bpage->frame, srv_page_size); + buf_block_modify_clock_inc((buf_block_t*) bpage); + + ut_a(!zip || !bpage->oldest_modification()); + ut_ad(bpage->zip_size()); + /* Skip consistency checks if the page was freed. + In recovery, we could get a sole FREE_PAGE record + and nothing else, for a ROW_FORMAT=COMPRESSED page. + Its contents would be garbage. */ + if (!bpage->is_freed()) + switch (fil_page_get_type(page)) { + case FIL_PAGE_TYPE_ALLOCATED: + case FIL_PAGE_INODE: + case FIL_PAGE_IBUF_BITMAP: + case FIL_PAGE_TYPE_FSP_HDR: + case FIL_PAGE_TYPE_XDES: + /* These are essentially uncompressed pages. */ + if (!zip) { + /* InnoDB writes the data to the + uncompressed page frame. Copy it + to the compressed page, which will + be preserved. 
*/ + memcpy(bpage->zip.data, page, + bpage->zip_size()); } - } else { - goto evict_zip; + break; + case FIL_PAGE_TYPE_ZBLOB: + case FIL_PAGE_TYPE_ZBLOB2: + case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: + break; + default: + ib::error() << "The compressed page to be" + " evicted seems corrupt:"; + ut_print_buf(stderr, page, srv_page_size); + + ib::error() << "Possibly older version of" + " the page:"; + + ut_print_buf(stderr, bpage->zip.data, + bpage->zip_size()); + putc('\n', stderr); + ut_error; } } else { -evict_zip: ut_a(!bpage->oldest_modification()); MEM_CHECK_ADDRESSABLE(bpage->zip.data, bpage->zip_size()); } From c438284863db2ccba8a04437c941a5c8a2d9225b Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Fri, 25 Aug 2023 17:22:17 +0530 Subject: [PATCH 017/165] MDEV-31835 Remove unnecesary extra HA_EXTRA_IGNORE_INSERT call - HA_EXTRA_IGNORE_INSERT call is being called for every inserted row, and on partitioned tables on every row * every partition. This leads to slowness during load..data operation - Under bulk operation, multiple insert statement error handling will end up emptying the table. This behaviour introduced by the commit 8ea923f55b7666a359ac2c54f6c10e8609d16846 (MDEV-24818). This makes the HA_EXTRA_IGNORE_INSERT call redundant. We can use the same behavior for insert..ignore statement as well. - Removed the extra call HA_EXTRA_IGNORE_INSERT as the solution to improve the performance of load command. --- include/my_base.h | 4 +--- sql/ha_partition.cc | 1 - sql/sql_insert.cc | 3 --- sql/sql_table.cc | 3 --- storage/innobase/handler/ha_innodb.cc | 7 ------- storage/mroonga/ha_mroonga.cpp | 3 --- 6 files changed, 1 insertion(+), 20 deletions(-) diff --git a/include/my_base.h b/include/my_base.h index 038045dde2a47..1d59262f680be 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -218,9 +218,7 @@ enum ha_extra_function { /** Start writing rows during ALTER TABLE...ALGORITHM=COPY. 
*/ HA_EXTRA_BEGIN_ALTER_COPY, /** Finish writing rows during ALTER TABLE...ALGORITHM=COPY. */ - HA_EXTRA_END_ALTER_COPY, - /** IGNORE is being used for the insert statement */ - HA_EXTRA_IGNORE_INSERT + HA_EXTRA_END_ALTER_COPY }; /* Compatible option, to be deleted in 6.0 */ diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 66418679794ca..91f9c5360960d 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -9474,7 +9474,6 @@ int ha_partition::extra(enum ha_extra_function operation) case HA_EXTRA_STARTING_ORDERED_INDEX_SCAN: case HA_EXTRA_BEGIN_ALTER_COPY: case HA_EXTRA_END_ALTER_COPY: - case HA_EXTRA_IGNORE_INSERT: DBUG_RETURN(loop_partitions(extra_cb, &operation)); default: { diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 73044c60c2054..b5c8542ca9210 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -2176,9 +2176,6 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink) goto after_trg_or_ignored_err; } - /* Notify the engine about insert ignore operation */ - if (info->handle_duplicates == DUP_ERROR && info->ignore) - table->file->extra(HA_EXTRA_IGNORE_INSERT); after_trg_n_copied_inc: info->copied++; thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index f7588890a4d2f..15c853d8b7b00 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -11301,9 +11301,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, } else { - /* In case of alter ignore, notify the engine about it. 
*/ - if (ignore) - to->file->extra(HA_EXTRA_IGNORE_INSERT); DEBUG_SYNC(thd, "copy_data_between_tables_before"); found_count++; mysql_stage_set_work_completed(thd->m_stage_progress_psi, found_count); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 0db27a1e2d04b..b440613c13fc4 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -15732,13 +15732,6 @@ ha_innobase::extra( case HA_EXTRA_RESET_STATE: reset_template(); trx->duplicates = 0; - /* fall through */ - case HA_EXTRA_IGNORE_INSERT: - /* HA_EXTRA_IGNORE_INSERT is very similar to - HA_EXTRA_IGNORE_DUP_KEY, but with one crucial difference: - we want !trx->duplicates for INSERT IGNORE so that - row_ins_duplicate_error_in_clust() will acquire a - shared lock instead of an exclusive lock. */ stmt_boundary: trx->end_bulk_insert(*m_prebuilt->table); trx->bulk_insert = false; diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index 78a48b32f1024..a39de1d991bcc 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -596,9 +596,6 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation) inspected = "HA_EXTRA_NO_AUTOINC_LOCKING"; break; #endif - case HA_EXTRA_IGNORE_INSERT: - inspected = "HA_EXTRA_IGNORE_INSERT"; - break; } return inspected; } From 1fde785315ec6d575d0cd5c3e33d53a5d83e3e00 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 11 Aug 2023 12:32:01 +0700 Subject: [PATCH 018/165] MDEV-31890: Compilation failing on MacOS (unknown warning option -Wno-unused-but-set-variable) For clang compiler the compiler's flag -Wno-unused-but-set-variable was set based on compiler version. This approach could result in false positive detection for presence of compiler option since only first three groups of digits in compiler version taken into account and it could lead to inaccuracy in determining of supported compiler's features. 
Correct way to detect options supported by a compiler is to use the macros MY_CHECK_CXX_COMPILER_FLAG and to check the result of variable with prefix have_CXX__ So, to check whether compiler does support the option -Wno-unused-but-set-variable the macros MY_CHECK_CXX_COMPILER_FLAG(-Wno-unused-but-set-variable) should be called and the result variable have_CXX__Wno_unused_but_set_variable be tested for assigned value. --- sql/CMakeLists.txt | 4 ++-- storage/innobase/CMakeLists.txt | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 968cab85e52e9..9656245b8c0e3 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -153,8 +153,8 @@ SET (SQL_SOURCE ${MYSYS_LIBWRAP_SOURCE} ) -IF(CMAKE_C_COMPILER_ID MATCHES "Clang" AND - NOT CMAKE_C_COMPILER_VERSION VERSION_LESS "13.0.0") +MY_CHECK_CXX_COMPILER_FLAG(-Wno-unused-but-set-variable) +IF(have_CXX__Wno_unused_but_set_variable) ADD_COMPILE_FLAGS(${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.cc COMPILE_FLAGS "-Wno-unused-but-set-variable") diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index ae19773d98b57..514c14557c2aa 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -358,8 +358,9 @@ IF(MSVC) # on generated file. 
TARGET_COMPILE_OPTIONS(innobase PRIVATE "/wd4065") ENDIF() -IF(CMAKE_C_COMPILER_ID MATCHES "Clang" AND - NOT CMAKE_C_COMPILER_VERSION VERSION_LESS "13.0.0") + +MY_CHECK_CXX_COMPILER_FLAG(-Wno-unused-but-set-variable) +IF(have_CXX__Wno_unused_but_set_variable) ADD_COMPILE_FLAGS(pars/pars0grm.cc fts/fts0pars.cc COMPILE_FLAGS "-Wno-unused-but-set-variable") ENDIF() From e938d7c18f6f117b63fcfc50dd975029d92409a7 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Mon, 28 Aug 2023 19:02:23 +0530 Subject: [PATCH 019/165] MDEV-32028 InnoDB scrubbing doesn't write zero while freeing the extent Problem: ======== InnoDB fails to mark the page status as FREED during freeing of an extent of a segment. This behaviour affects scrubbing and doesn't write all zeroes in file even though pages are freed. Solution: ======== InnoDB should mark the page status as FREED before reinitialize the extent descriptor entry. --- mysql-test/suite/innodb/r/scrub_debug.result | 22 ++++++++++++++ mysql-test/suite/innodb/t/scrub_debug.test | 31 ++++++++++++++++++++ storage/innobase/fsp/fsp0fsp.cc | 4 +-- 3 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 mysql-test/suite/innodb/r/scrub_debug.result create mode 100644 mysql-test/suite/innodb/t/scrub_debug.test diff --git a/mysql-test/suite/innodb/r/scrub_debug.result b/mysql-test/suite/innodb/r/scrub_debug.result new file mode 100644 index 0000000000000..5fbf1250b212a --- /dev/null +++ b/mysql-test/suite/innodb/r/scrub_debug.result @@ -0,0 +1,22 @@ +SET @save_debug=@@GLOBAL.INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG; +SET @save_scrub=@@GLOBAL.INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED; +SET @save_freq=@@GLOBAL.INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY; +SET GLOBAL INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY=1; +SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=1; +SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=2; +CREATE TABLE t1(f1 INT AUTO_INCREMENT PRIMARY KEY, +f2 VARCHAR(256) GENERATED ALWAYS as('repairman'), +INDEX 
idx(f2))ENGINE= InnoDB; +INSERT INTO t1(f1) SELECT seq FROM seq_1_to_50; +FLUSH TABLE t1 FOR EXPORT; +FOUND 108 /repairman/ in t1.ibd +UNLOCK TABLES; +ALTER TABLE t1 DROP INDEX idx; +InnoDB 0 transactions not purged +FLUSH TABLE t1 FOR EXPORT; +NOT FOUND /repairman/ in t1.ibd +UNLOCK TABLES; +DROP TABLE t1; +SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; +SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; +SET GLOBAL INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY = @save_freq; diff --git a/mysql-test/suite/innodb/t/scrub_debug.test b/mysql-test/suite/innodb/t/scrub_debug.test new file mode 100644 index 0000000000000..141b0f0c5baaf --- /dev/null +++ b/mysql-test/suite/innodb/t/scrub_debug.test @@ -0,0 +1,31 @@ +--source include/have_innodb.inc +--source include/have_sequence.inc +--source include/have_debug.inc + +SET @save_debug=@@GLOBAL.INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG; +SET @save_scrub=@@GLOBAL.INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED; +SET @save_freq=@@GLOBAL.INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY; + +SET GLOBAL INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY=1; +SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=1; +SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=2; +let $MYSQLD_DATADIR=`select @@datadir`; +CREATE TABLE t1(f1 INT AUTO_INCREMENT PRIMARY KEY, + f2 VARCHAR(256) GENERATED ALWAYS as('repairman'), + INDEX idx(f2))ENGINE= InnoDB; +INSERT INTO t1(f1) SELECT seq FROM seq_1_to_50; +FLUSH TABLE t1 FOR EXPORT; +let SEARCH_PATTERN= repairman; +let SEARCH_FILE= $MYSQLD_DATADIR/test/t1.ibd; +-- source include/search_pattern_in_file.inc +UNLOCK TABLES; + +ALTER TABLE t1 DROP INDEX idx; +-- source include/wait_all_purged.inc +FLUSH TABLE t1 FOR EXPORT; +-- source include/search_pattern_in_file.inc +UNLOCK TABLES; +DROP TABLE t1; +SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; +SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; +SET GLOBAL INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY = @save_freq; diff --git 
a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 7532ce85b6bb1..64ca0457313f1 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -2683,14 +2683,14 @@ fseg_free_extent( not_full_n_used - descr_n_used); } - fsp_free_extent(space, page, mtr); - for (uint32_t i = 0; i < FSP_EXTENT_SIZE; i++) { if (!xdes_is_free(descr, i)) { buf_page_free(space, first_page_in_extent + i, mtr, __FILE__, __LINE__); } } + + fsp_free_extent(space, page, mtr); } /**********************************************************************//** From 53499cd1ea1c8092460924224d78a286d617492d Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 29 Aug 2023 11:51:01 +0400 Subject: [PATCH 020/165] MDEV-31303 Key not used when IN clause has both signed and unsigned values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: This patch enables possible index optimization when the WHERE clause has an IN condition of the form: signed_or_unsigned_column IN (signed_or_unsigned_constant, signed_or_unsigned_constant [,signed_or_unsigned_constant]*) when the IN list constants are of different signedness, e.g.: WHERE signed_column IN (signed_constant, unsigned_constant ...) WHERE unsigned_column IN (signed_constant, unsigned_constant ...) Details: In a condition like: WHERE unsigned_predicant IN (1, LONGLONG_MAX + 1) comparison handlers for individual (predicant,value) pairs are calculated as follows: * unsigned_predicant and 1 produce &type_handler_newdecimal * unsigned_predicant and (LONGLONG_MAX + 1) produce &type_handler_slonglong The old code decided that it could not use bisection because the two pairs had different comparison handlers. As a result, bisection was not allowed, and, in case of an indexed integer column predicant the index on the column was not used. 
The new code catches special cases like: signed_predicant IN (signed_constant, unsigned_constant) unsigned_predicant IN (signed_constant, unsigned_constant) It enables bisection using in_longlong, which supports a mixture of predicant and values of different signess. In case when the predicant is an indexed column this change automatically enables index range optimization. Thanks to Vicențiu Ciorbaru for proposing the idea and for preparing MTR tests. --- mysql-test/main/func_debug.result | 9 +++++--- mysql-test/main/func_in.result | 37 +++++++++++++++++++++++++++++++ mysql-test/main/func_in.test | 34 ++++++++++++++++++++++++++++ sql/item_cmpfunc.cc | 36 ++++++++++++++++++++++++++++++ 4 files changed, 113 insertions(+), 3 deletions(-) diff --git a/mysql-test/main/func_debug.result b/mysql-test/main/func_debug.result index c8efcf09d41ef..37f2a19fc6c28 100644 --- a/mysql-test/main/func_debug.result +++ b/mysql-test/main/func_debug.result @@ -912,7 +912,8 @@ a IN (CAST(1 AS SIGNED), CAST(1 AS UNSIGNED)) Warnings: Note 1105 DBUG: [0] arg=1 handler=0 (bigint) Note 1105 DBUG: [1] arg=2 handler=1 (decimal) -Note 1105 DBUG: types_compatible=no bisect=no +Note 1105 DBUG: found a mix of UINT and SINT +Note 1105 DBUG: types_compatible=yes bisect=yes SELECT a IN (CAST(1 AS SIGNED), CAST(1 AS UNSIGNED),NULL) FROM t1; a IN (CAST(1 AS SIGNED), CAST(1 AS UNSIGNED),NULL) Warnings: @@ -950,7 +951,8 @@ a NOT IN (CAST(1 AS SIGNED), CAST(1 AS UNSIGNED)) Warnings: Note 1105 DBUG: [0] arg=1 handler=0 (bigint) Note 1105 DBUG: [1] arg=2 handler=1 (decimal) -Note 1105 DBUG: types_compatible=no bisect=no +Note 1105 DBUG: found a mix of UINT and SINT +Note 1105 DBUG: types_compatible=yes bisect=yes SELECT a NOT IN (CAST(1 AS SIGNED), CAST(1 AS UNSIGNED),NULL) FROM t1; a NOT IN (CAST(1 AS SIGNED), CAST(1 AS UNSIGNED),NULL) Warnings: @@ -1624,7 +1626,8 @@ a b Warnings: Note 1105 DBUG: [0] arg=1 handler=0 (bigint) Note 1105 DBUG: [1] arg=2 handler=1 (decimal) -Note 1105 DBUG: types_compatible=no 
bisect=no +Note 1105 DBUG: found a mix of UINT and SINT +Note 1105 DBUG: types_compatible=yes bisect=no DROP TABLE t1; # # MDEV-11554 Wrong result for CASE on a mixture of signed and unsigned expressions diff --git a/mysql-test/main/func_in.result b/mysql-test/main/func_in.result index dd7125c393a89..e81ececf75239 100644 --- a/mysql-test/main/func_in.result +++ b/mysql-test/main/func_in.result @@ -944,3 +944,40 @@ Warning 1292 Truncated incorrect DECIMAL value: '0x' # # End of 10.4 tests # +# +# Start of 10.5 tests +# +# +# MDEV-31303: Key not used +# +CREATE TABLE `a` ( +`id` bigint AUTO_INCREMENT PRIMARY KEY, +`c1` bigint unsigned, +KEY (`c1`) +); +INSERT INTO `a` VALUES (1,9223382399205928659),(2,9223384207280813348), +(3,9223385953115437234),(4,9223387250780556749),(5,9223387354282558788), +(6,9223387603870501596),(7,9223389270813433667),(8,9223389903231468827), +(9,9223390280789586779),(10,9223391591398222899),(11,9223391875473564350), +(12,9223393152250049433),(13,9223393939696790223),(14,9223394417225350415), +(15,9223397646397141015),(16,9223398025879291243),(17,9223399038671098072), +(18,9223399534968874556),(19,9223400449518009285),(20,9223400860292643549), +(21,9223400940692256924),(22,9223401073791948119),(23,9223402820804649616), +(24,9223403470951992681),(25,9223405581879567267),(26,9223405754978563829), +(27,9223405972966828221), (28, 9223372036854775808), (29, 9223372036854775807) ; +explain SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775807 ); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE a range c1 c1 9 NULL 2 Using where; Using index +explain SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775808 ); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE a range c1 c1 9 NULL 2 Using where; Using index +SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775807 ); +c1 +9223372036854775807 +SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775808 ); +c1 +9223372036854775808 +drop table `a`; +# +# End 
of 10.5 tests +# diff --git a/mysql-test/main/func_in.test b/mysql-test/main/func_in.test index 2581a07ac0971..6cddf1dea5c3b 100644 --- a/mysql-test/main/func_in.test +++ b/mysql-test/main/func_in.test @@ -726,3 +726,37 @@ SELECT ('0x',1) IN ((0,1),(1,1)); --echo # End of 10.4 tests --echo # +--echo # +--echo # Start of 10.5 tests +--echo # + +--echo # +--echo # MDEV-31303: Key not used +--echo # +CREATE TABLE `a` ( + `id` bigint AUTO_INCREMENT PRIMARY KEY, + `c1` bigint unsigned, + KEY (`c1`) +); + +INSERT INTO `a` VALUES (1,9223382399205928659),(2,9223384207280813348), +(3,9223385953115437234),(4,9223387250780556749),(5,9223387354282558788), +(6,9223387603870501596),(7,9223389270813433667),(8,9223389903231468827), +(9,9223390280789586779),(10,9223391591398222899),(11,9223391875473564350), +(12,9223393152250049433),(13,9223393939696790223),(14,9223394417225350415), +(15,9223397646397141015),(16,9223398025879291243),(17,9223399038671098072), +(18,9223399534968874556),(19,9223400449518009285),(20,9223400860292643549), +(21,9223400940692256924),(22,9223401073791948119),(23,9223402820804649616), +(24,9223403470951992681),(25,9223405581879567267),(26,9223405754978563829), +(27,9223405972966828221), (28, 9223372036854775808), (29, 9223372036854775807) ; + +explain SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775807 ); +explain SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775808 ); +SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775807 ); +SELECT c1 FROM a WHERE c1 IN ( 1, 9223372036854775808 ); +drop table `a`; + +--echo # +--echo # End of 10.5 tests +--echo # + diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9903bb60d053e..4a2de58e7488a 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -4442,6 +4442,42 @@ bool Item_func_in::fix_length_and_dec() return TRUE; } + if (!arg_types_compatible && comparator_count() == 2) + { + /* + Catch a special case: a mixture of signed and unsigned integer types. + in_longlong can handle such cases. 
+ + Note, prepare_predicant_and_values() aggregates this mixture as follows: + - signed+unsigned produce &type_handler_newdecimal. + - signed+signed or unsigned+unsigned produce &type_handler_slonglong + So we have extactly two distinct handlers. + + The code below assumes that unsigned longlong is handled + by &type_handler_slonglong in comparison context, + which may change in the future to &type_handler_ulonglong. + The DBUG_ASSERT is needed to address this change here properly. + */ + DBUG_ASSERT(type_handler_ulonglong.type_handler_for_comparison() == + &type_handler_slonglong); + // Let's check if all arguments are of integer types + uint found_int_args= 0; + for (uint i= 0; i < arg_count; i++, found_int_args++) + { + if (args[i]->type_handler_for_comparison() != &type_handler_slonglong) + break; + } + if (found_int_args == arg_count) + { + // All arguments are integers. Switch to integer comparison. + arg_types_compatible= true; + DBUG_EXECUTE_IF("Item_func_in", + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_UNKNOWN_ERROR, "DBUG: found a mix of UINT and SINT");); + m_comparator.set_handler(&type_handler_slonglong); + } + } + if (arg_types_compatible) // Bisection condition #1 { if (m_comparator.type_handler()-> From 9b1b4a6f69f81530b7fe9f1b4a7c517df68652e2 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 30 Aug 2023 15:17:07 +1000 Subject: [PATCH 021/165] MDEV-31545 Revert "Fix gcc warning for wsrep_plug" This reverts commit 38fe266ea9537acce888b210cb233f5854d7560e. 
The correct fix was pushed to the 10.4 branch (fbc157ab33bb2b7a239f13f8b64ce5935f0bdee9) --- sql/sql_parse.cc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c3d8664639cb1..abcd690ba8ade 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6320,11 +6320,6 @@ static bool __attribute__ ((noinline)) execute_show_status(THD *thd, TABLE_LIST *all_tables) { bool res; - -#if defined(__GNUC__) && (__GNUC__ >= 13) -#pragma GCC diagnostic ignored "-Wdangling-pointer" -#endif - system_status_var old_status_var= thd->status_var; thd->initial_status_var= &old_status_var; WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_SHOW); From 31ea201eccebfc0b9a14cc6bc9a00b4271d739a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 30 Aug 2023 13:20:27 +0300 Subject: [PATCH 022/165] MDEV-30986 Slow full index scan for I/O bound case buf_page_init_for_read(): Test a condition before acquiring a latch, not while holding it. buf_read_ahead_linear(): Do not use a memory transaction, because it could be too large, leading to frequent retries. Release the hash_lock as early as possible. 
--- storage/innobase/buf/buf0rea.cc | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index dae1527dde61c..4967641d990c1 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -147,14 +147,18 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id, bpage= &block->page; /* Insert into the hash table of file pages */ + if (hash_page) + { + transactional_lock_guard g + {buf_pool.page_hash.lock_get(chain)}; + bpage->set_state(buf_pool.watch_remove(hash_page, chain) + + (buf_page_t::READ_FIX - buf_page_t::UNFIXED)); + buf_pool.page_hash.append(chain, &block->page); + } + else { transactional_lock_guard g {buf_pool.page_hash.lock_get(chain)}; - - if (hash_page) - bpage->set_state(buf_pool.watch_remove(hash_page, chain) + - (buf_page_t::READ_FIX - buf_page_t::UNFIXED)); - buf_pool.page_hash.append(chain, &block->page); } @@ -561,11 +565,16 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) for (page_id_t i= low; i <= high_1; ++i) { buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(i.fold()); - transactional_shared_lock_guard g - {buf_pool.page_hash.lock_get(chain)}; + page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); + /* It does not make sense to use transactional_lock_guard here, + because we would have many complex conditions inside the memory + transaction. 
*/ + hash_lock.lock_shared(); + const buf_page_t* bpage= buf_pool.page_hash.get(i, chain); if (!bpage) { + hash_lock.unlock_shared(); if (i == page_id) goto fail; failed: @@ -573,6 +582,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) continue; goto fail; } + const unsigned accessed= bpage->is_accessed(); if (i == page_id) { /* Read the natural predecessor and successor page addresses from @@ -583,6 +593,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) const byte *f= bpage->frame ? bpage->frame : bpage->zip.data; uint32_t prev= mach_read_from_4(my_assume_aligned<4>(f + FIL_PAGE_PREV)); uint32_t next= mach_read_from_4(my_assume_aligned<4>(f + FIL_PAGE_NEXT)); + hash_lock.unlock_shared(); if (prev == FIL_NULL || next == FIL_NULL) goto fail; page_id_t id= page_id; @@ -612,8 +623,9 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) /* The area is not whole */ goto fail; } + else + hash_lock.unlock_shared(); - const unsigned accessed= bpage->is_accessed(); if (!accessed) goto failed; /* Note that buf_page_t::is_accessed() returns the time of the From 9d1466522ea92963ac6ca16b597392714280c9f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 30 Aug 2023 14:40:13 +0300 Subject: [PATCH 023/165] MDEV-32029 Assertion failures in log_sort_flush_list upon crash recovery In commit 0d175968d1181a0308ce6caccc2e4fbc972ca6c6 (MDEV-31354) we only waited that no buf_pool.flush_list writes are in progress. The buf_flush_page_cleaner() thread could still initiate page writes from the buf_pool.LRU list while only holding buf_pool.mutex, not buf_pool.flush_list_mutex. This is something that was changed in commit a55b951e6082a4ce9a1f2ed5ee176ea7dbbaf1f2 (MDEV-26827). log_sort_flush_list(): Wait for the buf_flush_page_cleaner() thread to be completely idle, including LRU flushing. 
buf_flush_page_cleaner(): Always broadcast buf_pool.done_flush_list when becoming idle, so that log_sort_flush_list() will be woken up. Also, ensure that buf_pool.n_flush_inc() or buf_pool.flush_list_set_active() has been invoked before any page writes are initiated. buf_flush_try_neighbors(): Release buf_pool.mutex here and not in the callers, to avoid code duplication. Make innodb_flush_neighbors=ON obey the innodb_io_capacity limit. --- storage/innobase/buf/buf0flu.cc | 120 +++++++++++++++++------------ storage/innobase/include/buf0buf.h | 13 ++++ storage/innobase/log/log0recv.cc | 26 ++++--- 3 files changed, 100 insertions(+), 59 deletions(-) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 59be7349908db..9826320a358cc 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -355,6 +355,13 @@ inline void buf_pool_t::n_flush_dec() mysql_mutex_unlock(&flush_list_mutex); } +inline void buf_pool_t::n_flush_dec_holding_mutex() +{ + mysql_mutex_assert_owner(&flush_list_mutex); + ut_ad(page_cleaner_status >= LRU_FLUSH); + page_cleaner_status-= LRU_FLUSH; +} + /** Complete write of a file page from buf_pool. 
@param request write request @param error whether the write may have failed */ @@ -1076,6 +1083,8 @@ static ulint buf_flush_try_neighbors(fil_space_t *space, bool contiguous, bool evict, ulint n_flushed, ulint n_to_flush) { + mysql_mutex_unlock(&buf_pool.mutex); + ut_ad(space->id == page_id.space()); ut_ad(bpage->id() == page_id); @@ -1337,24 +1346,22 @@ static void buf_flush_LRU_list_batch(ulint max, bool evict, continue; } - if (neighbors && space->is_rotational()) - { - mysql_mutex_unlock(&buf_pool.mutex); - n->flushed+= buf_flush_try_neighbors(space, page_id, bpage, - neighbors == 1, - do_evict, n->flushed, max); - goto reacquire_mutex; - } - else if (n->flushed >= max && !recv_recovery_is_on()) + if (n->flushed >= max && !recv_recovery_is_on()) { bpage->lock.u_unlock(true); break; } + + if (neighbors && space->is_rotational()) + n->flushed+= buf_flush_try_neighbors(space, page_id, bpage, + neighbors == 1, + do_evict, n->flushed, max); else if (bpage->flush(do_evict, space)) - { ++n->flushed; - goto reacquire_mutex; - } + else + continue; + + goto reacquire_mutex; } else /* Can't evict or dispatch this block. Go to previous. */ @@ -1492,19 +1499,18 @@ static ulint buf_do_flush_list_batch(ulint max_n, lsn_t lsn) else { mysql_mutex_unlock(&buf_pool.flush_list_mutex); - if (neighbors && space->is_rotational()) + do { - mysql_mutex_unlock(&buf_pool.mutex); - count+= buf_flush_try_neighbors(space, page_id, bpage, neighbors == 1, - false, count, max_n); - reacquire_mutex: + if (neighbors && space->is_rotational()) + count+= buf_flush_try_neighbors(space, page_id, bpage, + neighbors == 1, false, count, max_n); + else if (bpage->flush(false, space)) + ++count; + else + continue; mysql_mutex_lock(&buf_pool.mutex); } - else if (bpage->flush(false, space)) - { - ++count; - goto reacquire_mutex; - } + while (0); } mysql_mutex_lock(&buf_pool.flush_list_mutex); @@ -1723,8 +1729,7 @@ The caller must invoke buf_dblwr.flush_buffered_writes() after releasing buf_pool.mutex. 
@param max_n wished maximum mumber of blocks flushed @param evict whether to evict pages after flushing -@return evict ? number of processed pages : number of pages written -@retval 0 if a buf_pool.LRU batch is already running */ +@return evict ? number of processed pages : number of pages written */ ulint buf_flush_LRU(ulint max_n, bool evict) { mysql_mutex_assert_owner(&buf_pool.mutex); @@ -1867,7 +1872,9 @@ static void buf_flush_wait(lsn_t lsn) { ut_ad(lsn <= log_sys.get_lsn()); - while (buf_pool.get_oldest_modification(lsn) < lsn) + lsn_t oldest_lsn; + + while ((oldest_lsn= buf_pool.get_oldest_modification(lsn)) < lsn) { if (buf_flush_sync_lsn < lsn) { @@ -1876,13 +1883,20 @@ static void buf_flush_wait(lsn_t lsn) pthread_cond_signal(&buf_pool.do_flush_list); my_cond_wait(&buf_pool.done_flush_list, &buf_pool.flush_list_mutex.m_mutex); - if (buf_pool.get_oldest_modification(lsn) >= lsn) + oldest_lsn= buf_pool.get_oldest_modification(lsn); + if (oldest_lsn >= lsn) break; } mysql_mutex_unlock(&buf_pool.flush_list_mutex); os_aio_wait_until_no_pending_writes(false); mysql_mutex_lock(&buf_pool.flush_list_mutex); } + + if (oldest_lsn >= buf_flush_sync_lsn) + { + buf_flush_sync_lsn= 0; + pthread_cond_broadcast(&buf_pool.done_flush_list); + } } /** Wait until all persistent pages are flushed up to a limit. 
@@ -2272,24 +2286,19 @@ static void buf_flush_page_cleaner() set_timespec(abstime, 1); lsn_limit= buf_flush_sync_lsn; - const lsn_t oldest_lsn= buf_pool.get_oldest_modification(0); + lsn_t oldest_lsn= buf_pool.get_oldest_modification(0); if (!oldest_lsn) { - if (UNIV_UNLIKELY(lsn_limit != 0)) - { - buf_flush_sync_lsn= 0; - /* wake up buf_flush_wait() */ - pthread_cond_broadcast(&buf_pool.done_flush_list); - } - unemployed: - buf_flush_async_lsn= 0; + fully_unemployed: + buf_flush_sync_lsn= 0; set_idle: buf_pool.page_cleaner_set_idle(true); + set_almost_idle: + pthread_cond_broadcast(&buf_pool.done_flush_list); if (UNIV_UNLIKELY(srv_shutdown_state > SRV_SHUTDOWN_INITIATED)) break; mysql_mutex_unlock(&buf_pool.flush_list_mutex); - end_of_batch: buf_dblwr.flush_buffered_writes(); do @@ -2307,6 +2316,7 @@ static void buf_flush_page_cleaner() if (!buf_pool.ran_out()) continue; mysql_mutex_lock(&buf_pool.flush_list_mutex); + oldest_lsn= buf_pool.get_oldest_modification(0); } lsn_t soft_lsn_limit= buf_flush_async_lsn; @@ -2335,7 +2345,17 @@ static void buf_flush_page_cleaner() else if (buf_pool.ran_out()) { buf_pool.page_cleaner_set_idle(false); - buf_pool.get_oldest_modification(0); + buf_pool.n_flush_inc(); + /* Remove clean blocks from buf_pool.flush_list before the LRU scan. */ + for (buf_page_t *p= UT_LIST_GET_FIRST(buf_pool.flush_list); p; ) + { + const lsn_t lsn{p->oldest_modification()}; + ut_ad(lsn > 2 || lsn == 1); + buf_page_t *n= UT_LIST_GET_NEXT(list, p); + if (lsn <= 1) + buf_pool.delete_from_flush_list(p); + p= n; + } mysql_mutex_unlock(&buf_pool.flush_list_mutex); n= srv_max_io_capacity; mysql_mutex_lock(&buf_pool.mutex); @@ -2343,15 +2363,16 @@ static void buf_flush_page_cleaner() n= buf_flush_LRU(n, false); mysql_mutex_unlock(&buf_pool.mutex); last_pages+= n; - - if (pct_lwm == 0.0) - goto end_of_batch; - - /* when idle flushing kicks in page_cleaner is marked active. - reset it back to idle since the it was made active as part of - idle flushing stage. 
*/ + check_oldest_and_set_idle: mysql_mutex_lock(&buf_pool.flush_list_mutex); - goto set_idle; + buf_pool.n_flush_dec_holding_mutex(); + oldest_lsn= buf_pool.get_oldest_modification(0); + if (!oldest_lsn) + goto fully_unemployed; + if (oldest_lsn >= buf_flush_async_lsn) + buf_flush_async_lsn= 0; + buf_pool.page_cleaner_set_idle(false); + goto set_almost_idle; } else if (UNIV_UNLIKELY(srv_shutdown_state > SRV_SHUTDOWN_INITIATED)) break; @@ -2379,6 +2400,7 @@ static void buf_flush_page_cleaner() - page cleaner is idle (dirty_pct < srv_max_dirty_pages_pct_lwm) - there are no pending reads but there are dirty pages to flush */ buf_pool.update_last_activity_count(activity_count); + buf_pool.n_flush_inc(); mysql_mutex_unlock(&buf_pool.flush_list_mutex); goto idle_flush; } @@ -2394,9 +2416,10 @@ static void buf_flush_page_cleaner() else if (dirty_pct < srv_max_buf_pool_modified_pct) possibly_unemployed: if (!soft_lsn_limit && !af_needed_for_redo(oldest_lsn)) - goto unemployed; + goto set_idle; buf_pool.page_cleaner_set_idle(false); + buf_pool.n_flush_inc(); mysql_mutex_unlock(&buf_pool.flush_list_mutex); if (UNIV_UNLIKELY(soft_lsn_limit != 0)) @@ -2434,10 +2457,7 @@ static void buf_flush_page_cleaner() n_flushed); } else if (buf_flush_async_lsn <= oldest_lsn) - { - mysql_mutex_lock(&buf_pool.flush_list_mutex); - goto unemployed; - } + goto check_oldest_and_set_idle; n= n >= n_flushed ? 
n - n_flushed : 0; goto LRU_flush; diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 4c2f621fb251d..94c6b9101ab1a 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -1753,6 +1753,10 @@ class buf_pool_t /** Decrement the number of pending LRU flush */ inline void n_flush_dec(); + /** Decrement the number of pending LRU flush + while holding flush_list_mutex */ + inline void n_flush_dec_holding_mutex(); + /** @return whether flush_list flushing is active */ bool flush_list_active() const { @@ -1777,6 +1781,15 @@ class buf_pool_t mysql_mutex_assert_owner(&flush_list_mutex); return page_cleaner_status & PAGE_CLEANER_IDLE; } + + /** @return whether the page cleaner may be initiating writes */ + bool page_cleaner_active() const + { + mysql_mutex_assert_owner(&flush_list_mutex); + static_assert(PAGE_CLEANER_IDLE == 1, "efficiency"); + return page_cleaner_status > PAGE_CLEANER_IDLE; + } + /** Wake up the page cleaner if needed. @param for_LRU whether to wake up for LRU eviction */ void page_cleaner_wakeup(bool for_LRU= false); diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 884c92fceaf91..aa7ec6f2d0468 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -3581,16 +3581,24 @@ inline fil_space_t *fil_system_t::find(const char *path) const static void log_sort_flush_list() { /* Ensure that oldest_modification() cannot change during std::sort() */ - for (;;) { - os_aio_wait_until_no_pending_writes(false); - mysql_mutex_lock(&buf_pool.flush_list_mutex); - if (buf_pool.flush_list_active()) - my_cond_wait(&buf_pool.done_flush_list, - &buf_pool.flush_list_mutex.m_mutex); - else if (!os_aio_pending_writes()) - break; - mysql_mutex_unlock(&buf_pool.flush_list_mutex); + const double pct_lwm= srv_max_dirty_pages_pct_lwm; + /* Disable "idle" flushing in order to minimize the wait time below. 
*/ + srv_max_dirty_pages_pct_lwm= 0.0; + + for (;;) + { + os_aio_wait_until_no_pending_writes(false); + mysql_mutex_lock(&buf_pool.flush_list_mutex); + if (buf_pool.page_cleaner_active()) + my_cond_wait(&buf_pool.done_flush_list, + &buf_pool.flush_list_mutex.m_mutex); + else if (!os_aio_pending_writes()) + break; + mysql_mutex_unlock(&buf_pool.flush_list_mutex); + } + + srv_max_dirty_pages_pct_lwm= pct_lwm; } const size_t size= UT_LIST_GET_LEN(buf_pool.flush_list); From 3c86765efef97862ab3ea8021ec9348ea995e915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 31 Aug 2023 12:08:40 +0300 Subject: [PATCH 024/165] MDEV-23974 fixup: Use standard quotes in have_innodb.inc This fixes the following test: set sql_mode=ORACLE; --source include/have_innodb.inc --- mysql-test/include/have_innodb.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/include/have_innodb.inc b/mysql-test/include/have_innodb.inc index 0de070e1994dc..8c9cdb54363d5 100644 --- a/mysql-test/include/have_innodb.inc +++ b/mysql-test/include/have_innodb.inc @@ -3,7 +3,7 @@ # will be skipped unless innodb is enabled # --disable_query_log -if (`select count(*) from information_schema.system_variables where variable_name='have_sanitizer' and global_value like "MSAN%"`) +if (`select count(*) from information_schema.system_variables where variable_name='have_sanitizer' and global_value like 'MSAN%'`) { SET STATEMENT sql_log_bin=0 FOR call mtr.add_suppression("InnoDB: Trying to delete tablespace.*pending operations"); From 2db5f1b2985d2c921e73d19561332246fa43557e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 31 Aug 2023 12:14:49 +0300 Subject: [PATCH 025/165] MDEV-32049 Deadlock due to log_free_check() in trx_purge_truncate_history() The function log_free_check() is not supposed to be invoked while the caller is holding any InnoDB synchronization objects, such as buffer page latches, tablespace latches, index tree latches, 
or in this case, rseg->mutex (rseg->latch in 10.6 or later). A hang was reported in 10.6 where several threads were waiting for an rseg->latch that had been exclusively acquired in trx_purge_truncate_history(), which invoked log_free_check() inside trx_purge_truncate_rseg_history(). Because the threads that were waiting for the rseg->latch were holding exclusive latches on some index pages, log_free_check() was unable to advance the checkpoint because those index pages could not be written out. trx_purge_truncate_history(): Invoke log_free_check() before acquiring the rseg->mutex and invoking trx_purge_free_segment(). trx_purge_free_segment(): Do not invoke log_free_check() in order to avoid a deadlock. --- storage/innobase/trx/trx0purge.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index c6adaf5f2bf23..a297ca4921e83 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -346,7 +346,6 @@ static void trx_purge_free_segment(mtr_t &mtr, trx_rseg_t* rseg, fil_addr_t hdr_addr) { mtr.commit(); - log_free_check(); mtr.start(); ut_ad(mutex_own(&rseg->mutex)); @@ -376,7 +375,6 @@ void trx_purge_free_segment(mtr_t &mtr, trx_rseg_t* rseg, fil_addr_t hdr_addr) This does not matter when using multiple innodb_undo_tablespaces; innodb_undo_log_truncate=ON will be able to reclaim the space. 
*/ - log_free_check(); mtr.start(); ut_ad(rw_lock_s_lock_nowait(block->debug_latch, __FILE__, __LINE__)); rw_lock_x_lock(&block->lock); @@ -545,6 +543,7 @@ void trx_purge_truncate_history() { ut_ad(rseg->id == i); ut_ad(rseg->is_persistent()); + log_free_check(); mutex_enter(&rseg->mutex); trx_purge_truncate_rseg_history(*rseg, head, !rseg->trx_ref_count && From d1fca0baab272bd4d24b22dd6fa565703eee03ca Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Fri, 1 Sep 2023 17:42:47 +0530 Subject: [PATCH 026/165] MDEV-32060 Server aborts when table doesn't have referenced index - Server aborts when table doesn't have referenced index. This is caused by 5f09b53bdb4e973e7c7ec2c53a24c98321223f98 (MDEV-31086). While iterating the foreign key constraints, we fail to consider that InnoDB doesn't have referenced index for it when foreign key check is disabled. --- mysql-test/suite/innodb/r/fk_col_alter.result | 13 +++++++++++++ mysql-test/suite/innodb/t/fk_col_alter.test | 15 +++++++++++++++ storage/innobase/handler/handler0alter.cc | 2 ++ 3 files changed, 30 insertions(+) diff --git a/mysql-test/suite/innodb/r/fk_col_alter.result b/mysql-test/suite/innodb/r/fk_col_alter.result index a688894352302..a220402db946e 100644 --- a/mysql-test/suite/innodb/r/fk_col_alter.result +++ b/mysql-test/suite/innodb/r/fk_col_alter.result @@ -81,4 +81,17 @@ DROP TABLE t2, t1; CREATE TABLE t (a VARCHAR(40), b INT, C INT) ENGINE=InnoDB; ALTER TABLE t MODIFY a VARCHAR(50), DROP b; DROP TABLE t; +# +# MDEV-32060 Server aborts when table doesn't +# have referenced index +# +SET SESSION FOREIGN_KEY_CHECKS = OFF; +CREATE TABLE t1 (a VARCHAR(16) KEY, FOREIGN KEY(a) REFERENCES t2(b)) ENGINE=InnoDB; +CREATE TABLE t2 (b VARCHAR(8)) ENGINE=InnoDB; +ERROR HY000: Can't create table `test`.`t2` (errno: 150 "Foreign key constraint is incorrectly formed") +SET SESSION FOREIGN_KEY_CHECKS = ON; +ALTER TABLE t2 MODIFY b VARCHAR(16), ADD KEY(b); +ERROR 42S02: Table 'test.t2' doesn't exist +DROP 
TABLE t2, t1; +ERROR 42S02: Unknown table 'test.t2' # End of 10.4 tests diff --git a/mysql-test/suite/innodb/t/fk_col_alter.test b/mysql-test/suite/innodb/t/fk_col_alter.test index 169210e12c18a..87ccde5652a7d 100644 --- a/mysql-test/suite/innodb/t/fk_col_alter.test +++ b/mysql-test/suite/innodb/t/fk_col_alter.test @@ -111,4 +111,19 @@ DROP TABLE t2, t1; CREATE TABLE t (a VARCHAR(40), b INT, C INT) ENGINE=InnoDB; ALTER TABLE t MODIFY a VARCHAR(50), DROP b; DROP TABLE t; + +--echo # +--echo # MDEV-32060 Server aborts when table doesn't +--echo # have referenced index +--echo # +SET SESSION FOREIGN_KEY_CHECKS = OFF; +CREATE TABLE t1 (a VARCHAR(16) KEY, FOREIGN KEY(a) REFERENCES t2(b)) ENGINE=InnoDB; +# Following SQL is allowed in 10.6 +--error ER_CANT_CREATE_TABLE +CREATE TABLE t2 (b VARCHAR(8)) ENGINE=InnoDB; +SET SESSION FOREIGN_KEY_CHECKS = ON; +--error ER_NO_SUCH_TABLE +ALTER TABLE t2 MODIFY b VARCHAR(16), ADD KEY(b); +--error ER_BAD_TABLE_ERROR +DROP TABLE t2, t1; --echo # End of 10.4 tests diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index a5ccb1957f312..bde3c1fb05eaf 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -7712,6 +7712,8 @@ bool check_col_is_in_fk_indexes( { for (ulint i= 0; i < f->n_fields; i++) { + if (!f->referenced_index) + continue; if (f->referenced_index->fields[i].col == col) { my_error(ER_FK_COLUMN_CANNOT_CHANGE_CHILD, MYF(0), From 0d4be10a8af30cacc038c589792e772cb42683fc Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 1 Sep 2023 19:25:33 +0700 Subject: [PATCH 027/165] MDEV-14959: Control over memory allocated for SP/PS This patch adds support for controlling of memory allocation done by SP/PS that could happen on second and following executions. As soon as SP or PS has been executed the first time its memory root is marked as read only since no further memory allocation should be performed on it. 
In case such allocation takes place it leads to the assert hit for invariant that force no new memory allocations takes place as soon as the SP/PS has been marked as read only. The feature for control of memory allocation made on behalf SP/PS is turned on when both debug build is on and the cmake option -DWITH_PROTECT_STATEMENT_MEMROOT is set. The reason for introduction of the new cmake option -DWITH_PROTECT_STATEMENT_MEMROOT to control memory allocation of second and following executions of SP/PS is that for the current server implementation there are too many places where such memory allocation takes place. As soon as all such incorrect allocations be fixed the cmake option -DWITH_PROTECT_STATEMENT_MEMROOT can be removed and control of memory allocation made on second and following executions can be turned on only for debug build. Before every incorrect memory allocation be fixed it makes sense to guard the checking of memory allocation on read only memory by extra cmake option else we would get a lot of failing test on buildbot. Moreover, fixing of all incorrect memory allocations could take pretty long period of time, so for introducing the feature without necessary to wait until all places throughout the source code be fixed it makes sense to add the new cmake option. --- CMakeLists.txt | 9 +++++++ include/my_alloc.h | 4 +++ mysys/my_alloc.c | 7 +++++ sql/sp_head.cc | 67 ++++++++++++++++++++++++++++++++++++++++++++++ sql/sp_head.h | 37 +++++++++++++++++++++++++ sql/sql_prepare.cc | 44 ++++++++++++++++++++++++++++++ 6 files changed, 168 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7e5f3e157c33e..9605d786c9350 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -193,6 +193,15 @@ ENDIF() OPTION(NOT_FOR_DISTRIBUTION "Allow linking with GPLv2-incompatible system libraries. Only set it you never plan to distribute the resulting binaries" OFF) +# +# Enable protection of statement's memory root after first SP/PS execution. 
+# Can be switched on only for debug build. +# +OPTION(WITH_PROTECT_STATEMENT_MEMROOT "Enable protection of statement's memory root after first SP/PS execution. Turned into account only for debug build" OFF) +IF (CMAKE_BUILD_TYPE MATCHES "Debug" AND WITH_PROTECT_STATEMENT_MEMROOT) + ADD_DEFINITIONS(-DPROTECT_STATEMENT_MEMROOT) +ENDIF() + INCLUDE(check_compiler_flag) INCLUDE(check_linker_flag) diff --git a/include/my_alloc.h b/include/my_alloc.h index 9b0aad269563c..6399ce67667c6 100644 --- a/include/my_alloc.h +++ b/include/my_alloc.h @@ -51,6 +51,10 @@ typedef struct st_mem_root */ unsigned int first_block_usage; +#ifdef PROTECT_STATEMENT_MEMROOT + int read_only; +#endif + void (*error_handler)(void); const char *name; } MEM_ROOT; diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c index a6f38dcb14591..4a5e48c9e80dd 100644 --- a/mysys/my_alloc.c +++ b/mysys/my_alloc.c @@ -74,6 +74,9 @@ void init_alloc_root(MEM_ROOT *mem_root, const char *name, size_t block_size, mem_root->first_block_usage= 0; mem_root->total_alloc= 0; mem_root->name= name; +#ifdef PROTECT_STATEMENT_MEMROOT + mem_root->read_only= 0; +#endif #if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG)) if (pre_alloc_size) @@ -218,6 +221,10 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length) DBUG_PRINT("enter",("root: %p name: %s", mem_root, mem_root->name)); DBUG_ASSERT(alloc_root_inited(mem_root)); +#ifdef PROTECT_STATEMENT_MEMROOT + DBUG_ASSERT(mem_root->read_only == 0); +#endif + DBUG_EXECUTE_IF("simulate_out_of_memory", { /* Avoid reusing an already allocated block */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 8801d8880b1ec..b1a697ada2a8b 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -493,6 +493,9 @@ sp_head::sp_head(MEM_ROOT *mem_root_arg, sp_package *parent, :Query_arena(NULL, STMT_INITIALIZED_FOR_SP), Database_qualified_name(&null_clex_str, &null_clex_str), main_mem_root(*mem_root_arg), +#ifdef PROTECT_STATEMENT_MEMROOT + executed_counter(0), +#endif m_parent(parent), m_handler(sph), 
m_flags(0), @@ -738,6 +741,10 @@ sp_head::init(LEX *lex) */ lex->trg_table_fields.empty(); +#ifdef PROTECT_STATEMENT_MEMROOT + executed_counter= 0; +#endif + DBUG_VOID_RETURN; } @@ -1364,6 +1371,11 @@ sp_head::execute(THD *thd, bool merge_da_on_success) #endif /* WITH_WSREP */ err_status= i->execute(thd, &ip); +#ifdef PROTECT_STATEMENT_MEMROOT + if (!err_status) + i->mark_as_run(); +#endif + #ifdef WITH_WSREP if (WSREP(thd)) { @@ -1460,6 +1472,16 @@ sp_head::execute(THD *thd, bool merge_da_on_success) /* Reset sp_rcontext::end_partial_result_set flag. */ ctx->end_partial_result_set= FALSE; +#ifdef PROTECT_STATEMENT_MEMROOT + if (thd->is_error()) + { + // Don't count a call ended with an error as normal run + executed_counter= 0; + main_mem_root.read_only= 0; + reset_instrs_executed_counter(); + } +#endif + } while (!err_status && likely(!thd->killed) && likely(!thd->is_fatal_error) && !thd->spcont->pause_state); @@ -1571,6 +1593,20 @@ sp_head::execute(THD *thd, bool merge_da_on_success) err_status|= mysql_change_db(thd, (LEX_CSTRING*)&saved_cur_db_name, TRUE) != 0; } + +#ifdef PROTECT_STATEMENT_MEMROOT + if (!err_status) + { + if (!main_mem_root.read_only && + has_all_instrs_executed()) + { + main_mem_root.read_only= 1; + } + ++executed_counter; + DBUG_PRINT("info", ("execute counter: %lu", executed_counter)); + } +#endif + m_flags&= ~IS_INVOKED; if (m_parent) m_parent->m_invoked_subroutine_count--; @@ -3214,6 +3250,37 @@ void sp_head::add_mark_lead(uint ip, List *leads) leads->push_front(i); } +#ifdef PROTECT_STATEMENT_MEMROOT + +int sp_head::has_all_instrs_executed() +{ + sp_instr *ip; + uint count= 0; + + for (uint i= 0; i < m_instr.elements; ++i) + { + get_dynamic(&m_instr, (uchar*)&ip, i); + if (ip->has_been_run()) + ++count; + } + + return count == m_instr.elements; +} + + +void sp_head::reset_instrs_executed_counter() +{ + sp_instr *ip; + + for (uint i= 0; i < m_instr.elements; ++i) + { + get_dynamic(&m_instr, (uchar*)&ip, i); + ip->mark_as_not_run(); + } +} 
+ +#endif + void sp_head::opt_mark() { diff --git a/sql/sp_head.h b/sql/sp_head.h index 06f5d5234aec6..693a5e7870355 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -132,6 +132,16 @@ class sp_head :private Query_arena, protected: MEM_ROOT main_mem_root; +#ifdef PROTECT_STATEMENT_MEMROOT + /* + The following data member is wholly for debugging purpose. + It can be used for possible crash analysis to determine how many times + the stored routine was executed before the mem_root marked read_only + was requested for a memory chunk. Additionally, a value of this data + member is output to the log with DBUG_PRINT. + */ + ulong executed_counter; +#endif public: /** Possible values of m_flags */ enum { @@ -801,6 +811,11 @@ class sp_head :private Query_arena, return ip; } +#ifdef PROTECT_STATEMENT_MEMROOT + int has_all_instrs_executed(); + void reset_instrs_executed_counter(); +#endif + /* Add tables used by routine to the table list. */ bool add_used_tables_to_table_list(THD *thd, TABLE_LIST ***query_tables_last_ptr, @@ -1084,6 +1099,9 @@ class sp_instr :public Query_arena, public Sql_alloc /// Should give each a name or type code for debugging purposes? 
sp_instr(uint ip, sp_pcontext *ctx) :Query_arena(0, STMT_INITIALIZED_FOR_SP), marked(0), m_ip(ip), m_ctx(ctx) +#ifdef PROTECT_STATEMENT_MEMROOT + , m_has_been_run(false) +#endif {} virtual ~sp_instr() @@ -1173,6 +1191,25 @@ class sp_instr :public Query_arena, public Sql_alloc m_ip= dst; } +#ifdef PROTECT_STATEMENT_MEMROOT + bool has_been_run() const + { + return m_has_been_run; + } + + void mark_as_run() + { + m_has_been_run= true; + } + + void mark_as_not_run() + { + m_has_been_run= false; + } + +private: + bool m_has_been_run; +#endif }; // class sp_instr : public Sql_alloc diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 9c52aea29310d..63ce4d4c26d55 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -171,6 +171,16 @@ class Prepared_statement: public Statement Server_side_cursor *cursor; uchar *packet; uchar *packet_end; +#ifdef PROTECT_STATEMENT_MEMROOT + /* + The following data member is wholly for debugging purpose. + It can be used for possible crash analysis to determine how many times + the stored routine was executed before the mem_root marked read_only + was requested for a memory chunk. Additionally, a value of this data + member is output to the log with DBUG_PRINT. 
+ */ + ulong executed_counter; +#endif uint param_count; uint last_errno; uint flags; @@ -3995,6 +4005,9 @@ Prepared_statement::Prepared_statement(THD *thd_arg) cursor(0), packet(0), packet_end(0), +#ifdef PROTECT_STATEMENT_MEMROOT + executed_counter(0), +#endif param_count(0), last_errno(0), flags((uint) IS_IN_USE), @@ -4070,8 +4083,13 @@ void Prepared_statement::setup_set_params() Prepared_statement::~Prepared_statement() { DBUG_ENTER("Prepared_statement::~Prepared_statement"); +#ifdef PROTECT_STATEMENT_MEMROOT + DBUG_PRINT("enter",("stmt: %p cursor: %p executed_counter: %lu", + this, cursor, executed_counter)); +#else DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor)); +#endif delete cursor; /* We have to call free on the items even if cleanup is called as some items, @@ -4237,6 +4255,10 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) } lex->set_trg_event_type_for_tables(); +#ifdef PROTECT_STATEMENT_MEMROOT + executed_counter= 0; +#endif + /* While doing context analysis of the query (in check_prepared_statement) we allocate a lot of additional memory: for open tables, JOINs, derived @@ -4506,9 +4528,31 @@ Prepared_statement::execute_loop(String *expanded_query, error= reprepare(); if (likely(!error)) /* Success */ + { +#ifdef PROTECT_STATEMENT_MEMROOT + // There was reprepare so the counter of runs should be reset + executed_counter= 0; + mem_root->read_only= 0; +#endif goto reexecute; + } } reset_stmt_params(this); +#ifdef PROTECT_STATEMENT_MEMROOT + if (!error) + { + mem_root->read_only= 1; + ++executed_counter; + + DBUG_PRINT("info", ("execute counter: %lu", executed_counter)); + } + else + { + // Error on call shouldn't be counted as a normal run + executed_counter= 0; + mem_root->read_only= 0; + } +#endif return error; } From d8574dbba3c5b414c496ab9588733d5104eddf1e Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 1 Sep 2023 19:26:24 +0700 Subject: [PATCH 028/165] MDEV-14959: Moved calculation the number of items 
reserved for exists to in transformation It is done now before call of select_lex->setup_ref_array() in order to avoid allocation of SP/PS's memory on its second invocation. --- sql/sql_select.cc | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 5955cbef8dd0a..805ba19f0bcfb 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -727,8 +727,7 @@ setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array, ORDER *group, List &win_specs, List &win_funcs, - bool *hidden_group_fields, - uint *reserved) + bool *hidden_group_fields) { int res; enum_parsing_place save_place; @@ -743,13 +742,6 @@ setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array, thd->lex->allow_sum_func.clear_bit(select->nest_level); res= setup_conds(thd, tables, leaves, conds); - if (thd->lex->current_select->first_cond_optimization) - { - if (!res && *conds && ! thd->lex->current_select->merged_into) - (*reserved)= (*conds)->exists2in_reserved_items(); - else - (*reserved)= 0; - } /* it's not wrong to have non-aggregated columns in a WHERE */ select->set_non_agg_field_used(saved_non_agg_field_used); @@ -1318,6 +1310,15 @@ JOIN::prepare(TABLE_LIST *tables_init, if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num, &select_lex->hidden_bit_fields)) DBUG_RETURN(-1); + + if (thd->lex->current_select->first_cond_optimization) + { + if ( conds && ! 
thd->lex->current_select->merged_into) + select_lex->select_n_reserved= conds->exists2in_reserved_items(); + else + select_lex->select_n_reserved= 0; + } + if (select_lex->setup_ref_array(thd, real_og_num)) DBUG_RETURN(-1); @@ -1336,8 +1337,7 @@ JOIN::prepare(TABLE_LIST *tables_init, all_fields, &conds, order, group_list, select_lex->window_specs, select_lex->window_funcs, - &hidden_group_fields, - &select_lex->select_n_reserved)) + &hidden_group_fields)) DBUG_RETURN(-1); /* From 1d502a29e56213c23ce8da9cab8296b4f764fef1 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 1 Sep 2023 19:27:01 +0700 Subject: [PATCH 029/165] MDEV-14959: Fixed possible memory leaks that could happen on running PS/SP depending on a trigger Moved call of the function check_and_update_table_version() just before the place where the function extend_table_list() is invoked in order to avoid allocation of memory on a PS/SP memory root marked as read only. It happens by the reason that the function extend_table_list() invokes sp_add_used_routine() to add a trigger created for the table in time frame between execution the statement EXECUTE `stmt_id` . For example, the following test case create table t1 (a int); prepare stmt from "insert into t1 (a) value (1)"; execute stmt; create trigger t1_bi before insert on t1 for each row set @message= new.a; execute stmt; # (*) adds the trigger t1_bi to a list of used routines that involves allocation of a memory on PS memory root that has been already marked as read only on first run of the statement 'execute stmt'. In result, when the statement marked with (*) is executed it results in assert hit. To fix the issue call the function check_and_update_table_version() before invocation of extend_table_list() to force re-compilation of PS/SP that resets read-only flag of its memory root. 
--- sql/sql_base.cc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 47b9c4c6db997..9b1043393e79a 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -3965,6 +3965,12 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags, if (tables->open_strategy && !tables->table) goto end; + /* Check and update metadata version of a base table. */ + error= check_and_update_table_version(thd, tables, tables->table->s); + + if (unlikely(error)) + goto end; + error= extend_table_list(thd, tables, prelocking_strategy, has_prelocking_list); if (unlikely(error)) goto end; @@ -3972,11 +3978,6 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags, /* Copy grant information from TABLE_LIST instance to TABLE one. */ tables->table->grant= tables->grant; - /* Check and update metadata version of a base table. */ - error= check_and_update_table_version(thd, tables, tables->table->s); - - if (unlikely(error)) - goto end; /* After opening a MERGE table add the children to the query list of tables, so that they are opened too. From be02356206cfe08a6da9ca8ed15e299741210d4b Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 1 Sep 2023 19:27:34 +0700 Subject: [PATCH 030/165] MDEV-14959: Fixed memory leak happened on re-parsing a view that substitutes a table In case a table accessed by a PS/SP is dropped after the first execution of PS/SP and a view created with the same name as a table just dropped then the second execution of PS/SP leads to allocation of a memory on SP/PS memory root already marked as read only on first execution. 
For example, the following test case: CREATE TABLE t1 (a INT); PREPARE stmt FROM "INSERT INTO t1 VALUES (1)"; EXECUTE stmt; DROP TABLE t1; CREATE VIEW t1 S SELECT 1; --error ER_NON_INSERTABLE_TABLE EXECUTE stmt; # (*) DROP VIEW t1; will hit assert on running the statement 'EXECUTE stmt' marked with (*) when allocation of a memory be performed on parsing the view. Memory allocation is requested inside the function mysql_make_view when a view definition being parsed. In order to avoid an assertion failure, call of the function mysql_make_view() must be moved after invocation of the function check_and_update_table_version(). It will result in re-preparing the whole PS statement or current SP instruction that will free currently allocated items and reset read_only flag for the memory root. --- sql/sql_base.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9b1043393e79a..97d214e1f1715 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2018,10 +2018,6 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx) goto err_lock; } - /* Open view */ - if (mysql_make_view(thd, share, table_list, false)) - goto err_lock; - /* This table is a view. Validate its metadata version: in particular, that it was a view when the statement was prepared. 
@@ -2029,6 +2025,10 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx) if (check_and_update_table_version(thd, table_list, share)) goto err_lock; + /* Open view */ + if (mysql_make_view(thd, share, table_list, false)) + goto err_lock; + /* TODO: Don't free this */ tdc_release_share(share); From d0a872c20ebb38ed9ac40c13bcf834ecf6618080 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 1 Sep 2023 19:28:12 +0700 Subject: [PATCH 031/165] MDEV-14959: Fixed memory leak relating with view and IS Fixed memory leak taken place on executing a prepared statement or a stored routine that querying a view and this view constructed on an information schema table. For example, Lets consider the following definition of the view 'v1' CREATE VIEW v1 AS SELECT table_name FROM information_schema.views ORDER BY table_name; Querying this view in PS mode result in hit of assert. PREPARE stmt FROM "SELECT * FROM v1"; EXECUTE stmt; EXECUTE stmt; (*) Running the statement marked with (*) leads to a crash in case server build with mode to control allocation of a memory from SP/PS memory root on the second and following executions of PS/SP. The reason of leaking the memory is that a memory allocated on processing of FRM file for the view requested from a PS/PS memory root meaning that this memory be released only when a stored routine be evicted from SP-cache or a prepared statement be deallocated that typically happens on termination of a user session. To fix the issue switch to a memory root specially created for allocation of short-lived objects that requested on parsing FRM. 
--- sql/sql_show.cc | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index a621c1de29ab9..1917daa27881d 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -4955,7 +4955,8 @@ try_acquire_high_prio_shared_mdl_lock(THD *thd, TABLE_LIST *table, open_tables function for this table */ -static int fill_schema_table_from_frm(THD *thd, TABLE *table, +static int fill_schema_table_from_frm(THD *thd, MEM_ROOT *mem_root, + TABLE *table, ST_SCHEMA_TABLE *schema_table, LEX_CSTRING *db_name, LEX_CSTRING *table_name, @@ -4967,6 +4968,9 @@ static int fill_schema_table_from_frm(THD *thd, TABLE *table, TABLE_LIST table_list; uint res= 0; char db_name_buff[NAME_LEN + 1], table_name_buff[NAME_LEN + 1]; + Query_arena i_s_arena(mem_root, Query_arena::STMT_CONVENTIONAL_EXECUTION); + Query_arena backup_arena, *old_arena; + bool i_s_arena_active= false; bzero((char*) &table_list, sizeof(TABLE_LIST)); bzero((char*) &tbl, sizeof(TABLE)); @@ -5041,6 +5045,11 @@ static int fill_schema_table_from_frm(THD *thd, TABLE *table, goto end; } + old_arena= thd->stmt_arena; + thd->stmt_arena= &i_s_arena; + thd->set_n_backup_active_arena(&i_s_arena, &backup_arena); + i_s_arena_active= true; + share= tdc_acquire_share(thd, &table_list, GTS_TABLE | GTS_VIEW); if (!share) { @@ -5122,7 +5131,16 @@ static int fill_schema_table_from_frm(THD *thd, TABLE *table, savepoint is safe. 
*/ DBUG_ASSERT(thd->open_tables == NULL); + thd->mdl_context.rollback_to_savepoint(open_tables_state_backup->mdl_system_tables_svp); + + if (i_s_arena_active) + { + thd->stmt_arena= old_arena; + thd->restore_active_arena(&i_s_arena, &backup_arena); + i_s_arena.free_items(); + } + if (!thd->is_fatal_error) thd->clear_error(); return res; @@ -5351,7 +5369,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) if (!(table_open_method & ~OPEN_FRM_ONLY) && db_name != &INFORMATION_SCHEMA_NAME) { - if (!fill_schema_table_from_frm(thd, table, schema_table, + if (!fill_schema_table_from_frm(thd, &tmp_mem_root, + table, schema_table, db_name, table_name, &open_tables_state_backup, can_deadlock)) From 91ab8194512500d2d024c916d50d2113ff1338d1 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Thu, 31 Aug 2023 09:13:47 +1000 Subject: [PATCH 032/165] MDEV-25177 Better indication of refusing to start because of ProtectHome Create test for for case insensitive gives a basic warning on creating a test file and the next thing a user might see is an abort. ProtectHome and other systemd setting protect system services from accessing user data. Unfortunately some of our users do put things on /home due space or other reasons. Rather than enumberate the systemd options in a very clunkly fragile way we put an error associated with the "Can't create test file" and hope the user can work it out from there. %M tip thanks Sergei. 
--- sql/mysqld.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index c110baa9634ca..2a8e4b4c16b85 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -9299,7 +9299,7 @@ static int test_if_case_insensitive(const char *dir_name) buff, 0666, O_RDWR, MYF(0))) < 0) { if (!opt_abort) - sql_print_warning("Can't create test file %s", buff); + sql_print_warning("Can't create test file '%s' (Errcode: %M)", buff, my_errno); DBUG_RETURN(-1); } mysql_file_close(file, MYF(0)); From 92fb31f0b185dfc45f8632ac06333ec2e5e1a5c2 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Fri, 4 Aug 2023 12:10:34 +0300 Subject: [PATCH 033/165] MDEV-30836 MTR misc improvements 1. Better logging and error reporting; 2. Worker process title; 3. Some comments Worker process title example: 446209 pts/2 R+ 0:00 mysql-test-run.pl worker[01] :42146 -> :35027 versioning.view 446210 pts/2 S+ 0:00 mysql-test-run.pl worker[02] :42150 -> :35027 versioning.view 446211 pts/2 S+ 0:00 mysql-test-run.pl worker[03] :42154 -> :35027 versioning.foreign 446212 pts/2 S+ 0:00 mysql-test-run.pl worker[04] :42160 -> :35027 versioning.autoinc Manager-worker localhost socket connection is represented by a pair :source -> :destination ports. -vv Now adds --verbose to mysqltest as well, see var/mysqltest.log for the output. --- mysql-test/lib/My/Test.pm | 2 +- mysql-test/lib/mtr_report.pm | 1 + mysql-test/mysql-test-run.pl | 52 +++++++++++++++++++++++------------- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/mysql-test/lib/My/Test.pm b/mysql-test/lib/My/Test.pm index 49ce2fb5af9f6..56e7cf6d8a4c1 100644 --- a/mysql-test/lib/My/Test.pm +++ b/mysql-test/lib/My/Test.pm @@ -111,7 +111,7 @@ sub read_test { $serialized =~ s/\\([0-9a-fA-F]{2})/chr(hex($1))/eg; my $test= Storable::thaw($serialized); use Data::Dumper; - die "wrong class (hack attempt?): ".ref($test)."\n".Dumper(\$test, $serialized) + confess "Not My::Test: ". ref($test). "\n". 
Dumper(\$test, $serialized) unless ref($test) eq 'My::Test'; resfile_from_test($test) if $::opt_resfile; return $test; diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm index 2a8ed65eb2c19..7d944ade71a62 100644 --- a/mysql-test/lib/mtr_report.pm +++ b/mysql-test/lib/mtr_report.pm @@ -48,6 +48,7 @@ our $timestamp= 0; our $timediff= 0; our $name; our $verbose; +# TODO: no option for that? Why is it different from $verbose? our $verbose_restart= 0; our $timer= 1; our $tests_total; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 4d1b3cd6aa531..e2c025bd57c94 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -579,6 +579,7 @@ ($$$) my $s= IO::Select->new(); my $childs= 0; + my %names; $s->add($server); while (1) { if ($opt_stop_file) @@ -592,14 +593,18 @@ ($$$) mark_time_used('admin'); my @ready = $s->can_read(1); # Wake up once every second - mtr_debug("Got ". (0 + @ready). " connection(s)"); + if (@ready > 0) { + mtr_verbose2("Got ". (0 + @ready). " connection(s)"); + } mark_time_idle(); + my $i= 0; foreach my $sock (@ready) { + ++$i; if ($sock == $server) { # New client connected ++$childs; my $child= $sock->accept(); - mtr_verbose2("Client connected (got ${childs} childs)"); + mtr_verbose2("Connection ${i}: Worker connected (got ${childs} childs)"); $s->add($child); print $child "HELLO\n"; } @@ -608,7 +613,7 @@ ($$$) if (!defined $line) { # Client disconnected --$childs; - mtr_verbose2("Child closed socket (left ${childs} childs)"); + mtr_verbose2((exists $names{$sock} ? $names{$sock} : "Worker"). 
" closed socket (left ${childs} childs)"); $s->remove($sock); $sock->close; next; @@ -744,8 +749,9 @@ ($$$) push(@$completed, $result); } - elsif ($line eq 'START'){ - ; # Send first test + elsif ($line=~ /^START (.*)$/){ + # Send first test + $names{$sock}= $1; } elsif ($line eq 'WARNINGS'){ my $fake_test= My::Test::read_test($sock); @@ -828,7 +834,7 @@ ($$$) # At this point we have found next suitable test $next= splice(@$tests, $i, 1); last; - } + } # for(my $i= 0; $i <= @$tests; $i++) # Use second best choice if no other test has been found if (!$next and defined $second_best){ @@ -847,11 +853,11 @@ ($$$) } else { # No more test, tell child to exit - #mtr_report("Saying BYE to child"); + mtr_verbose2("Saying BYE to ". $names{$sock}); print $sock "BYE\n"; - } - } - } + } # else (!$next) + } # else ($sock != $server) + } # foreach my $sock (@ready) if (!IS_WINDOWS) { foreach my $pid (keys %$children) @@ -911,7 +917,10 @@ ($) # -------------------------------------------------------------------------- # Set worker name # -------------------------------------------------------------------------- - report_option('name',"worker[$thread_num]"); + report_option('name',"worker[". sprintf("%02d", $thread_num). "]"); + my $proc_title= basename($0). " ${mtr_report::name} :". $server->sockport(). " -> :${server_port}"; + $0= $proc_title; + mtr_verbose2("Running at PID $$"); # -------------------------------------------------------------------------- # Set different ports per thread @@ -937,7 +946,7 @@ ($) } # Ask server for first test - print $server "START\n"; + print $server "START ${mtr_report::name}\n"; mark_time_used('init'); @@ -945,6 +954,7 @@ ($) chomp($line); if ($line eq 'TESTCASE'){ my $test= My::Test::read_test($server); + $0= $proc_title. " ". 
$test->{name}; # Clear comment and logfile, to avoid # reusing them from previous test @@ -961,11 +971,12 @@ ($) run_testcase($test, $server); #$test->{result}= 'MTR_RES_PASSED'; # Send it back, now with results set + mtr_verbose2('Writing TESTRESULT'); $test->write_test($server, 'TESTRESULT'); mark_time_used('restart'); } elsif ($line eq 'BYE'){ - mtr_report("Server said BYE"); + mtr_verbose2("Manager said BYE"); # We need to gracefully shut down the servers to see any # Valgrind memory leak errors etc. since last server restart. if ($opt_warnings) { @@ -1856,7 +1867,7 @@ ($$) if ( $opt_debug ) { mtr_add_arg($args, - "--loose-debug=$debug_d:t:A,%s/log/%s.trace", + "--loose-debug=d,info,warning,warnings:t:A,%s/log/%s.trace", $path_vardir_trace, $client_name) } } @@ -4046,9 +4057,8 @@ ($$) } return ($res == 62) ? 0 : $res; - } - - if ($proc) + } # if ($proc and $proc eq $test) + elsif ($proc) { # It was not mysqltest that exited, add to a wait-to-be-started-again list. $keep_waiting_proc{$proc} = 1; @@ -4077,7 +4087,7 @@ ($$) { # do nothing } - } + } # foreach my $wait_for_proc next; @@ -5457,7 +5467,11 @@ ($) mtr_init_args(\$args); mtr_add_arg($args, "--defaults-file=%s", $path_config_file); - mtr_add_arg($args, "--silent"); + if ($opt_verbose > 1) { + mtr_add_arg($args, "--verbose"); + } else { + mtr_add_arg($args, "--silent"); + } mtr_add_arg($args, "--tmpdir=%s", $opt_tmpdir); mtr_add_arg($args, "--character-sets-dir=%s", $path_charsetsdir); mtr_add_arg($args, "--logdir=%s/log", $opt_vardir); From 0815a3b6b5e103658e70198f7976798167500cc6 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Fri, 4 Aug 2023 18:17:17 +0300 Subject: [PATCH 034/165] MDEV-30836 run_test_server() refactored run_test_server() is actually manager main loop. We move out this function into Manager package and split into run() and parse_protocol(). The latter is needed for the fix. Moving into separate package helps to make some common variables which was local to run_test_server(). 
Functions from the main package is now prefixed with main:: (should be reorganized somehow later or auto-imported). --- mysql-test/mysql-test-run.pl | 381 +++++++++++++++++++---------------- 1 file changed, 208 insertions(+), 173 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index e2c025bd57c94..94aa6bd098b17 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -483,7 +483,7 @@ sub main { mark_time_used('init'); my ($prefix, $fail, $completed, $extra_warnings)= - run_test_server($server, $tests, \%children); + Manager::run($server, $tests, \%children); exit(0) if $opt_start_exit; @@ -562,24 +562,205 @@ sub main { } -sub run_test_server ($$$) { - my ($server, $tests, $children) = @_; +package Manager; +use POSIX ":sys_wait_h"; +use File::Basename; +use File::Find; +use IO::Socket::INET; +use IO::Select; +use mtr_report; +use My::Platform; + +my $num_saved_datadir; # Number of datadirs saved in vardir/log/ so far. +my $num_failed_test; # Number of tests failed so far +my $test_failure; # Set true if test suite failed +my $extra_warnings; # Warnings found during server shutdowns + +my $completed; +my %running; +my $result; +my $exe_mysqld; # Used as hint to CoreDump +my %names; + +sub parse_protocol($$) { + my $sock= shift; + my $line= shift; + + if ($line eq 'TESTRESULT'){ + mtr_verbose2("Got TESTRESULT from ". $names{$sock}); + $result= My::Test::read_test($sock); - my $num_saved_datadir= 0; # Number of datadirs saved in vardir/log/ so far. 
- my $num_failed_test= 0; # Number of tests failed so far - my $test_failure= 0; # Set true if test suite failed - my $extra_warnings= []; # Warnings found during server shutdowns + # Report test status + mtr_report_test($result); - my $completed= []; - my %running; - my $result; - my $exe_mysqld= find_mysqld($bindir) || ""; # Used as hint to CoreDump + if ( $result->is_failed() ) { - my $suite_timeout= start_timer(suite_timeout()); + # Save the workers "savedir" in var/log + my $worker_savedir= $result->{savedir}; + my $worker_savename= basename($worker_savedir); + my $savedir= "$opt_vardir/log/$worker_savename"; + + # Move any core files from e.g. mysqltest + foreach my $coref (glob("core*"), glob("*.dmp")) + { + mtr_report(" - found '$coref', moving it to '$worker_savedir'"); + move($coref, $worker_savedir); + } + + find( + { + no_chdir => 1, + wanted => sub + { + My::CoreDump::core_wanted(\$num_saved_cores, + $opt_max_save_core, + @opt_cases == 0, + $exe_mysqld, $opt_parallel); + } + }, + $worker_savedir); + + if ($num_saved_datadir >= $opt_max_save_datadir) + { + mtr_report(" - skipping '$worker_savedir/'"); + main::rmtree($worker_savedir); + } + else + { + mtr_report(" - saving '$worker_savedir/' to '$savedir/'"); + rename($worker_savedir, $savedir); + $num_saved_datadir++; + } + main::resfile_print_test(); + $num_failed_test++ unless ($result->{retries} || + $result->{exp_fail}); + + $test_failure= 1; + if ( !$opt_force ) { + # Test has failed, force is off + push(@$completed, $result); + if ($result->{'dont_kill_server'}) + { + mtr_verbose2("${line}: saying BYE to ". 
$names{$sock}); + print $sock "BYE\n"; + return 1; + } + return ["Failure", 1, $completed, $extra_warnings]; + } + elsif ($opt_max_test_fail > 0 and + $num_failed_test >= $opt_max_test_fail) { + push(@$completed, $result); + mtr_report("Too many tests($num_failed_test) failed!", + "Terminating..."); + return ["Too many failed", 1, $completed, $extra_warnings]; + } + } + + main::resfile_print_test(); + # Retry test run after test failure + my $retries= $result->{retries} || 2; + my $test_has_failed= $result->{failures} || 0; + if ($test_has_failed and $retries <= $opt_retry){ + # Test should be run one more time unless it has failed + # too many times already + my $tname= $result->{name}; + my $failures= $result->{failures}; + if ($opt_retry > 1 and $failures >= $opt_retry_failure){ + mtr_report("\nTest $tname has failed $failures times,", + "no more retries!\n"); + } + else { + mtr_report("\nRetrying test $tname, ". + "attempt($retries/$opt_retry)...\n"); + #saving the log file as filename.failed in case of retry + if ( $result->is_failed() ) { + my $worker_logdir= $result->{savedir}; + my $log_file_name=dirname($worker_logdir)."/".$result->{shortname}.".log"; + + if (-e $log_file_name) { + $result->{'logfile-failed'} = mtr_lastlinesfromfile($log_file_name, 20); + } else { + $result->{'logfile-failed'} = ""; + } + + rename $log_file_name, $log_file_name.".failed"; + } + { + local @$result{'retries', 'result'}; + delete $result->{result}; + $result->{retries}= $retries+1; + $result->write_test($sock, 'TESTCASE'); + } + push(@$completed, $result); + return 1; + } + } + + # Repeat test $opt_repeat number of times + my $repeat= $result->{repeat} || 1; + if ($repeat < $opt_repeat) + { + $result->{retries}= 0; + $result->{rep_failures}++ if $result->{failures}; + $result->{failures}= 0; + delete($result->{result}); + $result->{repeat}= $repeat+1; + $result->write_test($sock, 'TESTCASE'); + return 1; + } + + # Remove from list of running + mtr_error("'", 
$result->{name},"' is not known to be running") + unless delete $running{$result->key()}; + + # Save result in completed list + push(@$completed, $result); + + } # if ($line eq 'TESTRESULT') + elsif ($line=~ /^START (.*)$/){ + # Send first test + $names{$sock}= $1; + } + elsif ($line eq 'WARNINGS'){ + my $fake_test= My::Test::read_test($sock); + my $test_list= join (" ", @{$fake_test->{testnames}}); + push @$extra_warnings, $test_list; + my $report= $fake_test->{'warnings'}; + mtr_report("***Warnings generated in error logs during shutdown ". + "after running tests: $test_list\n\n$report"); + $test_failure= 1; + if ( !$opt_force ) { + # Test failure due to warnings, force is off + mtr_verbose2("Socket loop exiting 3"); + return ["Warnings in log", 1, $completed, $extra_warnings]; + } + return 1; + } + elsif ($line =~ /^SPENT/) { + main::add_total_times($line); + } + elsif ($line eq 'VALGREP' && $opt_valgrind) { + $valgrind_reports= 1; + } + else { + mtr_error("Unknown response: '$line' from client"); + } + return 0; +} + +sub run ($$$) { + my ($server, $tests, $children) = @_; + my $suite_timeout= main::start_timer(main::suite_timeout()); + $exe_mysqld= main::find_mysqld($bindir) || ""; # Used as hint to CoreDump + $num_saved_datadir= 0; # Number of datadirs saved in vardir/log/ so far. 
+ $num_failed_test= 0; # Number of tests failed so far + $test_failure= 0; # Set true if test suite failed + $extra_warnings= []; # Warnings found during server shutdowns + $completed= []; my $s= IO::Select->new(); my $childs= 0; - my %names; + $s->add($server); while (1) { if ($opt_stop_file) @@ -587,16 +768,16 @@ ($$$) if (mtr_wait_lock_file($opt_stop_file, $opt_stop_keep_alive)) { # We were waiting so restart timer process - my $suite_timeout= start_timer(suite_timeout()); + my $suite_timeout= main::start_timer(main::suite_timeout()); } } - mark_time_used('admin'); + main::mark_time_used('admin'); my @ready = $s->can_read(1); # Wake up once every second if (@ready > 0) { mtr_verbose2("Got ". (0 + @ready). " connection(s)"); } - mark_time_idle(); + main::mark_time_idle(); my $i= 0; foreach my $sock (@ready) { ++$i; @@ -619,163 +800,14 @@ ($$$) next; } chomp($line); + mtr_verbose2("Connection ${i}". (exists $names{$sock} ? " from $names{$sock}" : "") .": $line"); - if ($line eq 'TESTRESULT'){ - $result= My::Test::read_test($sock); - - # Report test status - mtr_report_test($result); - - if ( $result->is_failed() ) { - - # Save the workers "savedir" in var/log - my $worker_savedir= $result->{savedir}; - my $worker_savename= basename($worker_savedir); - my $savedir= "$opt_vardir/log/$worker_savename"; - - # Move any core files from e.g. 
mysqltest - foreach my $coref (glob("core*"), glob("*.dmp")) - { - mtr_report(" - found '$coref', moving it to '$worker_savedir'"); - move($coref, $worker_savedir); - } - - find( - { - no_chdir => 1, - wanted => sub - { - My::CoreDump::core_wanted(\$num_saved_cores, - $opt_max_save_core, - @opt_cases == 0, - $exe_mysqld, $opt_parallel); - } - }, - $worker_savedir); - - if ($num_saved_datadir >= $opt_max_save_datadir) - { - mtr_report(" - skipping '$worker_savedir/'"); - rmtree($worker_savedir); - } - else - { - mtr_report(" - saving '$worker_savedir/' to '$savedir/'"); - rename($worker_savedir, $savedir); - $num_saved_datadir++; - } - resfile_print_test(); - $num_failed_test++ unless ($result->{retries} || - $result->{exp_fail}); - - $test_failure= 1; - if ( !$opt_force ) { - # Test has failed, force is off - push(@$completed, $result); - if ($result->{'dont_kill_server'}) - { - print $sock "BYE\n"; - next; - } - return ("Failure", 1, $completed, $extra_warnings); - } - elsif ($opt_max_test_fail > 0 and - $num_failed_test >= $opt_max_test_fail) { - push(@$completed, $result); - mtr_report("Too many tests($num_failed_test) failed!", - "Terminating..."); - return ("Too many failed", 1, $completed, $extra_warnings); - } - } - - resfile_print_test(); - # Retry test run after test failure - my $retries= $result->{retries} || 2; - my $test_has_failed= $result->{failures} || 0; - if ($test_has_failed and $retries <= $opt_retry){ - # Test should be run one more time unless it has failed - # too many times already - my $tname= $result->{name}; - my $failures= $result->{failures}; - if ($opt_retry > 1 and $failures >= $opt_retry_failure){ - mtr_report("\nTest $tname has failed $failures times,", - "no more retries!\n"); - } - else { - mtr_report("\nRetrying test $tname, ". 
- "attempt($retries/$opt_retry)...\n"); - #saving the log file as filename.failed in case of retry - if ( $result->is_failed() ) { - my $worker_logdir= $result->{savedir}; - my $log_file_name=dirname($worker_logdir)."/".$result->{shortname}.".log"; - - if (-e $log_file_name) { - $result->{'logfile-failed'} = mtr_lastlinesfromfile($log_file_name, 20); - } else { - $result->{'logfile-failed'} = ""; - } - - rename $log_file_name, $log_file_name.".failed"; - } - { - local @$result{'retries', 'result'}; - delete $result->{result}; - $result->{retries}= $retries+1; - $result->write_test($sock, 'TESTCASE'); - } - push(@$completed, $result); - next; - } - } - - # Repeat test $opt_repeat number of times - my $repeat= $result->{repeat} || 1; - if ($repeat < $opt_repeat) - { - $result->{retries}= 0; - $result->{rep_failures}++ if $result->{failures}; - $result->{failures}= 0; - delete($result->{result}); - $result->{repeat}= $repeat+1; - $result->write_test($sock, 'TESTCASE'); - next; - } - - # Remove from list of running - mtr_error("'", $result->{name},"' is not known to be running") - unless delete $running{$result->key()}; - - # Save result in completed list - push(@$completed, $result); - - } - elsif ($line=~ /^START (.*)$/){ - # Send first test - $names{$sock}= $1; - } - elsif ($line eq 'WARNINGS'){ - my $fake_test= My::Test::read_test($sock); - my $test_list= join (" ", @{$fake_test->{testnames}}); - push @$extra_warnings, $test_list; - my $report= $fake_test->{'warnings'}; - mtr_report("***Warnings generated in error logs during shutdown ". 
- "after running tests: $test_list\n\n$report"); - $test_failure= 1; - if ( !$opt_force ) { - # Test failure due to warnings, force is off - return ("Warnings in log", 1, $completed, $extra_warnings); - } - next; + my $res= parse_protocol($sock, $line); + if (ref $res eq 'ARRAY') { + return @$res; + } elsif ($res == 1) { + next; } - elsif ($line =~ /^SPENT/) { - add_total_times($line); - } - elsif ($line eq 'VALGREP' && $opt_valgrind) { - $valgrind_reports= 1; - } - else { - mtr_error("Unknown response: '$line' from client"); - } # Find next test to schedule # - Try to use same configuration as worker used last time @@ -788,7 +820,7 @@ ($$$) last unless defined $t; - if (run_testcase_check_skip_test($t)){ + if (main::run_testcase_check_skip_test($t)){ # Move the test to completed list #mtr_report("skip - Moving test $i to completed"); push(@$completed, splice(@$tests, $i, 1)); @@ -889,7 +921,7 @@ ($$$) # ---------------------------------------------------- # Check if test suite timer expired # ---------------------------------------------------- - if ( has_expired($suite_timeout) ) + if ( main::has_expired($suite_timeout) ) { mtr_report("Test suite timeout! Terminating..."); return ("Timeout", 1, $completed, $extra_warnings); @@ -897,6 +929,9 @@ ($$$) } } +1; + +package main; sub run_worker ($) { my ($server_port, $thread_num)= @_; From 4ed583031abc47bf990ba09ff3dcbedbde77d2a7 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Wed, 9 Aug 2023 20:11:06 +0300 Subject: [PATCH 035/165] MDEV-30836 MTR Cygwin subshell wrapper fix See "Path-style conflict" in "MDEV-30836 MTR Cygwin fix" for explanation. To install subshell fix use --cygwin-subshell-fix=do To uninstall use --cygwin-subshell-fix=remove This works only from Cygwin environment. As long as perl on PATH is from Cygwin you are on Cygwin environment. 
Check it with perl --version This is perl 5, version 36, subversion 1 (v5.36.1) built for x86_64-cygwin-threads-multi --- mysql-test/lib/My/Platform.pm | 66 +++++++++++++++++++++++++++++++++++ mysql-test/mysql-test-run.pl | 6 +++- 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/mysql-test/lib/My/Platform.pm b/mysql-test/lib/My/Platform.pm index b8bc9f8ec846a..9660366db7add 100644 --- a/mysql-test/lib/My/Platform.pm +++ b/mysql-test/lib/My/Platform.pm @@ -20,6 +20,7 @@ package My::Platform; use strict; use File::Basename; use File::Path; +use Carp; use base qw(Exporter); our @EXPORT= qw(IS_CYGWIN IS_WINDOWS IS_WIN32PERL IS_AIX @@ -219,4 +220,69 @@ sub open_for_append } +sub check_cygwin_subshell +{ + # Only pipe (or sh-expansion) is fed to /bin/sh + my $out= `echo %comspec%|cat`; + return ($out =~ /\bcmd.exe\b/) ? 0 : 1; +} + +sub install_shell_wrapper() +{ + system("rm -f /bin/sh.exe") and die $!; + my $wrapper= <<'EOF'; +#!/bin/bash +if [[ -n "$MTR_PERL" && "$1" = "-c" ]]; then + shift + exec $(cygpath -m "$COMSPEC") /C "$@" +fi +exec /bin/bash "$@" +EOF + open(OUT, '>', "/bin/sh") or die "/bin/sh: $!\n"; + print OUT $wrapper; + close(OUT); + system("chmod +x /bin/sh") and die $!; + print "Cygwin subshell wrapper /bin/sh was installed, please restart MTR!\n"; + exit(0); +} + +sub uninstall_shell_wrapper() +{ + system("rm -f /bin/sh") and die $!; + system("cp /bin/bash.exe /bin/sh.exe") and die $!; +} + +sub cygwin_subshell_fix +{ + my ($opt_name, $opt_value)= @_; + if ($opt_name ne "cygwin-subshell-fix") { + confess "Wrong option name: ${opt_name}"; + } + if ($opt_value eq "do") { + if (check_cygwin_subshell()) { + install_shell_wrapper(); + } else { + print "Cygwin subshell fix was already installed, skipping...\n"; + } + } elsif ($opt_value eq "remove") { + if (check_cygwin_subshell()) { + print "Cygwin subshell fix was already uninstalled, skipping...\n"; + } else { + uninstall_shell_wrapper(); + } + } else { + die "Wrong --cygwin-subshell-fix 
value: ${opt_value} (expected do/remove)"; + } +} + +sub options +{ + if (IS_CYGWIN) { + return ('cygwin-subshell-fix=s' => \&cygwin_subshell_fix); + } else { + return (); + } +} + + 1; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 94aa6bd098b17..e6fe3cae5f034 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1237,7 +1237,8 @@ sub command_line_setup { 'xml-report=s' => \$opt_xml_report, My::Debugger::options(), - My::CoreDump::options() + My::CoreDump::options(), + My::Platform::options() ); # fix options (that take an optional argument and *only* after = sign @@ -1273,6 +1274,9 @@ sub command_line_setup { } if (IS_CYGWIN) { + if (My::Platform::check_cygwin_subshell()) { + die("Cygwin /bin/sh subshell requires fix with --cygwin-subshell-fix=do\n"); + } # Use mixed path format i.e c:/path/to/ $glob_mysql_test_dir= mixed_path($glob_mysql_test_dir); } From 640cd404af0891e776b3d4c9b65c301691ef1f69 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Mon, 7 Aug 2023 22:33:58 +0300 Subject: [PATCH 036/165] MDEV-30836 MTR Cygwin fix Cygwin is more Unix-oriented. It does not treat \n as \r\n in regexps (fixed by \R), it supplies Unix-style paths (fixed by mixed_path()). It does some cleanup on paths when running exe, so it will be different in exe output (like with $exe_mysqld, comparing basename() is enough). Cygwin installation 1. Just install latest perl version (only base package) and patchutils from cygwin-setup; 2. Don't forget to add c:\cygwin64\bin into system path before any other perl flavors; 3. There is path-style conflict (see below), you must replace c:\cygwin64\bin\sh.exe with the wrapper. Run MTR with --cygwin-subshell-fix=do for that. Make sure you are running Cygwin perl for the option to work. 4. Restart buildbot via net stop buildbot; net start buildbot Path-style conflict of Cygwin-ish Perl Some exe paths are passed to mysqltest which are executed by a native call. 
This requires native-style paths (\-style). These exe paths also executed by Perl itself. Either by MTR itself which is not so critical, but also by tests' --perl blocks which is impossible to change. And if Perl detects shell-expansion or uses pipe command it passess this exe path to /bin/sh which is Cygwin-compiled bash that cannot work with \-style (or at least in -c processing). Thus we require \-style on some parts of MTR execution and /-style on another parts. The examples of tests which cover these different parts are: main.mysqlbinlog_row_compressed \ main.sp_trans_log That could be great to force Perl to use something different from /bin/sh, but unfortunately /bin/sh is compiled-in into binary. So the only solution left is to overwrite /bin/sh with some wrapper script which passes the command to cmd.exe instead of bash. --- mysql-test/lib/My/Platform.pm | 9 +++++++-- mysql-test/mysql-test-run.pl | 13 +++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/mysql-test/lib/My/Platform.pm b/mysql-test/lib/My/Platform.pm index 9660366db7add..15a08e3891407 100644 --- a/mysql-test/lib/My/Platform.pm +++ b/mysql-test/lib/My/Platform.pm @@ -96,8 +96,13 @@ sub mixed_path { sub native_path { my ($path)= @_; - $path=~ s/\//\\/g - if (IS_CYGWIN or IS_WIN32PERL); + if (IS_CYGWIN) { + # \\\\ protects against 2 expansions (just for the case) + $path=~ s/\/+|\\+/\\\\\\\\/g; + } + elsif (IS_WINDOWS) { + $path=~ s/\/+/\\/g; + } return $path; } diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index e6fe3cae5f034..3c11c0ee4cfc5 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -343,7 +343,7 @@ END main(); sub main { - $ENV{MTR_PERL}=$^X; + $ENV{MTR_PERL}= mixed_path($^X); # Default, verbosity on report_option('verbose', 0); @@ -1758,11 +1758,12 @@ sub collect_mysqld_features { # to simplify the parsing, we'll merge all nicely formatted --help texts $list =~ s/\n {22}(\S)/ $1/g; - my @list= split '\n', 
$list; + my @list= split '\R', $list; $mysql_version_id= 0; + my $exe= basename($exe_mysqld); while (defined(my $line = shift @list)){ - if ($line =~ /^\Q$exe_mysqld\E\s+Ver\s(\d+)\.(\d+)\.(\d+)(\S*)/ ) { + if ($line =~ /\W\Q$exe\E\s+Ver\s(\d+)\.(\d+)\.(\d+)(\S*)/ ) { $mysql_version_id= $1*10000 + $2*100 + $3; mtr_report("MariaDB Version $1.$2.$3$4"); last; @@ -1786,7 +1787,7 @@ sub collect_mysqld_features { next; } - last if /^$/; # then goes a list of variables, it ends with an empty line + last if /^\r?$/; # then goes a list of variables, it ends with an empty line # Put a variable into hash /^([\S]+)[ \t]+(.*?)\r?$/ or die "Could not parse mysqld --help: $_\n"; @@ -1817,7 +1818,7 @@ () my $list = `$cmd` or mtr_error("Could not connect to extern server using command: '$cmd'"); - foreach my $line (split('\n', $list )) + foreach my $line (split('\R', $list )) { # Put variables into hash if ( $line =~ /^([\S]+)[ \t]+(.*?)\r?$/ ) @@ -2939,7 +2940,7 @@ sub initialize_servers { # sub sql_to_bootstrap { my ($sql) = @_; - my @lines= split(/\n/, $sql); + my @lines= split(/\R/, $sql); my $result= "\n"; my $delimiter= ';'; From 848b3af816872725d9b5332b26783d8284e653df Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Tue, 8 Aug 2023 02:14:55 +0300 Subject: [PATCH 037/165] MDEV-30836 MTR MSYS2 fix attempt MSYS2 is basically Cygwin, except it has more easy installation (but with tools which are not used) and it has some more control of path conversion via MSYS2_ARG_CONV_EXCL and MSYS2_ENV_CONV_EXCL. So it should be more Windows-friendly than Cygwin. Installation Similar to Cygwin, except installing patch requires additional command run from shell: pacman -S patch MSYS2 still doesn't work as it returns wierd "Bad address" when exec-ing forked process from create_process(). Same exec from standalone perl -e runs just fine... 
:( --- mysql-test/lib/My/Platform.pm | 8 +++++++- mysql-test/lib/My/SafeProcess.pm | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/mysql-test/lib/My/Platform.pm b/mysql-test/lib/My/Platform.pm index 15a08e3891407..2b32ef87b8196 100644 --- a/mysql-test/lib/My/Platform.pm +++ b/mysql-test/lib/My/Platform.pm @@ -23,7 +23,7 @@ use File::Path; use Carp; use base qw(Exporter); -our @EXPORT= qw(IS_CYGWIN IS_WINDOWS IS_WIN32PERL IS_AIX +our @EXPORT= qw(IS_CYGWIN IS_MSYS IS_WINDOWS IS_WIN32PERL IS_AIX native_path posix_path mixed_path check_socket_path_length process_alive open_for_append); @@ -34,9 +34,15 @@ BEGIN { die "Could not execute 'cygpath': $!"; } eval 'sub IS_CYGWIN { 1 }'; + eval 'sub IS_MSYS { 0 }'; + } + elsif ($^O eq "msys") { + eval 'sub IS_CYGWIN { 1 }'; + eval 'sub IS_MSYS { 1 }'; } else { eval 'sub IS_CYGWIN { 0 }'; + eval 'sub IS_MSYS { 0 }'; } if ($^O eq "MSWin32") { eval 'sub IS_WIN32PERL { 1 }'; diff --git a/mysql-test/lib/My/SafeProcess.pm b/mysql-test/lib/My/SafeProcess.pm index 69033649b46cb..e3b46b3709ea2 100644 --- a/mysql-test/lib/My/SafeProcess.pm +++ b/mysql-test/lib/My/SafeProcess.pm @@ -102,7 +102,7 @@ else # Find the safe process binary or script sub find_bin { - if (IS_WIN32PERL or IS_CYGWIN) + if (IS_WINDOWS) { # Use my_safe_process.exe my $exe= my_find_bin($bindir, ["lib/My/SafeProcess", "My/SafeProcess"], From a49b9314c16fe60b6056b50405d28eff69eed868 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Mon, 7 Aug 2023 18:49:47 +0300 Subject: [PATCH 038/165] MDEV-30836 MTR hangs after tests have completed The problem is in manager/worker communication when worker sends WARNINGS and then TESTRESULT. If manager yet didn't read WARNINGS response both responses get into the same buffer, can_read() will indicate we have data only once and we must read all the data from the socket at once. Otherwise TESTRESULT response is lost and manager waits it forever. The fix now instead of single line reads the socket in a loop. 
But if there is only one response in the buffer the second read will be blocked waiting until new data arrives. That can be overcame by blocking(0) which sets the handle into non-blocking mode. If there is no data second read just returns undef. The problem is non-blocking mode is not supported by all perl flavors on Windows. Strawberry and ActiveState do not support it. Cygwin and MSYS2 do support. There is some ioctl() hack that was known to "work" but it doesn't do what is expected (it does not return data when there is data). So for Windows if it is not Cygwin we disable the fix. --- mysql-test/mysql-test-run.pl | 56 ++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 3c11c0ee4cfc5..40b7a051735f4 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -643,7 +643,7 @@ ($$) { mtr_verbose2("${line}: saying BYE to ". $names{$sock}); print $sock "BYE\n"; - return 1; + return 2; } return ["Failure", 1, $completed, $extra_warnings]; } @@ -692,7 +692,7 @@ ($$) $result->write_test($sock, 'TESTCASE'); } push(@$completed, $result); - return 1; + return 2; } } @@ -706,7 +706,7 @@ ($$) delete($result->{result}); $result->{repeat}= $repeat+1; $result->write_test($sock, 'TESTCASE'); - return 1; + return 2; } # Remove from list of running @@ -779,7 +779,7 @@ ($$$) } main::mark_time_idle(); my $i= 0; - foreach my $sock (@ready) { + sock_loop: foreach my $sock (@ready) { ++$i; if ($sock == $server) { # New client connected @@ -790,23 +790,37 @@ ($$$) print $child "HELLO\n"; } else { - my $line= <$sock>; - if (!defined $line) { - # Client disconnected - --$childs; - mtr_verbose2((exists $names{$sock} ? $names{$sock} : "Worker"). " closed socket (left ${childs} childs)"); - $s->remove($sock); - $sock->close; - next; - } - chomp($line); - mtr_verbose2("Connection ${i}". (exists $names{$sock} ? 
" from $names{$sock}" : "") .": $line"); - - my $res= parse_protocol($sock, $line); - if (ref $res eq 'ARRAY') { - return @$res; - } elsif ($res == 1) { - next; + my $j= 0; + $sock->blocking(0); + while (my $line= <$sock>) { + ++$j; + chomp($line); + mtr_verbose2("Connection ${i}.${j}". (exists $names{$sock} ? " from $names{$sock}" : "") .": $line"); + + $sock->blocking(1); + my $res= parse_protocol($sock, $line); + $sock->blocking(0); + if (ref $res eq 'ARRAY') { + return @$res; + } elsif ($res == 1) { + next; + } elsif ($res == 2) { + next sock_loop; + } + if (IS_WINDOWS and !IS_CYGWIN) { + # Strawberry and ActiveState don't support blocking(0), the next iteration will be blocked! + # If there is next response now in the buffer and it is TESTRESULT we are affected by MDEV-30836 and the manager will hang. + last; + } + } + $sock->blocking(1); + if ($j == 0) { + # Client disconnected + --$childs; + mtr_verbose2((exists $names{$sock} ? $names{$sock} : "Worker"). " closed socket (left ${childs} childs)"); + $s->remove($sock); + $sock->close; + next; } # Find next test to schedule From b08474435fd1ba28cbd301948aa5554a31e06a07 Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 4 Sep 2023 20:47:38 +0300 Subject: [PATCH 039/165] Fix compression tests for s390x The problem is that s390x is not using the default bzip library we use on other platforms, which causes compressed string lengths to be differnt than what mtr tests expects. Fixed by: - Added have_normal_bzip.inc, which checks if compress() returns the expected length. 
- Adjust the results to match the expected one - main.func_compress.test & archive.archive - Don't print lengths that depends on compression library - mysqlbinlog compress tests & connect.zip - Don't print DATA_LENGTH for SET column_compression_zlib_level=1 - main.column_compression --- mysql-test/include/have_normal_bzip.inc | 9 +++++ mysql-test/main/column_compression.inc | 3 +- mysql-test/main/column_compression.result | 36 +++++++++---------- mysql-test/main/column_compression.test | 1 + mysql-test/main/column_compression_rpl.test | 1 + mysql-test/main/func_compress.test | 1 + .../main/mysqlbinlog_row_compressed.test | 1 + .../main/mysqlbinlog_stmt_compressed.test | 2 +- mysql-test/suite/archive/archive.test | 2 ++ .../compat/oracle/t/column_compression.test | 1 + .../t/row_size_error_log_warnings_3.test | 1 + .../connect/mysql-test/connect/r/zip.result | 22 ++++++------ storage/connect/mysql-test/connect/t/zip.test | 7 ++-- 13 files changed, 52 insertions(+), 35 deletions(-) create mode 100644 mysql-test/include/have_normal_bzip.inc diff --git a/mysql-test/include/have_normal_bzip.inc b/mysql-test/include/have_normal_bzip.inc new file mode 100644 index 0000000000000..36c06274398c8 --- /dev/null +++ b/mysql-test/include/have_normal_bzip.inc @@ -0,0 +1,9 @@ +--source include/have_compress.inc + +# Test that the system is using the default/standard bzip library. +# If not, we have to skip the test as the compression lengths displayed +# in the test will not match the results from used compression library. 
+ +if (`select length(COMPRESS(space(5000))) != 33`) { + skip Test skipped as standard bzip is needed; +} diff --git a/mysql-test/main/column_compression.inc b/mysql-test/main/column_compression.inc index 13952b739ae8e..27e5fc70837fe 100644 --- a/mysql-test/main/column_compression.inc +++ b/mysql-test/main/column_compression.inc @@ -70,7 +70,8 @@ TRUNCATE TABLE t1; SET column_compression_zlib_level= 1; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_compressions', 'Column_decompressions'); -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +# This is is using < as DATA_LENGTH produces different results on s390x-ubuntu-2004 +SELECT DATA_LENGTH < 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; diff --git a/mysql-test/main/column_compression.result b/mysql-test/main/column_compression.result index 12d2629e40465..da6dfdc51f4a8 100644 --- a/mysql-test/main/column_compression.result +++ b/mysql-test/main/column_compression.result @@ -133,9 +133,9 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_c VARIABLE_NAME VARIABLE_VALUE COLUMN_COMPRESSIONS 3 COLUMN_DECOMPRESSIONS 12 -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; -DATA_LENGTH -40 +SELECT DATA_LENGTH < 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +DATA_LENGTH < 100 +1 TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); @@ -348,9 +348,9 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_c VARIABLE_NAME VARIABLE_VALUE COLUMN_COMPRESSIONS 3 COLUMN_DECOMPRESSIONS 12 -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; -DATA_LENGTH -40 +SELECT DATA_LENGTH < 100 FROM 
INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +DATA_LENGTH < 100 +1 TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); @@ -563,9 +563,9 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_c VARIABLE_NAME VARIABLE_VALUE COLUMN_COMPRESSIONS 3 COLUMN_DECOMPRESSIONS 12 -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; -DATA_LENGTH -40 +SELECT DATA_LENGTH < 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +DATA_LENGTH < 100 +1 TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); @@ -778,9 +778,9 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_c VARIABLE_NAME VARIABLE_VALUE COLUMN_COMPRESSIONS 3 COLUMN_DECOMPRESSIONS 12 -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; -DATA_LENGTH -40 +SELECT DATA_LENGTH < 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +DATA_LENGTH < 100 +1 TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); @@ -993,9 +993,9 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_c VARIABLE_NAME VARIABLE_VALUE COLUMN_COMPRESSIONS 3 COLUMN_DECOMPRESSIONS 12 -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; -DATA_LENGTH -60 +SELECT DATA_LENGTH < 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +DATA_LENGTH < 100 +1 TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); @@ -1209,9 +1209,9 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN('Column_c VARIABLE_NAME VARIABLE_VALUE COLUMN_COMPRESSIONS 3 COLUMN_DECOMPRESSIONS 12 -SELECT DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE 
TABLE_SCHEMA='test' AND TABLE_NAME='t1'; -DATA_LENGTH -36 +SELECT DATA_LENGTH < 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; +DATA_LENGTH < 100 +1 TRUNCATE TABLE t1; SET column_compression_zlib_level= 9; INSERT INTO t1 VALUES(REPEAT('ab', 1000)); diff --git a/mysql-test/main/column_compression.test b/mysql-test/main/column_compression.test index 0a0796532b415..c628e8f9cca86 100644 --- a/mysql-test/main/column_compression.test +++ b/mysql-test/main/column_compression.test @@ -1,5 +1,6 @@ --source include/have_innodb.inc --source include/have_csv.inc +--source include/have_normal_bzip.inc let $MYSQLD_DATADIR= `select @@datadir`; diff --git a/mysql-test/main/column_compression_rpl.test b/mysql-test/main/column_compression_rpl.test index 86c73a77dbd53..df8e889016b8b 100644 --- a/mysql-test/main/column_compression_rpl.test +++ b/mysql-test/main/column_compression_rpl.test @@ -1,5 +1,6 @@ --source include/have_innodb.inc --source include/master-slave.inc +--source include/have_normal_bzip.inc --let $engine_type= myisam --let $engine_type2= innodb diff --git a/mysql-test/main/func_compress.test b/mysql-test/main/func_compress.test index f41a58981548a..221dddd59f558 100644 --- a/mysql-test/main/func_compress.test +++ b/mysql-test/main/func_compress.test @@ -1,4 +1,5 @@ -- source include/have_compress.inc +-- source include/have_normal_bzip.inc # # Test for compress and uncompress functions: # diff --git a/mysql-test/main/mysqlbinlog_row_compressed.test b/mysql-test/main/mysqlbinlog_row_compressed.test index 6b62a42c44923..f28068618304c 100644 --- a/mysql-test/main/mysqlbinlog_row_compressed.test +++ b/mysql-test/main/mysqlbinlog_row_compressed.test @@ -4,6 +4,7 @@ --source include/have_log_bin.inc --source include/have_binlog_format_row.inc +--source include/have_normal_bzip.inc # # diff --git a/mysql-test/main/mysqlbinlog_stmt_compressed.test b/mysql-test/main/mysqlbinlog_stmt_compressed.test index 613a820d3eaff..400f21f5ab926 100644 
--- a/mysql-test/main/mysqlbinlog_stmt_compressed.test +++ b/mysql-test/main/mysqlbinlog_stmt_compressed.test @@ -4,7 +4,7 @@ --source include/have_log_bin.inc --source include/have_binlog_format_statement.inc - +--source include/have_normal_bzip.inc # # # mysqlbinlog: compressed query event diff --git a/mysql-test/suite/archive/archive.test b/mysql-test/suite/archive/archive.test index 2d184110a2df9..38bee206ef27a 100644 --- a/mysql-test/suite/archive/archive.test +++ b/mysql-test/suite/archive/archive.test @@ -1612,6 +1612,8 @@ CREATE TABLE t1(a INT, b BLOB) ENGINE=archive; SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; INSERT INTO t1 VALUES(1, 'sampleblob1'),(2, 'sampleblob2'); +# Compression length depends on zip library +--replace_result 583 584 291 292 SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/t/column_compression.test b/mysql-test/suite/compat/oracle/t/column_compression.test index 6fcdd11989053..01d4977ba961f 100644 --- a/mysql-test/suite/compat/oracle/t/column_compression.test +++ b/mysql-test/suite/compat/oracle/t/column_compression.test @@ -1,5 +1,6 @@ --source include/have_innodb.inc --source include/have_csv.inc +--source include/have_normal_bzip.inc SET sql_mode=ORACLE; diff --git a/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test b/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test index 39694c05e0f38..dab9bcfa86429 100644 --- a/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test +++ b/mysql-test/suite/innodb/t/row_size_error_log_warnings_3.test @@ -1,6 +1,7 @@ --source include/have_innodb.inc --source include/have_sequence.inc --source include/innodb_page_size_small.inc +--source include/have_normal_bzip.inc call mtr.add_suppression("InnoDB: Cannot add field .* in table .* because after adding it, the row size is .* which is 
greater than maximum allowed size (.*) for a record on index leaf page."); diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result index c696252ca436c..3505add9adde6 100644 --- a/storage/connect/mysql-test/connect/r/zip.result +++ b/storage/connect/mysql-test/connect/r/zip.result @@ -70,10 +70,10 @@ cmpsize BIGINT NOT NULL FLAG=1, uncsize BIGINT NOT NULL FLAG=2, method INT NOT NULL FLAG=3) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newdos.zip'; -SELECT * FROM t4; -fn cmpsize uncsize method -new1.dos 67 79 8 -new2.dos 77 112 8 +SELECT fn, uncsize, method FROM t4; +fn uncsize method +new1.dos 79 8 +new2.dos 112 8 DROP TABLE t1,t2,t3,t4; # # Testing zipped CSV tables @@ -161,10 +161,10 @@ cmpsize BIGINT NOT NULL FLAG=1, uncsize BIGINT NOT NULL FLAG=2, method INT NOT NULL FLAG=3) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newcsv.zip'; -SELECT * FROM t4; -fn cmpsize uncsize method -new1.csv 79 83 8 -new2.csv 94 125 8 +SELECT fn,uncsize,method FROM t4; +fn uncsize method +new1.csv 83 8 +new2.csv 125 8 DROP TABLE t1,t2,t3,t4; # # Testing zipped JSON tables @@ -234,7 +234,7 @@ cmpsize BIGINT NOT NULL FLAG=1, uncsize BIGINT NOT NULL FLAG=2, method INT NOT NULL FLAG=3) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='bios.zip'; -SELECT * FROM t4; -fn cmpsize uncsize method -bios.json 1096 6848 8 +SELECT fn,uncsize,method FROM t4; +fn uncsize method +bios.json 6848 8 DROP TABLE t1,t2,t3,t4; diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test index 1f0a4eedee961..abef98498a099 100644 --- a/storage/connect/mysql-test/connect/t/zip.test +++ b/storage/connect/mysql-test/connect/t/zip.test @@ -35,7 +35,7 @@ cmpsize BIGINT NOT NULL FLAG=1, uncsize BIGINT NOT NULL FLAG=2, method INT NOT NULL FLAG=3) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newdos.zip'; -SELECT * FROM t4; +SELECT fn, uncsize, method FROM t4; DROP TABLE t1,t2,t3,t4; --echo # @@ -75,7 +75,7 @@ cmpsize BIGINT NOT NULL 
FLAG=1, uncsize BIGINT NOT NULL FLAG=2, method INT NOT NULL FLAG=3) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newcsv.zip'; -SELECT * FROM t4; +SELECT fn,uncsize,method FROM t4; DROP TABLE t1,t2,t3,t4; --echo # @@ -123,7 +123,7 @@ cmpsize BIGINT NOT NULL FLAG=1, uncsize BIGINT NOT NULL FLAG=2, method INT NOT NULL FLAG=3) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='bios.zip'; -SELECT * FROM t4; +SELECT fn,uncsize,method FROM t4; DROP TABLE t1,t2,t3,t4; # @@ -133,4 +133,3 @@ DROP TABLE t1,t2,t3,t4; --remove_file $MYSQLD_DATADIR/test/newcsv.zip --remove_file $MYSQLD_DATADIR/test/bios.zip --remove_file $MYSQLD_DATADIR/test/bios.json - From 182a08a8a39c5cd864dd9d66a688adc180d63b45 Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 4 Sep 2023 09:59:18 +0300 Subject: [PATCH 040/165] Removed compiler warning from connect/filamdbf.cpp --- storage/connect/filamdbf.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 5c4f17acc27a1..e2f9069498950 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -643,7 +643,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g) { char c; int rc; - int len; + int len= 0; MODE mode = Tdbp->GetMode(); Buflen = Blksize; From f009c4da915888e0211447bbedb7e3640ec80935 Mon Sep 17 00:00:00 2001 From: Monty Date: Fri, 1 Sep 2023 11:24:42 +0300 Subject: [PATCH 041/165] Small corrections to MDEV-29693 ANALYZE TABLE 32 bit MariaDB crashed in innodb.innodb-16k and a few other tests. Fixed by using correct sizeof() calls. Histograms where not read if first read was without histograms. 
--- mysql-test/main/stat_tables_flush.result | 73 +++++++++++++++++++++++- mysql-test/main/stat_tables_flush.test | 24 +++++++- sql/sql_statistics.cc | 20 +++++-- sql/table.cc | 16 ++++++ sql/table.h | 1 + 5 files changed, 127 insertions(+), 7 deletions(-) diff --git a/mysql-test/main/stat_tables_flush.result b/mysql-test/main/stat_tables_flush.result index 9a88d5d388dc3..7c1fdc34e64ce 100644 --- a/mysql-test/main/stat_tables_flush.result +++ b/mysql-test/main/stat_tables_flush.result @@ -87,5 +87,76 @@ test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK drop table t1; # -# End of 10.5 tests +# Test that histograms are read after flush +# +create table t1 (a int); +insert into t1 select seq from seq_1_to_10; +insert into t1 select A.seq from seq_10_to_20 A, seq_1_to_9 B; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +explain format=json select * from t1 where a between 2 and 5; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 109, + "filtered": 5, + "attached_condition": "t1.a between 2 and 5" + } + } +} +explain format=json select * from t1 where a between 12 and 15; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 109, + "filtered": 33.59375, + "attached_condition": "t1.a between 12 and 15" + } + } +} +flush tables; +set @@optimizer_use_condition_selectivity=3; +explain format=json select * from t1 where a between 2 and 5; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 109, + "filtered": 15.78947353, + "attached_condition": "t1.a between 2 and 5" + } + } +} +set @@optimizer_use_condition_selectivity=4; +explain format=json select * from t1 where a between 2 and 5; +EXPLAIN +{ + "query_block": { + "select_id": 1, 
+ "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 109, + "filtered": 5, + "attached_condition": "t1.a between 2 and 5" + } + } +} +drop table t1; +set @@optimizer_use_condition_selectivity=default; +# +# End of 10.6 tests # diff --git a/mysql-test/main/stat_tables_flush.test b/mysql-test/main/stat_tables_flush.test index abbbb0d646735..4c916f47ad76a 100644 --- a/mysql-test/main/stat_tables_flush.test +++ b/mysql-test/main/stat_tables_flush.test @@ -46,5 +46,27 @@ analyze table t1 persistent for all; drop table t1; --echo # ---echo # End of 10.5 tests +--echo # Test that histograms are read after flush +--echo # + +create table t1 (a int); +insert into t1 select seq from seq_1_to_10; + +insert into t1 select A.seq from seq_10_to_20 A, seq_1_to_9 B; +analyze table t1 persistent for all; + +explain format=json select * from t1 where a between 2 and 5; +explain format=json select * from t1 where a between 12 and 15; + +flush tables; +set @@optimizer_use_condition_selectivity=3; +explain format=json select * from t1 where a between 2 and 5; +set @@optimizer_use_condition_selectivity=4; +explain format=json select * from t1 where a between 2 and 5; + +drop table t1; +set @@optimizer_use_condition_selectivity=default; + +--echo # +--echo # End of 10.6 tests --echo # diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index e09370f94c427..9351c1253b026 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -2102,7 +2102,7 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) &column_stats, sizeof(*column_stats) * fields, &index_stats, sizeof(*index_stats) * keys, &idx_avg_frequency, - sizeof(idx_avg_frequency) * key_parts, + sizeof(*idx_avg_frequency) * key_parts, &histogram, hist_size * fields, NullS)) DBUG_RETURN(1); @@ -2145,6 +2145,14 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) key_info->collected_stats->init_avg_frequency(idx_avg_frequency); idx_avg_frequency+= key_info->ext_key_parts; } + /* + idx_avg_frequency 
can be less than + table_stats->idx_avg_frequency + key_parts + in the case of LONG_UNIQUE_HASH_FIELD as these has a hidden + ext_key_part which is counted in table_share->ext_keyparts but not + in keyinfo->ext_key_parts. + */ + DBUG_ASSERT(idx_avg_frequency <= table_stats->idx_avg_frequency + key_parts); create_min_max_statistical_fields_for_table(thd, table); @@ -2208,7 +2216,7 @@ alloc_engine_independent_statistics(THD *thd, const TABLE_SHARE *table_share, &column_stats, sizeof(Column_statistics) * fields, &index_stats, sizeof(Index_statistics) * keys, &idx_avg_frequency, - sizeof(idx_avg_frequency) * key_parts, + sizeof(*idx_avg_frequency) * key_parts, NullS)) DBUG_RETURN(1); @@ -2232,6 +2240,7 @@ alloc_engine_independent_statistics(THD *thd, const TABLE_SHARE *table_share, index_stats->init_avg_frequency(idx_avg_frequency); idx_avg_frequency+= key_info->ext_key_parts; } + DBUG_ASSERT(idx_avg_frequency <= table_stats->idx_avg_frequency + key_parts); DBUG_RETURN(0); } @@ -2840,11 +2849,12 @@ read_statistics_for_table(THD *thd, TABLE *table, TABLE_SHARE *table_share= table->s; DBUG_ENTER("read_statistics_for_table"); - if (!force_reload && table_share->stats_cb) + if (!force_reload && table_share->stats_cb && + (!want_histograms || !table_share->histograms_exists())) { if (table->stats_cb == table_share->stats_cb) - DBUG_RETURN(table->stats_cb); // Use current - table->update_engine_independent_stats(); // Copy table_share->stats_cb + DBUG_RETURN(table->stats_cb); // Use current + table->update_engine_independent_stats(); // Copy table_share->stats_cb DBUG_RETURN(table->stats_cb); } diff --git a/sql/table.cc b/sql/table.cc index 4badef7d5a19f..53d0945dce978 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -4596,6 +4596,22 @@ TABLE_SHARE::update_engine_independent_stats(TABLE_STATISTICS_CB *new_stats) } +/* Check if we have statistics for histograms */ + +bool TABLE_SHARE::histograms_exists() +{ + bool res= 0; + if (stats_cb) + { + mysql_mutex_lock(&LOCK_share); + 
if (stats_cb) + res= stats_cb->histograms_exists(); + mysql_mutex_unlock(&LOCK_share); + } + return res; +} + + /* Free information allocated by openfrm diff --git a/sql/table.h b/sql/table.h index 382692cb7cade..b3144ca75d991 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1161,6 +1161,7 @@ struct TABLE_SHARE MEM_ROOT *mem_root, List *field_list) const; void update_engine_independent_stats(TABLE_STATISTICS_CB *stat); + bool histograms_exists(); }; /* not NULL, but cannot be dereferenced */ From 28f7725731b455e8860b176d4fea0c1bd2b8d46e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 2 Aug 2023 20:52:53 +0200 Subject: [PATCH 042/165] wolfssl: enable chacha cyphers and secure negotiation compaitibility with: * chacha - mobile devices * secure negotiation - openssl 3 --- extra/wolfssl/CMakeLists.txt | 5 +++++ extra/wolfssl/user_settings.h.in | 4 ++++ include/mysql/service_my_crypt.h | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt index 826587df24255..69f7801d4e321 100644 --- a/extra/wolfssl/CMakeLists.txt +++ b/extra/wolfssl/CMakeLists.txt @@ -102,6 +102,9 @@ ${WOLFCRYPT_SRCDIR}/rsa.c ${WOLFCRYPT_SRCDIR}/sha.c ${WOLFCRYPT_SRCDIR}/sha256.c ${WOLFCRYPT_SRCDIR}/sha512.c +${WOLFCRYPT_SRCDIR}/poly1305.c +${WOLFCRYPT_SRCDIR}/chacha.c +${WOLFCRYPT_SRCDIR}/chacha20_poly1305.c ${WOLFCRYPT_SRCDIR}/wc_port.c ${WOLFCRYPT_SRCDIR}/wc_encrypt.c ${WOLFCRYPT_SRCDIR}/hash.c @@ -159,6 +162,8 @@ IF(WOLFSSL_X86_64_BUILD) LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/aes_asm.S ${WOLFCRYPT_SRCDIR}/aes_gcm_asm.S + ${WOLFCRYPT_SRCDIR}/chacha_asm.S + ${WOLFCRYPT_SRCDIR}/poly1305_asm.S ${WOLFCRYPT_SRCDIR}/sha512_asm.S ${WOLFCRYPT_SRCDIR}/sha256_asm.S) ADD_DEFINITIONS(-maes -msse4.2 -mpclmul) diff --git a/extra/wolfssl/user_settings.h.in b/extra/wolfssl/user_settings.h.in index 2355fd1691ca6..425f6f154b9f0 100644 --- a/extra/wolfssl/user_settings.h.in +++ b/extra/wolfssl/user_settings.h.in @@ -19,11 
+19,15 @@ #define HAVE_TLS_EXTENSIONS #define HAVE_AES_ECB #define HAVE_AESGCM +#define HAVE_CHACHA +#define HAVE_POLY1305 #define WOLFSSL_AES_COUNTER #define NO_WOLFSSL_STUB #define OPENSSL_ALL #define WOLFSSL_ALLOW_TLSV10 #define NO_OLD_TIMEVAL_NAME +#define HAVE_SECURE_RENEGOTIATION +#define HAVE_EXTENDED_MASTER /* TLSv1.3 definitions (all needed to build) */ #define WOLFSSL_TLS13 diff --git a/include/mysql/service_my_crypt.h b/include/mysql/service_my_crypt.h index 2a232117ca15f..ac8e427231df2 100644 --- a/include/mysql/service_my_crypt.h +++ b/include/mysql/service_my_crypt.h @@ -45,7 +45,7 @@ extern "C" { /* The max key length of all supported algorithms */ #define MY_AES_MAX_KEY_LENGTH 32 -#define MY_AES_CTX_SIZE 656 +#define MY_AES_CTX_SIZE 1024 enum my_aes_mode { MY_AES_ECB, MY_AES_CBC From e78ce6329196df600cd2f601603787e6f19d8366 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 10 Aug 2023 11:15:57 +0200 Subject: [PATCH 043/165] MDEV-17711 Assertion `arena_for_set_stmt== 0' failed in LEX::set_arena_for_set_stmt upon SET STATEMENT restore SET STATEMENT variables between statements in a multi-statement --- mysql-test/main/set_statement.result | 12 ++++++++++++ mysql-test/main/set_statement.test | 12 ++++++++++++ sql/sql_parse.cc | 2 ++ 3 files changed, 26 insertions(+) diff --git a/mysql-test/main/set_statement.result b/mysql-test/main/set_statement.result index c5ad11c74c073..625b120c58bbf 100644 --- a/mysql-test/main/set_statement.result +++ b/mysql-test/main/set_statement.result @@ -1271,4 +1271,16 @@ SET sql_mode=ORACLE; SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown'; SET sql_mode=default; SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown'; +# +# MDEV-17711 Assertion `arena_for_set_stmt== 0' failed in LEX::set_arena_for_set_stmt upon SET STATEMENT +# +set rand_seed1=1, rand_seed2=2; +set statement rand_seed1=4 for select 2, @@rand_seed1, @@rand_seed2; +set statement 
rand_seed2=5 for select 3, @@rand_seed1, @@rand_seed2 $ +2 @@rand_seed1 @@rand_seed2 +2 4 2 +3 @@rand_seed1 @@rand_seed2 +3 1 5 +# # End of 10.4 tests +# diff --git a/mysql-test/main/set_statement.test b/mysql-test/main/set_statement.test index f9d7c0983272f..e6a37d8a051c6 100644 --- a/mysql-test/main/set_statement.test +++ b/mysql-test/main/set_statement.test @@ -1191,4 +1191,16 @@ SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unk SET sql_mode=default; SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown'; +--echo # +--echo # MDEV-17711 Assertion `arena_for_set_stmt== 0' failed in LEX::set_arena_for_set_stmt upon SET STATEMENT +--echo # + +--delimiter $ +set rand_seed1=1, rand_seed2=2; +set statement rand_seed1=4 for select 2, @@rand_seed1, @@rand_seed2; +set statement rand_seed2=5 for select 3, @@rand_seed1, @@rand_seed2 $ +--delimiter ; + +--echo # --echo # End of 10.4 tests +--echo # diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index f8039b7df0978..f5a80e9e5e551 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1901,6 +1901,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, MYSQL_QUERY_DONE(thd->is_error()); } + thd->lex->restore_set_statement_var(); + #if defined(ENABLED_PROFILING) thd->profiling.finish_current_query(); thd->profiling.start_new_query("continuing"); From fe86d04ea74192c75ab5c90e433fa3cc7a58bd0b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 7 Aug 2023 21:02:03 +0200 Subject: [PATCH 044/165] MDEV-30904 "rpm --setugids" breaks PAM authentication move user/group creation from %post to %pre as Fedora packaging guidelines say. 
This allows to use %attr() to set the correct ownership of files --- plugin/auth_pam/CMakeLists.txt | 10 ++++++++-- support-files/rpm/server-postin.sh | 10 ---------- support-files/rpm/server-prein.sh | 5 +++++ 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/plugin/auth_pam/CMakeLists.txt b/plugin/auth_pam/CMakeLists.txt index 6d2dc72b097c3..49e02b339c793 100644 --- a/plugin/auth_pam/CMakeLists.txt +++ b/plugin/auth_pam/CMakeLists.txt @@ -42,9 +42,15 @@ IF(HAVE_PAM_APPL_H AND HAVE_GETGROUPLIST) IF (TARGET auth_pam) MYSQL_ADD_EXECUTABLE(auth_pam_tool auth_pam_tool.c DESTINATION ${INSTALL_PLUGINDIR}/auth_pam_tool_dir COMPONENT Server) TARGET_LINK_LIBRARIES(auth_pam_tool pam) + IF (CMAKE_MAJOR_VERSION EQUAL 2) + # 2.8.12 bug (in CentOS 7) + SET(user mysql) + ELSE() + SET(user "%{mysqld_user}") + ENDIF() SET(CPACK_RPM_server_USER_FILELIST ${CPACK_RPM_server_USER_FILELIST} - "%attr(700,-,-) ${INSTALL_PLUGINDIRABS}/auth_pam_tool_dir" - "%attr(4755,-,-) ${INSTALL_PLUGINDIRABS}/auth_pam_tool_dir/auth_pam_tool") + "%attr(700,${user},-) ${INSTALL_PLUGINDIRABS}/auth_pam_tool_dir" + "%attr(4755,root,-) ${INSTALL_PLUGINDIRABS}/auth_pam_tool_dir/auth_pam_tool") SET(CPACK_RPM_server_USER_FILELIST ${CPACK_RPM_server_USER_FILELIST} PARENT_SCOPE) ENDIF() IF(TARGET auth_pam OR TARGET auth_pam_v1) diff --git a/support-files/rpm/server-postin.sh b/support-files/rpm/server-postin.sh index 61c417e3e7d31..af19f9df14baf 100644 --- a/support-files/rpm/server-postin.sh +++ b/support-files/rpm/server-postin.sh @@ -37,13 +37,6 @@ if [ $1 = 1 ] ; then fi fi - # Create a MySQL user and group. Do not report any problems if it already - # exists. 
- groupadd -r %{mysqld_group} 2> /dev/null || true - useradd -M -r --home $datadir --shell /sbin/nologin --comment "MySQL server" --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true - # The user may already exist, make sure it has the proper group nevertheless (BUG#12823) - usermod --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true - # Temporary Workaround for MDEV-11386 - will be corrected in Advance Toolchain 10.0-3 and 8.0-8 for ldconfig in /opt/at*/sbin/ldconfig; do test -x $ldconfig && $ldconfig @@ -69,9 +62,6 @@ if [ $1 = 1 ] ; then chmod -R og-rw $datadir/mysql fi -# Set the correct filesystem ownership for the PAM v2 plugin -chown %{mysqld_user} /usr/lib*/mysql/plugin/auth_pam_tool_dir - # install SELinux files - but don't override existing ones SETARGETDIR=/etc/selinux/targeted/src/policy SEDOMPROG=$SETARGETDIR/domains/program diff --git a/support-files/rpm/server-prein.sh b/support-files/rpm/server-prein.sh index d6f77c29e49a3..200d8bf160fdc 100644 --- a/support-files/rpm/server-prein.sh +++ b/support-files/rpm/server-prein.sh @@ -65,3 +65,8 @@ HERE fi fi +# Create a MySQL user and group. Do not report any problems if it already exists. 
+groupadd -r %{mysqld_group} 2> /dev/null || true +useradd -M -r --home %{mysqldatadir} --shell /sbin/nologin --comment "MySQL server" --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true +# The user may already exist, make sure it has the proper group nevertheless (BUG#12823) +usermod --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true From 4d96dba76734f1cce81c6433c2430451b072185a Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 17 Aug 2023 12:39:59 +0200 Subject: [PATCH 045/165] MDEV-25369 mysqlbinlog (mariadb-binlog) -T/--table option clarify the help text --- client/mysqlbinlog.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 269ed6a1f00f9..8d03e0680b827 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -1746,7 +1746,7 @@ static struct my_option my_options[] = &stop_position, &stop_position, 0, GET_ULL, REQUIRED_ARG, (longlong)(~(my_off_t)0), BIN_LOG_HEADER_SIZE, (ulonglong)(~(my_off_t)0), 0, 0, 0}, - {"table", 'T', "List entries for just this table (local log only).", + {"table", 'T', "List entries for just this table (affects only row events).", &table, &table, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"to-last-log", 't', "Requires -R. Will not stop at the end of the \ From 382c543f531c2b1228db9a863e967b08caeefa0e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 25 Aug 2023 14:01:43 +0200 Subject: [PATCH 046/165] MDEV-32012 hash unique corrupts index on virtual blobs as always when copying record[0] aside one needs to detach Field_blob::value's from it, and restore them when record[0] is restored from a backup. 
--- mysql-test/main/long_unique_bugs.result | 24 ++++++++++++++++++++++++ mysql-test/main/long_unique_bugs.test | 24 ++++++++++++++++++++++++ sql/handler.cc | 11 +++++++++++ 3 files changed, 59 insertions(+) diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index eb60f79ac67c5..ebc7caa839d41 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -454,5 +454,29 @@ create table t1 (f text not null, unique (f)); insert into t1 (f) select 'f'; drop table t1; # +# MDEV-32012 hash unique corrupts index on virtual blobs +# +create table t1 ( +f1 varchar(25), +v1 mediumtext generated always as (concat('f1:', f1)) virtual, +unique key (f1) using hash, +key (v1(1000)) +); +flush status; +insert ignore t1 (f1) values (9599),(94410); +show status like 'handler_read_next'; +Variable_name Value +Handler_read_next 1 +# the above MUST BE =1 +check table t1 extended; +Table Op Msg_type Msg_text +test.t1 check status OK +update t1 set f1=100 where f1=9599; +update t1 set f1=9599 where f1=100; +check table t1 extended; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +# # End of 10.4 tests # diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index 0359ac7e7ed24..d9ab36d588e9e 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -455,6 +455,30 @@ create table t1 (f text not null, unique (f)); insert into t1 (f) select 'f'; drop table t1; +--echo # +--echo # MDEV-32012 hash unique corrupts index on virtual blobs +--echo # +create table t1 ( + f1 varchar(25), + v1 mediumtext generated always as (concat('f1:', f1)) virtual, + unique key (f1) using hash, + key (v1(1000)) +); +flush status; +insert ignore t1 (f1) values (9599),(94410); +# handler_read_next must be 1 below, meaning there was a hash collision above. 
+# if a change in the hash function causes these values not to collide anymore, +# the test must be adjusted to use some other values that collide. +# to find a collision add an assert into check_duplicate_long_entry_key() +# and run, like, insert...select * seq_from_1_to_1000000000 +show status like 'handler_read_next'; +--echo # the above MUST BE =1 +check table t1 extended; +update t1 set f1=100 where f1=9599; +update t1 set f1=9599 where f1=100; +check table t1 extended; +drop table t1; + --echo # --echo # End of 10.4 tests --echo # diff --git a/sql/handler.cc b/sql/handler.cc index 2b081df83264f..ca5a3e28c2835 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6658,6 +6658,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, KEY *key_info= table->key_info + key_no; hash_field= key_info->key_part->field; uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL]; + String *blob_storage; DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY && key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) @@ -6675,6 +6676,8 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, result= h->ha_index_init(key_no, 0); if (result) return result; + blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields); + table->remember_blob_values(blob_storage); store_record(table, check_unique_buf); result= h->ha_index_read_map(table->record[0], ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT); @@ -6685,6 +6688,13 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr; Item ** arguments= temp->arguments(); uint arg_count= temp->argument_count(); + // restore pointers after swap_values in TABLE::update_virtual_fields() + for (Field **vf= table->vfield; *vf; vf++) + { + if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG && + bitmap_is_set(table->read_set, (*vf)->field_index)) + ((Field_blob*)*vf)->swap_value_and_read_value(); + } do { my_ptrdiff_t diff= 
table->check_unique_buf - new_rec; @@ -6731,6 +6741,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, } } restore_record(table, check_unique_buf); + table->restore_blob_values(blob_storage); h->ha_index_end(); return error; } From 65b3c89430c38fec0d5adb78755a80b3e6600dce Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 25 Aug 2023 18:13:51 +0200 Subject: [PATCH 047/165] MDEV-32015 insert into an empty table fails with hash unique don't enable bulk insert when table->s->long_unique_table --- mysql-test/main/long_unique_bugs.result | 11 +++++++ mysql-test/main/long_unique_bugs.test | 8 +++++ sql/log_event.cc | 3 +- sql/sql_insert.cc | 7 +++-- sql/sql_load.cc | 3 +- sql/sql_table.cc | 40 ++++++++++++------------- 6 files changed, 46 insertions(+), 26 deletions(-) diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index ebc7caa839d41..6b835bc512a7a 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -478,5 +478,16 @@ Table Op Msg_type Msg_text test.t1 check status OK drop table t1; # +# MDEV-32015 insert into an empty table fails with hash unique +# +create table t1 (f1 varchar(25), unique (f1) using hash); +insert ignore t1 (f1) values ('new york'),('virginia'),('spouse'),(null),('zqekmqpwutxnzddrbjycyo'),('nebraska'),('illinois'),('qe'),('ekmqpwut'),('arizona'),('arizona'); +Warnings: +Warning 1062 Duplicate entry 'arizona' for key 'f1' +check table t1 extended; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +# # End of 10.4 tests # diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index d9ab36d588e9e..0455c5a40cfd6 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -479,6 +479,14 @@ update t1 set f1=9599 where f1=100; check table t1 extended; drop table t1; +--echo # +--echo # MDEV-32015 insert into an empty table fails with hash 
unique +--echo # +create table t1 (f1 varchar(25), unique (f1) using hash); +insert ignore t1 (f1) values ('new york'),('virginia'),('spouse'),(null),('zqekmqpwutxnzddrbjycyo'),('nebraska'),('illinois'),('qe'),('ekmqpwut'),('arizona'),('arizona'); +check table t1 extended; +drop table t1; + --echo # --echo # End of 10.4 tests --echo # diff --git a/sql/log_event.cc b/sql/log_event.cc index 23ccaef2a568d..002bf3f29b79c 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -13529,7 +13529,8 @@ Rows_log_event::write_row(rpl_group_info *rgi, DBUG_RETURN(error); } - if (m_curr_row == m_rows_buf && !invoke_triggers) + if (m_curr_row == m_rows_buf && !invoke_triggers && + !table->s->long_unique_table) { /* This table has no triggers so we can do bulk insert. diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index a64314814dd16..659db7b01fb91 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -901,7 +901,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, same table in the same connection. 
*/ if (thd->locked_tables_mode <= LTM_LOCK_TABLES && - values_list.elements > 1) + !table->s->long_unique_table && values_list.elements > 1) { using_bulk_insert= 1; table->file->ha_start_bulk_insert(values_list.elements); @@ -3930,7 +3930,7 @@ int select_insert::prepare2(JOIN *) DBUG_ENTER("select_insert::prepare2"); if (thd->lex->current_select->options & OPTION_BUFFER_RESULT && thd->locked_tables_mode <= LTM_LOCK_TABLES && - !thd->lex->describe) + !table->s->long_unique_table && !thd->lex->describe) table->file->ha_start_bulk_insert((ha_rows) 0); if (table->validate_default_values_of_unset_fields(thd)) DBUG_RETURN(1); @@ -4664,7 +4664,8 @@ select_create::prepare(List &_values, SELECT_LEX_UNIT *u) table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); if (info.handle_duplicates == DUP_UPDATE) table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE); - if (thd->locked_tables_mode <= LTM_LOCK_TABLES) + if (thd->locked_tables_mode <= LTM_LOCK_TABLES && + !table->s->long_unique_table) table->file->ha_start_bulk_insert((ha_rows) 0); thd->abort_on_warning= !info.ignore && thd->is_strict_mode(); if (check_that_all_fields_are_given_values(thd, table, table_list)) diff --git a/sql/sql_load.cc b/sql/sql_load.cc index afc2f1211670f..8264286a0226f 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -641,7 +641,8 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, (!table->triggers || !table->triggers->has_delete_triggers())) table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); - if (thd->locked_tables_mode <= LTM_LOCK_TABLES) + if (thd->locked_tables_mode <= LTM_LOCK_TABLES && + !table->s->long_unique_table) table->file->ha_start_bulk_insert((ha_rows) 0); table->copy_blobs=1; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 100436ae1ef41..6f41b70c69fd1 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -72,11 +72,9 @@ static bool make_unique_constraint_name(THD *, LEX_CSTRING *, const char *, List *, uint *); static const char 
*make_unique_invisible_field_name(THD *, const char *, List *); -static int copy_data_between_tables(THD *, TABLE *,TABLE *, - List &, bool, uint, ORDER *, - ha_rows *, ha_rows *, - Alter_info::enum_enable_or_disable, - Alter_table_ctx *); +static int copy_data_between_tables(THD *, TABLE *,TABLE *, bool, uint, + ORDER *, ha_rows *, ha_rows *, + Alter_info *, Alter_table_ctx *); static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *, uint *, handler *, KEY **, uint *, int, const LEX_CSTRING db, @@ -10548,10 +10546,8 @@ do_continue:; my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0)); goto err_new_table_cleanup; }); - if (copy_data_between_tables(thd, table, new_table, - alter_info->create_list, ignore, - order_num, order, &copied, &deleted, - alter_info->keys_onoff, + if (copy_data_between_tables(thd, table, new_table, ignore, order_num, + order, &copied, &deleted, alter_info, &alter_ctx)) { goto err_new_table_cleanup; @@ -10877,11 +10873,9 @@ bool mysql_trans_commit_alter_copy_data(THD *thd) static int -copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, - List &create, bool ignore, - uint order_num, ORDER *order, - ha_rows *copied, ha_rows *deleted, - Alter_info::enum_enable_or_disable keys_onoff, +copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, bool ignore, + uint order_num, ORDER *order, ha_rows *copied, + ha_rows *deleted, Alter_info *alter_info, Alter_table_ctx *alter_ctx) { int error= 1; @@ -10930,7 +10924,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, backup_set_alter_copy_lock(thd, from); - alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff); + alter_table_manage_keys(to, from->file->indexes_are_disabled(), + alter_info->keys_onoff); from->default_column_bitmaps(); @@ -10939,10 +10934,14 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, from->file->info(HA_STATUS_VARIABLE); to->file->extra(HA_EXTRA_PREPARE_FOR_ALTER_TABLE); - 
to->file->ha_start_bulk_insert(from->file->stats.records, - ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT); - bulk_insert_started= 1; - List_iterator it(create); + if (!to->s->long_unique_table || !(alter_info->flags & + (ALTER_ADD_INDEX|ALTER_CHANGE_COLUMN|ALTER_PARSER_DROP_COLUMN))) + { + to->file->ha_start_bulk_insert(from->file->stats.records, + ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT); + bulk_insert_started= 1; + } + List_iterator it(alter_info->create_list); Create_field *def; copy_end=copy; to->s->default_fields= 0; @@ -11197,7 +11196,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, /* We are going to drop the temporary table */ to->file->extra(HA_EXTRA_PREPARE_FOR_DROP); } - if (unlikely(to->file->ha_end_bulk_insert()) && error <= 0) + if (bulk_insert_started && to->file->ha_end_bulk_insert() && error <= 0) { /* Give error, if not already given */ if (!thd->is_error()) @@ -11238,7 +11237,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, { /* This happens if we get an error during initialization of data */ DBUG_ASSERT(error); - to->file->ha_end_bulk_insert(); ha_enable_transaction(thd, TRUE); } From c53cb718b733557fad0b97450b6f60bf1c81af55 Mon Sep 17 00:00:00 2001 From: Sachin Date: Fri, 29 May 2020 22:12:44 +0530 Subject: [PATCH 048/165] MDEV-22722 Assertion "inited==NONE" failed in handler::ha_index_init on the slave during UPDATE test case only --- .../main/long_unique_bugs_replication.result | 12 ++++++++++ .../main/long_unique_bugs_replication.test | 24 +++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 mysql-test/main/long_unique_bugs_replication.result create mode 100644 mysql-test/main/long_unique_bugs_replication.test diff --git a/mysql-test/main/long_unique_bugs_replication.result b/mysql-test/main/long_unique_bugs_replication.result new file mode 100644 index 0000000000000..af583d00ceafa --- /dev/null +++ b/mysql-test/main/long_unique_bugs_replication.result @@ -0,0 +1,12 @@ 
+include/master-slave.inc +[connection master] +create table t1 (i1 int, a1 text, unique key i1 (a1)) engine=myisam; +insert into t1 values (1,1); +insert into t1 values (2,2); +update t1 set a1 = 'd' limit 1; +update t1 set a1 = 'd2' where i1= 2; +connection slave; +connection slave; +connection master; +drop table t1; +include/rpl_end.inc diff --git a/mysql-test/main/long_unique_bugs_replication.test b/mysql-test/main/long_unique_bugs_replication.test new file mode 100644 index 0000000000000..1cacd088bee6d --- /dev/null +++ b/mysql-test/main/long_unique_bugs_replication.test @@ -0,0 +1,24 @@ +# +# Long unique bugs related to master slave replication +# + +# +# MDEV-22722 Assertion "inited==NONE" failed in handler::ha_index_init on the slave during UPDATE +# + +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +create table t1 (i1 int, a1 text, unique key i1 (a1)) engine=myisam; +insert into t1 values (1,1); +insert into t1 values (2,2); +update t1 set a1 = 'd' limit 1; +update t1 set a1 = 'd2' where i1= 2; + +sync_slave_with_master; +connection slave; + +connection master; +drop table t1; + +--source include/rpl_end.inc From d762e9d943aa444695ebe845a7376fd9cbb0e3dc Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 5 Sep 2023 16:58:55 +0200 Subject: [PATCH 049/165] MDEV-32093 long uniques break old->new replication recalculate long unique hash in Write_rows_log_event and Update_rows_log_event. normally generated columns (stored and indexed virtual) are deterministic and their values don't need to be recalculated on the slave as they're already present in the row image. but the long unique hash function was changed in MDEV-27653, so a row event from the old master will have the old hash, but a table created on the new slave will need a new hash. 
--- .../main/long_unique_bugs_replication.result | 19 ++++++++++- .../main/long_unique_bugs_replication.test | 33 ++++++++++++++++--- sql/log_event.cc | 16 ++++++++- 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/mysql-test/main/long_unique_bugs_replication.result b/mysql-test/main/long_unique_bugs_replication.result index af583d00ceafa..39b0ebe26d23d 100644 --- a/mysql-test/main/long_unique_bugs_replication.result +++ b/mysql-test/main/long_unique_bugs_replication.result @@ -1,3 +1,6 @@ +# +# MDEV-22722 Assertion "inited==NONE" failed in handler::ha_index_init on the slave during UPDATE +# include/master-slave.inc [connection master] create table t1 (i1 int, a1 text, unique key i1 (a1)) engine=myisam; @@ -6,7 +9,21 @@ insert into t1 values (2,2); update t1 set a1 = 'd' limit 1; update t1 set a1 = 'd2' where i1= 2; connection slave; -connection slave; connection master; drop table t1; +# +# MDEV-32093 long uniques break old->new replication +# +connection slave; +create table t1 (id int not null, b1 varchar(255) not null, b2 varchar(2550) not null, unique (id), unique key (b1,b2) using hash) default charset utf8mb3; +set global slave_exec_mode=idempotent; +binlog 'aRf2ZA8BAAAA/AAAAAABAAAAAAQAMTAuNS4xNS1NYXJpYURCLWxvZwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABpF/ZkEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFRmTlk'; +binlog 'bBf2ZBMBAAAANAAAAJgHAAAAAHEAAAAAAAEABHRlc3QAAnQxAAQDDw8IBP0C4h0AeqMD4A==bBf2ZBcBAAAANAAAAMwHAAAAAHEAAAAAAAEABP/wj6QAAAEAYgEAZa6/VU0JAAAANteqUw=='; +binlog 'bBf2ZBMBAAAANAAAAJgHAAAAAHEAAAAAAAEABHRlc3QAAnQxAAQDDw8IBP0C4h0AeqMD4A==bBf2ZBcBAAAANAAAAMwHAAAAAHEAAAAAAAEABP/wj6QAAAEAYgEAZa6/VU0JAAAANteqUw=='; +binlog 
'bBf2ZBMBAAAANAAAAHUkAAAAAHEAAAAAAAEABHRlc3QAAnQxAAQDDw8IBP0C4h0AaTGFIg==bBf2ZBgBAAAASAAAAL0kAAAAAHEAAAAAAAEABP//8I+kAAABAGIBAGWuv1VNCQAAAPBuWwAAAQBiAQBlrr9VTQkAAADxS9Lu'; +drop table t1; +set global slave_exec_mode=default; +# +# End of 10.4 tests +# include/rpl_end.inc diff --git a/mysql-test/main/long_unique_bugs_replication.test b/mysql-test/main/long_unique_bugs_replication.test index 1cacd088bee6d..9c44d13e6a5e3 100644 --- a/mysql-test/main/long_unique_bugs_replication.test +++ b/mysql-test/main/long_unique_bugs_replication.test @@ -2,9 +2,9 @@ # Long unique bugs related to master slave replication # -# -# MDEV-22722 Assertion "inited==NONE" failed in handler::ha_index_init on the slave during UPDATE -# +--echo # +--echo # MDEV-22722 Assertion "inited==NONE" failed in handler::ha_index_init on the slave during UPDATE +--echo # --source include/have_binlog_format_row.inc --source include/master-slave.inc @@ -16,9 +16,34 @@ update t1 set a1 = 'd' limit 1; update t1 set a1 = 'd2' where i1= 2; sync_slave_with_master; -connection slave; connection master; drop table t1; +--echo # +--echo # MDEV-32093 long uniques break old->new replication +--echo # + +# this is techically a bug in replication, but it needs an old master +# so we'll run it as a non-replicated test with BINLOG command +sync_slave_with_master; +create table t1 (id int not null, b1 varchar(255) not null, b2 varchar(2550) not null, unique (id), unique key (b1,b2) using hash) default charset utf8mb3; +set global slave_exec_mode=idempotent; + +# Format_description_log_event, MariaDB-10.5.15 +binlog 'aRf2ZA8BAAAA/AAAAAABAAAAAAQAMTAuNS4xNS1NYXJpYURCLWxvZwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABpF/ZkEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFRmTlk'; + +### INSERT t1 VALUES (42127, 'b', 'e', 39952170926) +binlog 
'bBf2ZBMBAAAANAAAAJgHAAAAAHEAAAAAAAEABHRlc3QAAnQxAAQDDw8IBP0C4h0AeqMD4A==bBf2ZBcBAAAANAAAAMwHAAAAAHEAAAAAAAEABP/wj6QAAAEAYgEAZa6/VU0JAAAANteqUw=='; +binlog 'bBf2ZBMBAAAANAAAAJgHAAAAAHEAAAAAAAEABHRlc3QAAnQxAAQDDw8IBP0C4h0AeqMD4A==bBf2ZBcBAAAANAAAAMwHAAAAAHEAAAAAAAEABP/wj6QAAAEAYgEAZa6/VU0JAAAANteqUw=='; + +### UPDATE t1 WHERE (42127, 'b', 'e', 39952170926) SET (23406, 'b', 'e', 39952170926) +binlog 'bBf2ZBMBAAAANAAAAHUkAAAAAHEAAAAAAAEABHRlc3QAAnQxAAQDDw8IBP0C4h0AaTGFIg==bBf2ZBgBAAAASAAAAL0kAAAAAHEAAAAAAAEABP//8I+kAAABAGIBAGWuv1VNCQAAAPBuWwAAAQBiAQBlrr9VTQkAAADxS9Lu'; + +drop table t1; +set global slave_exec_mode=default; + +--echo # +--echo # End of 10.4 tests +--echo # --source include/rpl_end.inc diff --git a/sql/log_event.cc b/sql/log_event.cc index 002bf3f29b79c..014968e31d4bd 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -13563,6 +13563,9 @@ Rows_log_event::write_row(rpl_group_info *rgi, DBUG_PRINT_BITSET("debug", "rpl_write_set: %s", table->rpl_write_set); DBUG_PRINT_BITSET("debug", "read_set: %s", table->read_set); + if (table->s->long_unique_table) + table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE); + if (invoke_triggers && unlikely(process_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE, TRUE))) { @@ -13674,7 +13677,14 @@ Rows_log_event::write_row(rpl_group_info *rgi, Now, record[1] should contain the offending row. That will enable us to update it or, alternatively, delete it (so that we can insert the new row afterwards). 
- */ + */ + if (table->s->long_unique_table) + { + /* same as for REPLACE/ODKU */ + table->move_fields(table->field, table->record[1], table->record[0]); + table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE); + table->move_fields(table->field, table->record[0], table->record[1]); + } /* If row is incomplete we will use the record found to fill @@ -13684,6 +13694,8 @@ Rows_log_event::write_row(rpl_group_info *rgi, { restore_record(table,record[1]); error= unpack_current_row(rgi); + if (table->s->long_unique_table) + table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE); } DBUG_PRINT("debug",("preparing for update: before and after image")); @@ -14791,6 +14803,8 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) thd_proc_info(thd, message); if (unlikely((error= unpack_current_row(rgi, &m_cols_ai)))) goto err; + if (m_table->s->long_unique_table) + m_table->update_virtual_fields(m_table->file, VCOL_UPDATE_FOR_WRITE); /* Now we have the right row to update. The old row (the one we're From 961b96a5e0dd40512b8fff77dcec273187ccc9fd Mon Sep 17 00:00:00 2001 From: Nayana Thorat Date: Wed, 30 Aug 2023 14:19:51 +0530 Subject: [PATCH 050/165] MDEV-29324 s390x patch srw_lock.cc Fix debug mode build failure on s390x. Replaced builtin_ttest by __builtin_tx_nesting_depth() > 0 as a s390x equivalent version of the expression. 
--- storage/innobase/sync/srw_lock.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/innobase/sync/srw_lock.cc b/storage/innobase/sync/srw_lock.cc index 05445f1a68c6c..e41451d80035b 100644 --- a/storage/innobase/sync/srw_lock.cc +++ b/storage/innobase/sync/srw_lock.cc @@ -117,8 +117,13 @@ bool transactional_lock_enabled() __attribute__((target("htm"),hot)) bool xtest() { +# ifdef __s390x__ + return have_transactional_memory && + __builtin_tx_nesting_depth() > 0; +# else return have_transactional_memory && _HTM_STATE (__builtin_ttest ()) == _HTM_TRANSACTIONAL; +# endif } # endif #endif From e937a64d462c83e379b8cf26b7765df0e22ae358 Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Thu, 7 Sep 2023 13:39:28 +0200 Subject: [PATCH 051/165] MDEV-10356: rpl.rpl_parallel_temptable failure due to incorrect commit optimization of temptables The problem was that parallel replication of temporary tables using statement-based binlogging could overlap the COMMIT in one thread with a DML or DROP TEMPORARY TABLE in another thread using the same temporary table. Temporary tables are not safe for concurrent access, so this caused reference to freed memory and possibly other nastiness. The fix is to disable the optimisation with overlapping commits of one transaction with the start of a later transaction, when temporary tables are in use. Then the following event groups will be blocked from starting until the one using temporary tables is completed. This also fixes occasional test failures of rpl.rpl_parallel_temptable seen in Buildbot. 
Signed-off-by: Kristian Nielsen --- sql/rpl_parallel.cc | 18 +++++++++++++++++- sql/sql_class.cc | 4 ++++ sql/sql_class.h | 13 +++++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 3bd27c7393272..fc4434b75de23 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -218,6 +218,7 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id, waiting for this). In most cases (normal DML), it will be a no-op. */ rgi->mark_start_commit_no_lock(); + rgi->commit_orderer.wakeup_blocked= false; if (entry->last_committed_sub_id < sub_id) { @@ -1425,7 +1426,22 @@ handle_rpl_parallel_thread(void *arg) if (!thd->killed) { DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit"); - rgi->mark_start_commit(); + if (thd->lex->stmt_accessed_temp_table()) + { + /* + Temporary tables are special, they require strict + single-threaded use as they have no locks protecting concurrent + access. Therefore, we cannot safely use the optimization of + overlapping the commit of this transaction with the start of the + following. + So we skip the early mark_start_commit() and also block any + wakeup_subsequent_commits() until this event group is fully + done, inside finish_event_group(). 
+ */ + rgi->commit_orderer.wakeup_blocked= true; + } + else + rgi->mark_start_commit(); DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit"); } } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index e7e27401d6162..17feb006e213b 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -7536,6 +7536,7 @@ wait_for_commit::reinit() wakeup_error= 0; wakeup_subsequent_commits_running= false; commit_started= false; + wakeup_blocked= false; #ifdef SAFE_MUTEX /* When using SAFE_MUTEX, the ordering between taking the LOCK_wait_commit @@ -7808,6 +7809,9 @@ wait_for_commit::wakeup_subsequent_commits2(int wakeup_error) { wait_for_commit *waiter; + if (unlikely(wakeup_blocked)) + return; + mysql_mutex_lock(&LOCK_wait_commit); wakeup_subsequent_commits_running= true; waiter= subsequent_commits_list; diff --git a/sql/sql_class.h b/sql/sql_class.h index 4487a67c76dc9..4c172ba8e2a31 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2142,6 +2142,19 @@ struct wait_for_commit group commit as T1. */ bool commit_started; + /* + Set to temporarily ignore calls to wakeup_subsequent_commits(). The + caller must arrange that another wakeup_subsequent_commits() gets called + later after wakeup_blocked has been set back to false. + + This is used for parallel replication with temporary tables. + Temporary tables require strict single-threaded operation. The normal + optimization, of doing wakeup_subsequent_commits early and overlapping + part of the commit with the following transaction, is not safe. Thus + when temporary tables are replicated, wakeup is blocked until the + event group is fully done. 
+ */ + bool wakeup_blocked; void register_wait_for_prior_commit(wait_for_commit *waitee); int wait_for_prior_commit(THD *thd, bool allow_kill=true) From 5544ea2eda48f6ae08f5f7e115b8899306aa0770 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Fri, 8 Sep 2023 15:39:10 +1000 Subject: [PATCH 052/165] MDEV-32130 Port MySQL test on protocol bug #106352 to MariaDB Port the test case from MySQL to MariaDB: MySQL fix Bug#33813951, Change-Id: I2448e3f2f36925fe70d882ae5681a6234f0d5a98. Function test_simple_temporal() from MySQL ported from C++ to pure C. This includes one change: - DIE_UNLESS(field->type == MYSQL_TYPE_DATETIME); + DIE_UNLESS(field->type == MYSQL_TYPE_TIMESTAMP); The bound param of SELECT ? is TIMESTAMP in this code. MySQL returns it back as DATETIME. MariaDB preserves TIMESTAMP. Code packaged for commit by Daniel Black. --- tests/mysql_client_test.c | 313 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 313 insertions(+) diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 92f9ff87765ce..4df31bda3023c 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -6296,6 +6296,318 @@ static void test_date_dt() test_bind_date_conv(2); } +static void test_simple_temporal() { + + MYSQL_STMT *stmt = NULL; + uint rc; + ulong length = 0; + MYSQL_BIND my_bind[4], my_bind2; + my_bool is_null = FALSE; + MYSQL_TIME tm; + char string[100]; + MYSQL_RES *rs; + MYSQL_FIELD *field; + + myheader("test_simple_temporal"); + + /* Initialize param/fetch buffers for data, null flags, lengths */ + memset(&my_bind, 0, sizeof(my_bind)); + memset(&my_bind2, 0, sizeof(my_bind2)); + + /* Initialize the first input parameter */ + my_bind[0].buffer_type = MYSQL_TYPE_DATETIME; + my_bind[0].buffer = &tm; + my_bind[0].is_null = &is_null; + my_bind[0].length = &length; + my_bind[0].buffer_length = sizeof(tm); + + /* Clone the other input parameters */ + my_bind[3] = my_bind[2] = my_bind[1] = my_bind[0]; + + my_bind[1].buffer_type = 
MYSQL_TYPE_TIMESTAMP; + my_bind[2].buffer_type = MYSQL_TYPE_DATE; + my_bind[3].buffer_type = MYSQL_TYPE_TIME; + + /* Initialize fetch parameter */ + my_bind2.buffer_type = MYSQL_TYPE_STRING; + my_bind2.length = &length; + my_bind2.is_null = &is_null; + my_bind2.buffer_length = sizeof(string); + my_bind2.buffer = string; + + /* Prepare and bind simple SELECT with DATETIME parameter */ + stmt = mysql_simple_prepare(mysql, "SELECT ?"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[0]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + check_execute(stmt, rc); + + /* Initialize DATETIME value */ + tm.neg = FALSE; + tm.time_type = MYSQL_TIMESTAMP_DATETIME; + tm.year = 2001; + tm.month = 10; + tm.day = 20; + tm.hour = 10; + tm.minute = 10; + tm.second = 59; + tm.second_part = 500000; + + /* Execute and fetch */ + rc = mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_DATETIME); + DIE_UNLESS(strcmp(string, "2001-10-20 10:10:59.500000") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); + + /* Same test with explicit CAST */ + stmt = mysql_simple_prepare(mysql, "SELECT CAST(? 
AS DATETIME(6))"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[0]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + check_execute(stmt, rc); + + /* Execute and fetch */ + rc = mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_DATETIME); + DIE_UNLESS(strcmp(string, "2001-10-20 10:10:59.500000") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); + + /* Prepare and bind simple SELECT with TIMESTAMP parameter */ + stmt = mysql_simple_prepare(mysql, "SELECT ?"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[1]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + check_execute(stmt, rc); + + /* Initialize TIMESTAMP value */ + tm.neg = FALSE; + tm.time_type = MYSQL_TIMESTAMP_DATETIME; + tm.year = 2001; + tm.month = 10; + tm.day = 20; + tm.hour = 10; + tm.minute = 10; + tm.second = 59; + tm.second_part = 500000; + + /* Execute and fetch */ + rc = mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_TIMESTAMP); + DIE_UNLESS(strcmp(string, "2001-10-20 10:10:59.500000") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); + + /* Prepare and bind simple SELECT with DATE parameter */ + stmt = mysql_simple_prepare(mysql, "SELECT ?"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[2]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + 
check_execute(stmt, rc); + + /* Initialize DATE value */ + tm.neg = FALSE; + tm.time_type = MYSQL_TIMESTAMP_DATE; + tm.year = 2001; + tm.month = 10; + tm.day = 20; + tm.hour = 0; + tm.minute = 0; + tm.second = 0; + tm.second_part = 0; + + /* Execute and fetch */ + rc = mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_DATE); + DIE_UNLESS(strcmp(string, "2001-10-20") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); + + /* Same test with explicit CAST */ + stmt = mysql_simple_prepare(mysql, "SELECT CAST(? AS DATE)"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[2]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + check_execute(stmt, rc); + + /* Execute and fetch */ + rc = mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_DATE); + DIE_UNLESS(strcmp(string, "2001-10-20") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); + + /* Prepare and bind simple SELECT with TIME parameter */ + stmt = mysql_simple_prepare(mysql, "SELECT ?"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[3]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + check_execute(stmt, rc); + + /* Initialize TIME value */ + tm.neg = FALSE; + tm.time_type = MYSQL_TIMESTAMP_TIME; + tm.year = 0; + tm.month = 0; + tm.day = 0; + tm.hour = 10; + tm.minute = 10; + tm.second = 59; + tm.second_part = 500000; + + /* Execute and fetch */ + rc = 
mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_TIME); + DIE_UNLESS(strcmp(string, "10:10:59.500000") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); + + /* Same test with explicit CAST */ + stmt = mysql_simple_prepare(mysql, "SELECT CAST(? AS TIME(6))"); + check_stmt(stmt); + verify_param_count(stmt, 1); + + rc = mysql_stmt_bind_param(stmt, &my_bind[3]); + check_execute(stmt, rc); + + rc = mysql_stmt_bind_result(stmt, &my_bind2); + check_execute(stmt, rc); + + /* Initialize TIME value */ + tm.neg = FALSE; + tm.time_type = MYSQL_TIMESTAMP_TIME; + tm.year = 0; + tm.month = 0; + tm.day = 0; + tm.hour = 10; + tm.minute = 10; + tm.second = 59; + tm.second_part = 500000; + + /* Execute and fetch */ + rc = mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + rs = mysql_stmt_result_metadata(stmt); + field = mysql_fetch_fields(rs); + + rc = mysql_stmt_store_result(stmt); + check_execute(stmt, rc); + + rc = mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + + DIE_UNLESS(field->type == MYSQL_TYPE_TIME); + DIE_UNLESS(strcmp(string, "10:10:59.500000") == 0); + + mysql_free_result(rs); + + mysql_stmt_close(stmt); +} + /* Misc tests to keep pure coverage happy */ @@ -21510,6 +21822,7 @@ static struct my_tests_st my_tests[]= { { "test_store_result2", test_store_result2 }, { "test_subselect", test_subselect }, { "test_date", test_date }, + { "test_simple_temporal", test_simple_temporal }, { "test_date_date", test_date_date }, { "test_date_time", test_date_time }, { "test_date_ts", test_date_ts }, From 34c283ba1b3b0b51488746416a737c1456df2fe9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 8 Sep 2023 11:28:21 +0300 Subject: [PATCH 053/165] MDEV-32132 DROP INDEX followed by CREATE INDEX 
may corrupt data ibuf_set_bitmap_for_bulk_load(): Port a bug fix that was made as part of commit 165564d3c33ae3d677d70644a83afcb744bdbf65 (MDEV-30009) in MariaDB Server 10.5.19. --- storage/innobase/ibuf/ibuf0ibuf.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index d611c7793f7f7..dcd12f97cf031 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -4944,7 +4944,13 @@ void ibuf_set_bitmap_for_bulk_load(buf_block_t *block, mtr_t *mtr, bool reset) free_val = ibuf_index_page_calc_free(block); bitmap_page = ibuf_bitmap_get_map_page(block->page.id, - block->zip_size(), mtr); + block->zip_size(), mtr); + + if (ibuf_bitmap_page_get_bits(bitmap_page, block->page.id, + block->physical_size(), + IBUF_BITMAP_BUFFERED, mtr)) { + ibuf_delete_recs(block->page.id); + } free_val = reset ? 0 : ibuf_index_page_calc_free(block); ibuf_bitmap_page_set_bits( From 1815719a5b59f0fbd45805aeff8d898bcdb0cc70 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Tue, 15 Aug 2023 11:43:48 +1000 Subject: [PATCH 054/165] oqgraph: remove clang warnings -Wdeprecated-copy-with-user-provided-copy was causing a few errors on things that where defined in a way that was implicit. By removing code it now compiles without warnings. 
tested with fc38 / clang-16 --- storage/oqgraph/graphcore.cc | 11 ----------- storage/oqgraph/oqgraph_judy.h | 1 - storage/oqgraph/oqgraph_shim.h | 3 --- 3 files changed, 15 deletions(-) diff --git a/storage/oqgraph/graphcore.cc b/storage/oqgraph/graphcore.cc index 4554e092e12b0..92796538de3c6 100644 --- a/storage/oqgraph/graphcore.cc +++ b/storage/oqgraph/graphcore.cc @@ -146,17 +146,6 @@ namespace open_query HAVE_EDGE = 4, }; - // Force assignment operator, so we can trace through in the debugger - inline reference& operator=(const reference& ref) - { - m_flags = ref.m_flags; - m_sequence = ref.m_sequence; - m_vertex = ref.m_vertex; - m_edge = ref.m_edge; - m_weight = ref.m_weight; - return *this; - } - inline reference() : m_flags(0), m_sequence(0), m_vertex(graph_traits::null_vertex()), diff --git a/storage/oqgraph/oqgraph_judy.h b/storage/oqgraph/oqgraph_judy.h index 53bb47069673a..f6aeda1f9cc02 100644 --- a/storage/oqgraph/oqgraph_judy.h +++ b/storage/oqgraph/oqgraph_judy.h @@ -107,7 +107,6 @@ namespace open_query size_type n; }; - reference operator[](size_type n) { return reference(*this, n); } bool operator[](size_type n) const { return test(n); } size_type find_first() const; diff --git a/storage/oqgraph/oqgraph_shim.h b/storage/oqgraph/oqgraph_shim.h index cd63708e1cdbb..93c4e5fe04e57 100644 --- a/storage/oqgraph/oqgraph_shim.h +++ b/storage/oqgraph/oqgraph_shim.h @@ -60,9 +60,6 @@ namespace oqgraph3 edge_iterator(const graph_ptr& graph, size_t offset=0) : _graph(graph) , _offset(offset) { } - edge_iterator(const edge_iterator& pos) - : _graph(pos._graph) - , _offset(pos._offset) { } value_type operator*(); self& operator+=(size_t n) { _offset+= n; return *this; } self& operator++() { ++_offset; return *this; } From 53fd63254f43b1766c3f83c3f59d9519f904504c Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 8 Sep 2023 15:01:54 +0200 Subject: [PATCH 055/165] remove groonga examples to follow the similar upstream change --- 
.../vendor/groonga/examples/Makefile.am | 1 - .../groonga/examples/dictionary/Makefile.am | 34 - .../examples/dictionary/edict/Makefile.am | 4 - .../examples/dictionary/edict/edict-import.sh | 27 - .../examples/dictionary/edict/edict2grn.rb | 34 - .../examples/dictionary/eijiro/Makefile.am | 4 - .../dictionary/eijiro/eijiro-import.sh | 12 - .../examples/dictionary/eijiro/eijiro2grn.rb | 61 - .../examples/dictionary/gene95/Makefile.am | 4 - .../examples/dictionary/gene95/gene-import.sh | 26 - .../examples/dictionary/gene95/gene2grn.rb | 33 - .../dictionary/html/css/dictionary.css | 3 - .../images/ui-bg_flat_0_aaaaaa_40x100.png | Bin 180 -> 0 bytes .../images/ui-bg_flat_75_ffffff_40x100.png | Bin 178 -> 0 bytes .../images/ui-bg_glass_55_fbf9ee_1x400.png | Bin 120 -> 0 bytes .../images/ui-bg_glass_65_ffffff_1x400.png | Bin 105 -> 0 bytes .../images/ui-bg_glass_75_dadada_1x400.png | Bin 111 -> 0 bytes .../images/ui-bg_glass_75_e6e6e6_1x400.png | Bin 110 -> 0 bytes .../images/ui-bg_glass_95_fef1ec_1x400.png | Bin 119 -> 0 bytes .../ui-bg_highlight-soft_75_cccccc_1x100.png | Bin 101 -> 0 bytes .../images/ui-icons_222222_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_2e83ff_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_454545_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_888888_256x240.png | Bin 4369 -> 0 bytes .../images/ui-icons_cd0a0a_256x240.png | Bin 4369 -> 0 bytes .../smoothness/jquery-ui-1.8.12.custom.css | 578 - .../examples/dictionary/html/index.html | 28 - .../examples/dictionary/html/js/dictionary.js | 82 - .../dictionary/html/js/jquery-1.7.2.js | 9404 ------------ .../html/js/jquery-ui-1.8.18.custom.js | 11802 ---------------- .../groonga/examples/dictionary/init-db.sh | 10 - .../examples/dictionary/jmdict/Makefile.am | 3 - .../examples/dictionary/jmdict/jmdict.rb | 42 - .../groonga/examples/dictionary/readme.txt | 71 - 34 files changed, 22263 deletions(-) delete mode 100644 storage/mroonga/vendor/groonga/examples/Makefile.am delete 
mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/Makefile.am delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/edict/Makefile.am delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/edict/edict-import.sh delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/edict/edict2grn.rb delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/eijiro/Makefile.am delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/eijiro/eijiro-import.sh delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/eijiro/eijiro2grn.rb delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/gene95/Makefile.am delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene-import.sh delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene2grn.rb delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/dictionary.css delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_flat_0_aaaaaa_40x100.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_flat_75_ffffff_40x100.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_55_fbf9ee_1x400.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_65_ffffff_1x400.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_75_dadada_1x400.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_75_e6e6e6_1x400.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_95_fef1ec_1x400.png delete mode 100644 
storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_highlight-soft_75_cccccc_1x100.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_222222_256x240.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_2e83ff_256x240.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_454545_256x240.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_888888_256x240.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_cd0a0a_256x240.png delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/jquery-ui-1.8.12.custom.css delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/index.html delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/js/dictionary.js delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-1.7.2.js delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-ui-1.8.18.custom.js delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/init-db.sh delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/jmdict/Makefile.am delete mode 100755 storage/mroonga/vendor/groonga/examples/dictionary/jmdict/jmdict.rb delete mode 100644 storage/mroonga/vendor/groonga/examples/dictionary/readme.txt diff --git a/storage/mroonga/vendor/groonga/examples/Makefile.am b/storage/mroonga/vendor/groonga/examples/Makefile.am deleted file mode 100644 index f436342d0535f..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -SUBDIRS = dictionary diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/Makefile.am 
b/storage/mroonga/vendor/groonga/examples/dictionary/Makefile.am deleted file mode 100644 index ee618a213bda2..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/Makefile.am +++ /dev/null @@ -1,34 +0,0 @@ -SUBDIRS = \ - edict \ - eijiro \ - gene95 \ - jmdict - -dist_examples_dictionary_SCRIPTS = \ - init-db.sh - -nobase_dist_examples_dictionary_DATA = \ - readme.txt \ - $(html_files) - -# find html -type f | sort | sed -e 's,^,\t,g' -html_files = \ - html/css/dictionary.css \ - html/css/smoothness/images/ui-bg_flat_0_aaaaaa_40x100.png \ - html/css/smoothness/images/ui-bg_flat_75_ffffff_40x100.png \ - html/css/smoothness/images/ui-bg_glass_55_fbf9ee_1x400.png \ - html/css/smoothness/images/ui-bg_glass_65_ffffff_1x400.png \ - html/css/smoothness/images/ui-bg_glass_75_dadada_1x400.png \ - html/css/smoothness/images/ui-bg_glass_75_e6e6e6_1x400.png \ - html/css/smoothness/images/ui-bg_glass_95_fef1ec_1x400.png \ - html/css/smoothness/images/ui-bg_highlight-soft_75_cccccc_1x100.png \ - html/css/smoothness/images/ui-icons_222222_256x240.png \ - html/css/smoothness/images/ui-icons_2e83ff_256x240.png \ - html/css/smoothness/images/ui-icons_454545_256x240.png \ - html/css/smoothness/images/ui-icons_888888_256x240.png \ - html/css/smoothness/images/ui-icons_cd0a0a_256x240.png \ - html/css/smoothness/jquery-ui-1.8.12.custom.css \ - html/index.html \ - html/js/dictionary.js \ - html/js/jquery-1.7.2.js \ - html/js/jquery-ui-1.8.18.custom.js diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/edict/Makefile.am b/storage/mroonga/vendor/groonga/examples/dictionary/edict/Makefile.am deleted file mode 100644 index 376f9d520ab1d..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/edict/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -edictdir = $(examples_dictionarydir)/edict -dist_edict_SCRIPTS = \ - edict2grn.rb \ - edict-import.sh diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/edict/edict-import.sh 
b/storage/mroonga/vendor/groonga/examples/dictionary/edict/edict-import.sh deleted file mode 100755 index e48700af07be0..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/edict/edict-import.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -base_dir=$(dirname $0) - -if [ 1 != $# -a 2 != $# ]; then - echo "usage: $0 db_path [edict.gz_path]" - exit 1 -fi - -if [ -z $2 ]; then - edict_gz=edict.gz - if [ ! -f $edict_gz ]; then - wget -O $edict_gz http://ftp.monash.edu.au/pub/nihongo/edict.gz - fi -else - edict_gz=$2 -fi - -if type gzcat > /dev/null 2>&1; then - zcat="gzcat" -else - zcat="zcat" -fi - -if $zcat $edict_gz | ${base_dir}/edict2grn.rb | groonga $1 > /dev/null; then - echo "edict data loaded." -fi diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/edict/edict2grn.rb b/storage/mroonga/vendor/groonga/examples/dictionary/edict/edict2grn.rb deleted file mode 100755 index b795e25a50b49..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/edict/edict2grn.rb +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env ruby - -require "English" -require "nkf" -require "json" - -print(< /dev/null; then - echo "eijiro data loaded." 
-fi diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/eijiro/eijiro2grn.rb b/storage/mroonga/vendor/groonga/examples/dictionary/eijiro/eijiro2grn.rb deleted file mode 100755 index 62c1e1309bfd6..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/eijiro/eijiro2grn.rb +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env ruby -# -*- coding: utf-8 -*- - -$KCODE = 'u' - -require 'rubygems' -require 'fastercsv' - -class String - def to_json - a = split(//).map {|char| - case char - when '"' then '\\"' - when '\\' then '\\\\' - when "\b" then '\b' - when "\f" then '\f' - when "\n" then '\n' - when "\r" then '' - when "\t" then '\t' - else char - end - } - "\"#{a.join('')}\"" - end -end - -class Array - def to_json - '[' + map {|element| - element.to_json - }.join(',') + ']' - end -end - -puts < "\r\n").each {|l| - if n > 0 - keyword,word,trans,exp,level,memory,modify,pron,filelink = l - kana = '' - if trans =~ /【@】(.*?)(【|$)/ - kana = $1.split("、") - end - puts [word,keyword,trans,exp,level,memory,modify,pron,filelink,kana].map{|e| e || ''}.to_json - end - n += 1 -} - -puts "]" diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/gene95/Makefile.am b/storage/mroonga/vendor/groonga/examples/dictionary/gene95/Makefile.am deleted file mode 100644 index e89f13f595c2b..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/gene95/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -gene95dir = $(examples_dictionarydir)/gene95 -dist_gene95_SCRIPTS = \ - gene2grn.rb \ - gene-import.sh diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene-import.sh b/storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene-import.sh deleted file mode 100755 index 488d6c83adc39..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene-import.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh - -base_dir=$(dirname $0) - -if [ 1 != $# -a 2 != $# ]; then - echo "usage: $0 db_path [gene.txt_path]" - exit 1 -fi 
- -if [ -z $2 ]; then - dictionary_dir=gene95-dictionary - gene_txt=${dictionary_dir}/gene.txt - if [ ! -f $gene_txt ]; then - gene95_tar_gz=gene95.tar.gz - wget -O $gene95_tar_gz \ - http://www.namazu.org/~tsuchiya/sdic/data/gene95.tar.gz - mkdir -p ${dictionary_dir} - tar xvzf ${gene95_tar_gz} -C ${dictionary_dir} - fi -else - gene_txt=$2 -fi - -if cat $gene_txt | ${base_dir}/gene2grn.rb | groonga $1 > /dev/null; then - echo "gene95 data loaded." -fi diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene2grn.rb b/storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene2grn.rb deleted file mode 100755 index c9d9a593b1167..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/gene95/gene2grn.rb +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env ruby - -require "json" - -print(<") - $stderr.puts(" body: <#{raw_body}>") - next - end - puts(",") - print([key, body].to_json) -end -puts -puts("]") diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/dictionary.css b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/dictionary.css deleted file mode 100644 index 72b5a6749b33f..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/dictionary.css +++ /dev/null @@ -1,3 +0,0 @@ -#result { - margin-top: 7em; -} diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_flat_0_aaaaaa_40x100.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_flat_0_aaaaaa_40x100.png deleted file mode 100644 index 5b5dab2ab7b1c50dea9cfe73dc5a269a92d2d4b4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 180 zcmeAS@N?(olHy`uVBq!ia0vp^8bF-F!3HG1q!d*FscKIb$B>N1x91EQ4=4yQ7#`R^ z$vje}bP0l+XkK DSH>_4 diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_flat_75_ffffff_40x100.png 
b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_flat_75_ffffff_40x100.png deleted file mode 100644 index ac8b229af950c29356abf64a6c4aa894575445f0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 178 zcmeAS@N?(olHy`uVBq!ia0vp^8bF-F!3HG1q!d*FsY*{5$B>N1x91EQ4=4yQYz+E8 zPo9&<{J;c_6SHRil>2s{Zw^OT)6@jj2u|u!(plXsM>LJD`vD!n;OXk;vd$@?2>^GI BH@yG= diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_55_fbf9ee_1x400.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_55_fbf9ee_1x400.png deleted file mode 100644 index ad3d6346e00f246102f72f2e026ed0491988b394..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 120 zcmeAS@N?(olHy`uVBq!ia0vp^j6gJjgAK^akKnour0hLi978O6-<~(*I$*%ybaDOn z{W;e!B}_MSUQoPXhYd^Y6RUoS1yepnPx`2Kz)7OXQG!!=-jY=F+d2OOy?#DnJ32>z UEim$g7SJdLPgg&ebxsLQ09~*s;{X5v diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_65_ffffff_1x400.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_65_ffffff_1x400.png deleted file mode 100644 index 42ccba269b6e91bef12ad0fa18be651b5ef0ee68..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 105 zcmeAS@N?(olHy`uVBq!ia0vp^j6gJjgAK^akKnouqzpV=978O6-=0?FV^9z|eBtf= z|7WztIJ;WT>{+tN>ySr~=F{k$>;_x^_y?afmf9pRKH0)6?eSP?3s5hEr>mdKI;Vst E0O;M1& diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_75_dadada_1x400.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_glass_75_dadada_1x400.png deleted file mode 100644 index 5a46b47cb16631068aee9e0bd61269fc4e95e5cd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 111 zcmeAS@N?(olHy`uVBq!ia0vp^j6gJjgAK^akKnouq|7{B978O6lPf+wIa#m9#>Unb 
zm^4K~wN3Zq+uP{vDV26o)#~38k_!`W=^oo1w6ixmPC4R1b Tyd6G3lNdZ*{an^LB{Ts5`idse diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_highlight-soft_75_cccccc_1x100.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-bg_highlight-soft_75_cccccc_1x100.png deleted file mode 100644 index 7c9fa6c6edcfcdd3e5b77e6f547b719e6fc66e30..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 101 zcmeAS@N?(olHy`uVBq!ia0vp^j6j^i!3HGVb)pi0l#Zv1V~E7mPmYTG^FX}c% zlGE{DS1Q;~I7-6ze&TN@+F-xsI6sd%SwK#*O5K|pDRZqEy< zJg0Nd8F@!OxqElm`~U#piM22@u@8B<moyKE%ct`B(jysxK+1m?G)UyIFs1t0}L zemGR&?jGaM1YQblj?v&@0iXS#fi-VbR9zLEnHLP?xQ|=%Ihrc7^yPWR!tW$yH!zrw z#I2}_!JnT^(qk)VgJr`NGdPtT^dmQIZc%=6nTAyJDXk+^3}wUOilJuwq>s=T_!9V) zr1)DT6VQ2~rgd@!Jlrte3}}m~j}juCS`J4(d-5+e-3@EzzTJNCE2z)w(kJ90z*QE) zBtnV@4mM>jTrZZ*$01SnGov0&=A-JrX5Ge%Pce1Vj}=5YQqBD^W@n4KmFxxpFK`uH zP;(xKV+6VJ2|g+?_Lct7`uElL<&jzGS8Gfva2+=8A@#V+xsAj9|Dkg)vL5yhX@~B= zN2KZSAUD%QH`x>H+@Ou(D1~Pyv#0nc&$!1kI?IO01yw3jD0@80qvc?T*Nr8?-%rC8 z@5$|WY?Hqp`ixmEkzeJTz_`_wsSRi1%Zivd`#+T{Aib6-rf$}M8sz6v zb6ERbr-SniO2wbOv!M4)nb}6UVzoVZEh5kQWh_5x4rYy3c!871NeaM(_p=4(kbS6U#x<*k8Wg^KHs2ttCz<+pBxQ$Z zQMv;kVm5_fF_vH`Mzrq$Y&6u?j6~ftIV0Yg)Nw7JysIN_ z-_n*K_v1c&D}-1{NbBwS2h#m1y0a5RiEcYil+58$8IDh49bPnzE7R8In6P%V{2IZU z7#clr=V4yyrRe@oXNqbqo^^LvlLE?%8XaI&N(Np90-psU}7kqmbWk zZ;YBwJNnNs$~d!mx9oMGyT( znaBoj0d}gpQ^aRr?6nW)$4god*`@Uh2e+YpS@0(Mw{|z|6ko3NbTvDiCu3YO+)egL z>uW(^ahKFj>iJ-JF!^KhKQyPTznJa;xyHYwxJgr16&Wid_9)-%*mEwo{B_|M9t@S1 zf@T@q?b2Qgl!~_(Roe;fdK)y|XG0;ls;ZbT)w-aOVttk#daQcY7$cpY496H*`m@+L zeP#$&yRbBjFWv}B)|5-1v=(66M_;V1SWv6MHnO}}1=vby&9l+gaP?|pXwp0AFDe#L z&MRJ^*qX6wgxhA_`*o=LGZ>G_NTX%AKHPz4bO^R72ZYK}ale3lffDgM8H!Wrw{B7A z{?c_|dh2J*y8b04c37OmqUw;#;G<* z@nz@dV`;7&^$)e!B}cd5tl0{g(Q>5_7H^@bEJi7;fQ4B$NGZerH#Ae1#8WDTH`iB&) zC6Et3BYY#mcJxh&)b2C^{aLq~psFN)Q1SucCaBaBUr%5PYX{~-q{KGEh)*;n;?75k z=hq%i^I}rd;z-#YyI`8-OfMpWz5kgJE3I!3ean6=UZi!BxG7i(YBk? 
z02HM7wS0)Wni{dWbQMRtd-A)_Az!t>F;IwWf~!*)-Az4}yryNkz&9)w>ElA80Oc`6 zHo#9H!Y3*Qx9n@Jn)!w6G^hb;e_n8zpIyXCN`JFkPc)^Q?2MsLNFhMgrcZI-<#1ne zjH;KFf?4eAT9mQZ}ZfHLGA#d%s;SZK4p0FwZT2S^{ zQ2BG1xJsbK6?yrHTjJi|5C0u=!|r!?*4FL%y%3q#(d+e>b_2I9!*iI!30}42Ia0bq zUf`Z?LGSEvtz8s``Tg5o_CP(FbR0X$FlE0yCnB7suDPmI2=yOg^*2#cY9o`X z;NY-3VBHZjnVcGS){GZ98{e+lq~O$u6pEcgd0CrnIsWffN1MbCZDH<7c^hv+Z0Ucf0{w zSzi^qKuUHD9Dgp0EAGg@@$zr32dQx>N=ws`MESEsmzgT2&L;?MSTo&ky&!-JR3g~1 zPGTt515X)wr+Bx(G9lWd;@Y3^Vl}50Wb&6-Tiy;HPS0drF`rC}qYq22K4)G#AoD0X zYw$E+Bz@Zr^50MAwu@$?%f9$r4WHH?*2|67&FXFhXBrVFGmg)6?h3^-1?t;UzH0*I zNVf9wQLNLnG2@q>6CGm>&y|lC`iCFfYd}9i%+xkl^5oBJ?<;aneCfcHqJh7Yl5uLS z9Fx-(kMdcNyZejXh22N{mCw_rX1O!cOE&3>e(ZH81PR95wQC37En4O{w;{3q9n1t&;p)D%&Z%Nw$gSPa!nz8Slh7=ko2am)XARwOWw zpsz0~K!s{(dM$NB=(A=kkp>T(*yU6<_dwIx>cH4+LWl282hXa6-EUq>R3t?G2623< z*RwTN%-fgBmD{fu*ejNn)1@KG?Sg*8z3hYtkQJQjB6 zQ|x>wA=o$=O)+nLmgTXW3_6diA;b4EY{*i*R%6dO2EMg z@6g?M3rpbnfB@hOdUeb96=~I?OIA3@BWAGmTwiQ{x5Cqq<8c10L!P zd@Qk^BseTX%$Q7^s}5n%HB|)gKx}H$d8Sb$bBnq9-AglT2dGR2(+I;_fL|R4p$odJ zllfb0NqI)7=^z~qAm1V{(PkpxXsQ#4*NH9yYZ`Vf@)?#ueGgtCmGGY|9U#v|hRdg- zQ%0#cGIfXCd{Y)JB~qykO;KPvHu|5Ck&(Hn%DF~cct@}j+87xhs2ew;fLm5#2+mb| z8{9e*YI(u|gt|{x1G+U=DA3y)9s2w7@cvQ($ZJIA)x$e~5_3LKFV~ASci8W}jF&VeJoPDUy(BB>ExJpck;%;!`0AAo zAcHgcnT8%OX&UW_n|%{2B|<6Wp2MMGvd5`T2KKv;ltt_~H+w00x6+SlAD`{K4!9zx z*1?EpQ%Lwiik){3n{-+YNrT;fH_niD_Ng9|58@m8RsKFVF!6pk@qxa{BH-&8tsim0 zdAQ(GyC^9ane7_KW*#^vMIoeQdpJqmPp%%px3GIftbwESu#+vPyI*YTuJ6+4`z{s? 
zpkv~0x4c_PFH`-tqafw5)>4AuQ78SkZ!$8}INLK;Egr;2tS18hEO5=t;QDmZ-qu?I zG+=DN`nR72Xto{{bJp||`k}-2G;5#xg8E~xgz22)^_Z;=K|4@(E&5J)SY2of=olcw z5)@L)_Ntcm!*5nEy0M9v0`S33;pO4TN;>4(Z+19p_0>u#e-vE zXCU(6gAvu~I7Cw(xd%0e59MNLw^U37ZDbsBrj%eDCexw8a3G`nTcXVNL6{B7Hj@i& zbVB{;ApEtHk76q08DJ48dSxd$C(;$K6=FpU<~l9pVoT9arW^Vu{%Bcn4`eIpkOVC| z$)AKYG_`ypM{0@BUb3^9lqi_c?ONH|4UJMJWDowMVjacycX7}9g={O7swOB+{;+?; zjBo!9?+nd)ie#x5IbFW-zBOo0c4q@9wGVt5;pNt`=-~Zgcw#*`m($6ibxtZ`H=e=} zF#GZ~5$%AUn};8U#tRem0J(JTR}d4vR(dgK2ML~lZsPhayJ2h1%sD4FVst| zKF)+@`iNzLRjg4=K8@**0=5cE>%?FDc({I^+g9USk<8$&^qD~@%W0i4b|yMG*p4`N zh}I!ltTRI8Ex$+@V{02Br%xq#O?UlhO{r8WsaZnZCZq0MK9%AXU%MDLT;3=0A9(BV z9VxxxJd7jo$hw3q;3o?yBLmA=azBUrd9>-<_ANs0n3?-Ic*6&ytb@H~?0E(*d>T5n z-HiH2jsDf6uWhID%#n>SzOqrFCPDfUcu5QPd?<(=w6pv1BE#nsxS{n!UnC9qAha1< z;3cpZ9A-e$+Y)%b;w@!!YRA9p%Kf9IHGGg^{+p`mh;q8i7}&e@V3EQaMsItEMS&=X plT@$;k0WcB_jb;cn%_Idz4HO$QU*abf4}+wi?e96N>fbq{{i|W0@(ln diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_2e83ff_256x240.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_2e83ff_256x240.png deleted file mode 100644 index 09d1cdc856c292c4ab6dd818c7543ac0828bd616..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4369 zcmd^?`8O2)_s3@pGmLE*`#M>&Z`mr_kcu#tBo!IbqU=l7VaSrbQrTh%5m}S08Obh0 zGL{*mi8RK}U~J#s@6Y%1S9~7lb?$xLU+y{go_o*h`AW1wUF3v{Kmh;%r@5J_9RL9Q zdj+hqg8o{9`K7(TZrR4t{=9O`!T-(~c=yEWZ{eswJJe->5bP8)t4;f(Y*i_HU*sLM z2=7-8guZ}@*(HhVC)Mqgr$3T8?#a(hu& z?Kzuw!O%PM>AicSW`_U(cbvJYv3{HfpIP~Q>@$^c588E$vv)V2c|Mr% zuFO$+I~Hg@u}wPm17n%}j1Y+Pbu!bt?iPkjGAo7>9eRN0FZz3X2_QZj+V!}+*8oBQ z_=iI^_TCA;Ea2tPmRNOeX3+VM>KL;o1(h`c@`6Ah`vdH<&+$yTg)jGWW72T}6J`kUAv?2CgyV zrs0y@Fpvpj@kWVE0TzL@Cy#qHn~kgensb{hIm6J&I8hkoNHOz6o1QQ3QM4NZyu?;= zLd>`wPT*uGr+6vAxYv3k8{gMDR>tO}UavDKzzyi6hvbuP=XQ4Y|A)r4#B$U(q7{1Z z0iLeSjo3;T*diS*me%4|!s23l@>R}rn@#Zc{<%CFt;?gd5S<)b=8Yz32U zBBLprntW3RE3f|uNX5Aw|I(IlJjW-Byd?QFFRk%hLU}O*YyYQel}WcXilLMJp9cB4 
z)E?D+*Y4zai&XY!>niMfTW-2pp-^KFT93%Leig@uoQGPYRCva-`w#orm`is`p8b4s zxD462;f*^XO$=3by=VzN9i@xxr<1w=pcxl!$!fjWt|fYmq1@@badT?v`d zIi$|e$Ji}FXsiVYf)?pN1R0LBw;+)B5aUJj2fP+=m;=_Eho84g%Jq#@MLPSQEX*@T z6sZb)m?)zby>{j1)(;rRML|gKSs+9jorf-XhQJ2Jyt5Cqc*`S3iX@A5C3jvgAns|4 z*|)YQ%Kmsj+YZ53;nMqh|AFvehUV-9R;1ZZ;w5r9l}8hjSw@#k;>)$P*r%)=Extyu zB!$Kd-F?*50aJ2;TNTR-fc8B{KAq3!vW{g$LlGPfGW+%#CXU zJDcMsvyT2`x~v>>w8@yssoA`KuIZ98CLU{Ia%*nW3G4t}@ApsbC@o^WCqL>OXx>Y^ zSuVWEQ;3=A=@RxCnt0>G@#(VWBQ`0$qTwA#e>SX{_N~JWGsBxFHCw|5|?CzDi>92F-^=b*8sMXnhUJdb!>yGD2nhN@{582 zRPcxuDzs&;8De)>_J19z{0xppXQop#T_5ejGCKv@l>$O#DA-@X{y_1B-AsiU)H}DR z3xDZ8G`amV_WmA&8!W=@jgm|%bnwH%qkg(@J$hLaSV zC-rXIFMM%y<|Gb)o?j zpe-`dJ*N5tC-iH)d0CgLdBsw*C!ST9hY1EkI|Y(&=p&dH&q;a&7HXa5#_wtMsenQL zcpyhwx)Ppw@XmVz?P)DI#^ee1oC!i`>>Jq1ESk-OuQ(Pbv=s{A0AjM@rw#FaU;RUh z*At0{U*NtGVY_-JcuG$?zuuf%ZBTWxKU2yf?iN#-MRWs>A*2;p0G1Tp3d29u5RbnY zDOON-G|PidOOGeybnbzu7UVv71l!b=w7eU5l*{EdKuoKu`#LZ}|fnUr-+lSST9(MTT`0tqOG z#+Q_=lXe-=;rE4u8s~;%i~~ z8v&&+VPeXG=2zw9B5sR$e?R(n%nf?p-(BCZ8}x!_-9T+LT;2=Zu?Wv)j3#>35$6dR z4*7xmI)#06qjh#sXvX(%`#D1mD8fn1G~I;l%Dk{pw)}>_{+3^Fv_q)>2#de5qGCId zPz?ix-3954nM&u@vaw{o%-#HU%_bLJMO#@enR^&B{3ihWdoU6%pBJ`o>im+b-c6r-;c{vd0Z_)`75$jApy2?!9G4_FGa)iZ~9`6VELiYM+n!-mUfvfm{jt zC?!1=%pxJhF>vyQ47Q}R;O48pxgMs)rz$SbM&jkp<6X$r4DHWg>ZnGB-$r2o1*nL# zW0^*itcRY_^Uv^XgQP>W#>KQgM~l{;S(GkVW@&vld^AhWzG^m|9#0#USbM>^en{k2 za8~DTL`(Q~=ofsL&Fc`!L6r~qTnnGo8r98<(aG*<0%aNEr!!BIyY>VV82kxhR%d>V(lN&#BId#urK_i~Pe6?>C~J!pU_lRon#&S_cXoQv;poG8FK4atc

N)npz1~X%p6x{M(Gw!!H=!}lmO0Xr*8ewyH(Q+>oy`fxQkxJ zzzB$)%*xM4s_2(O>)T-QXhwP|&DZam#{O+47q|WKfz_ZL-MypRN~o{fE*I#6@eM?I zs%f-6{Lz6j7rB#U$%O$~TIT!j?|Ip1CpSmb=JA9qCY3-mQf|fVCxswPjok|VofUEP zW5^pTd5B;wRkyW%1a;nYHB$ef6Pv8^);`m0jv6p72iNJl+sVBqZugsq6cq_pyNREi z>GN!h6ZQ6`aOMr_2KI@j=XR@$aJj(2jcpY?>f=2kMV@di5W7Swj?ug10zRe}F1nR* ztMm6+T^)LJe^SzGgSxahQajq0h7#|8oMV0>D~*N}jl?9_X`ka42R4@rryDc3o(c$R?1*!1O9zleSOczw zYPS3~xbJ$~C(3+D7Zkrfjs_lneY^zv^kHmxt)aqZ!aeGABHZ`gvA&K`72z}ihI$Ht z9V&)wQy0g@R9irwbf!{uE&_J2l9jXz^Vj#=qA77*3Pd9OjrE_tKDHADd!AjFQv(ji zct-BMUt9()1Ox!dsI_h1(^F_U)_QJrx|%+y`zWWlD4=Nd?JQ=URh0*{fb1!o4tS(H z^r_T(8t1SAHf1oduG+X^*EC_kL(!QnXL6Hp);449yO&1xE>MXGqT)t10lzvALllX;;Q)RiJX$dm zlR8ep5-GdHmRm9?N#QCjNUA);vC03Gw6yds6^?c4;(MH>;O5xmQ2nGK3Dmk8i*v5t z-{jJsQq30%z}0`g7SN-yN`l-`@6rkJ|V|>18`MV zwUeH}DxWw&h+A+Dn|4|YNr&EfKS`Hz_NkeW3*sI5Rq-J&FzG=!{-K`n65#7O%^&f> z`PkqxyC_K)>781~7H${^Nj{`>XEa&OPqqQhySR5%w2{5+sEakXXHazJp6~LP2QKDx zpkvZrkDOa+A4BbqqX6ls&O)5-Q7`qkZ_?6~c-wQ9tseNtET;nhEOL^`*naKwcMX;R zbto&a;oTR0s;vjfj3wigUg)Sj)!OHQfZoJwAsWYI1A4ntz>X=W4s|y?tUk1r=>#Ct zf+?hq^>rQ3$KNboG$UhCdEmp{qAR13DK$f0ES7kAG~7q+g!jfVq`1b5+c62N^0%~o zKw91o@Wv;0EW*7fINAX3O~L-V{`;xB0q()#^HKZOlLrXVL*Dtw-$SUp8*_J{r( zW`6r`cz0yZQ#f0#*y+m64{bs7GP|2V$phf42rswJB?s@9qf;Bfc^pm-ZS#^5dkG{u zzv;l&B$NYcegSqAnjnPN1?17VUQbPummcWry((85IFB(pFQNGN{hhN$Fv?~l_fr?| z9=%dK(+;kZ(8=mwptjwC-ikBD$Z{l2++~*8wq5ynF<+PNlZI7ba5V#fg~L}kE;UH5 zJ;{P(`G{tNl&z5rUiH~e{I>GT8~9&*(J;Myx9z5P!db!F8RTII^I7c)HU=ss*bYB` zgwiIMZ_q>KEC$4lFm+Afvu6^$X1jm1rB*4H)-EIO5Rvz_p24?OkJ zovD4{-1KA6*oL?a;3qR7GZRB!cE5oAdA#M@{w+fGgsJ-lSmQ^-?8E&Q%tbmjd=@gZ z(}Mg*jsDf6Z)|7s%@9pc-tuw5W&zqUXjv2bVkC%-X?O3F72W4EsIl#1e>Mdz=X4k*_>VxCu_2?jjg16N*5fwC-36OW&;Sz}@jMn}hgJdEd pO;bST+>R{W-aENZYk%(=^(_R5N$LmL{Qc?!%+I4tt4z=_{|902Wu5>4 diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_454545_256x240.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_454545_256x240.png deleted file mode 100644 index 
59bd45b907c4fd965697774ce8c5fc6b2fd9c105..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4369 zcmd^?`8O2)_s3^p#%>toqJ#RmwV2==ic*rz7lOw=eaq=H~;_ux21)-Jpcgw zdj+hrf&W^f<%Qk9Zpqf#;jH;N^Z%VA?R|9mZ{esQd(2F=?y+!`XZ5CR?ue=UdHIfUDFM*m15I;g=VN2jw zQW9?wOhDI#+P0|`@JQoC3!pu=AzGMtYB>V&?8(2>_B5_p`1Sb1t{^|J%bZYv09RS? zQ*dcs7}$)taJ@vX0E<96P{ur)Eygr{&ALyNoMP%_94m}=qFVT)&CeG1DBBMLUSKP^ zp%%Q3$MEtKll)X*+$)3O_3x`4%cHY0uhy7U;5x^Ir}X1)mv&B%|A)@A$a>f}tP{5X z9-gkti`YyT+hk9)cZW7fAQhjT%$XLLI^&VR=qev36;`WGBOP!^&(?!sK6jSH0Dnz4 zoEMMNu}y&n=rd-GWI?rGBI8!GD*NJ$k&e5-6+~-9F^6tV<=5`FcY~t{iqRcncEU+F zkT~jww!oy(@~b~WGI8!lzjURX&IpJjFGxShOKUunP+rW$I{c|x0qM6!Gxf6n(;$D> z+QYiULqq)Fy4VDk&Mev)NyM@nvF z7O6M*A$C)kBi0HGMT_+xfQ^USTM)>*h_Rx%eSRxA%n|FuC&=F=Pz}E5uCqbcy;7j=%Qh`glqEA-jx0(a<)uKO5Fe|JLD-ndZ-vnW`G=O&^%pa}Ah(2%m?oANs{lJ`?RhrZ8n!`Q97TKw{YAw9 zD)=M{mD(~_jj`LTd%q6Veum)Cnd!7lw}(5h%ubHcg^2O`prn%u9es3C#&%TsnmSD3%3Ik^Yd@6-d%(I7kqT(B@dVX2 zIidXgd>qYT-oTZ=1sGI7^*_E9Q)1F2mooE0R zXopPnh^ci@+wz2ZDjo&Owyxh6t90Gt!u0miLxc!bue^LvHF?)O@Yf!dQUXfW$u8(f_n07^N)-vpIe;TrHv5uKm{h_v`-IN^zwWc>Lk ziGsSr89sDcdOR_wa~DjrqV&Nd*$18(vohPJ3hSzEJPF2d!u}415wrSMtS(zNa7 zbO0G4ajgKNp{`D7DO<(T?wowarQ0dIKLb<}#prQM)ytB73YNTPQgX^xoT zm>;yKSJ*c@QfD8HW`6&+mowOaA|A&~G0fO6&xwj;E3O9^Zu~ZXts~;-d%FyyeXrijORi<_S(dw_5@h&-fTY?#FJo% zQZZ1&ED%$if+n8JVM{s-ZoK@P>p@z4s`AoI6hYxE!Ie_Y)cpjZjc8@~uNMYVfy#J$ z)+sdEX7DK^{}kUAST8U6^p6#c>0Lc>T~9`0}`*2 zizaU)TFS4(u;BenUWZr?s{D)Z)rc9L5&gUvz3iSQaF#J)D)Ts{YgagdDcI1S`dtes zPqb4|h-RIkjhnpmn(Q2Je6Di5C?MkCUL)!WoKn|P#al41v#-Q8`K1$Gh64UhPQj|T zaZb%tJ}O{A?Cvl26!jeKS3OUkp5@8RDBYwh`Loxb5W<^m*R37+v}#*m-G{{ocF-#r z7!k3ZS^4Qu9sNRNZ3`laW2TqV{rsR#~gtVp6C zL0?}~gbLTv^jqtPQD@Cpq6{B6v&*Y)?tx})z=qQNB4Z_59 zpI2L)xQ`!|J8wWgs82jSw_8(;#}y7~Y^&hY9P1G)@`CGtIi*tZ%-%&;$PuG(!M%)E zQ?T#imBH8dCZxUBX^RWPwIh9LcnL3#$befQDr@UJl{=}o0){qIt52vU9X=3L_gvVW zPqp_YhhpM6XiE7Lvn-G0Wzo>0;g|$_-7|ucz~*w%bW@hr6M?~v9dT}L=>UotTj13& z?Uvt0_uOvzMq4iG6)gZqeU;W=P@EVod;}Vr7P*@=C19v;iz$4N+c5ewauTtKK5e;yIx(FQUec0 
z`G)VlTUY|m2L=KusMRgMlapu#wt8MohK3=y`!J`tD6nYd%?xIZO`Q)skL)R%3Vf(P z__5Sx3h%fKF=sNdZo2p(w=_|}1M%ri7fO?8))sU1ySG;M4p4;zrr}4l0lzvA!WQ&a zrwX>%lJkv`Gr_u=K>kHOg6(AB(R3FOryElY)-vi|fRsBS<)$1;TC_?BnyScjY6>_ZD=T|bjcbjz@D6V+yfHd4SU+J*2Dh%n;$5ou zHh6R=)$>IH@%5js2KH#JkfFCVI}P>~U;|}>kk|06tA}^~B;|gJ$UvSF-l4GX43DAR z&M2mp8OgiTaK4li0|Q2qmGNYsm+Qq^JM8yfCP>5!31rjh4Mnq~+5X8+_$scfP1Fp!c zcQO*#6cfJ?ZRxn_$Se_|}Xo1oIF7s(7CllypCW@W8-y5%Bel_K*0G zd~8UWeYCWz>~^hF3ond|tQcClJ(8^9FW&&?U)a4O-pE;Y*u|FHGax>F*Kg_beOF5c z&?#xRN5Q?ckEwCnNr-${XC=w-te5%QH(6O~yxke=R!_ns))PU07Pu)CY`<>$+XicZ zCI=g^;q7NZnw=-vf;HoWLD+}`&Bph>kiqyX5jxjI1A41d$R3nahq@CHULV#9ItIwJ z0)^JGy{hB;@SD|}Zel8~2z;UjN96MR@dt;EV`9RP4X&zn8ib=n*107cICSp7z6srZ~4Qg|Vp$OB0By{IxAPaD7HGFw_HTza~wWN1A6 z3`7BZFse2a4{y#V^&;nRVcZOz*2>A?jm$%?)KawLR0cEz24qxxOOo9_2)9MrWpSg7 zPiPz+M7(zPRZ3$#11ti?uI!}bM!Dg%L#+uR+^2L2RX+QlMpL zg_DrR=GIT7C~b+^OZK)?l7*9c-78zWVbLo1oS}bItdscuF80}guwA8c^(47DfaBjV z^V@&JJHxYHqS+e7&X;ezZwsE2+t~n0?*m^(db@WnI{LgAnOqOa<8pRvo0E>*O&~J_ z&A)t2LOG)5=3$3n2_gi2Kpvgv)#LCUh2Y~ z!A&(~-8reT$sJk0=L;m~ES3k}k% zkF%gzzT(+nRU0IeUvuW8pq=8uzr&7HW>K5ZiD*8qL17AI^ zGqo>*mvIChU6+&t{A3|!W?~pi9_O$>k2d|#(Z721wcT{S1)_UFZ+}QS^KZ*u?5Y~bz z^cLI;2{$C_ZwWqM@sYMYwG+^N<^Ivq8ZOwV;7xT+WCh)I9PHC}ut;VNr?w z<@?HsG!Qg3zaV+-xQ3ldtad!U<6iGz_enGH*2akP_r)o1D&8p^5M)_c8IIj6Wy*7HJo&CBLuo~nj>(63pZzO(Vv^ZuB3 zMYigjkwA;FEy|G}1jpiMj6|NTm7Uyiw=@FDE*nX<>jR!W@9XIyf%$Fd*J5*D0Z0Lm z9}ZQxyT|x5ftNy?V>EbJz-K>bV9gs9RaXUP<^=;e?&Fqxj;6{ieR-a-@HycA1KMKhql8GOmcxwZ?_-(3hMK^^a*(gaFvBH ziIC!fgH4$W*NbKIaY&T?%&13``KbD@S-0`xQ%v3TV+B!;RC7O!+1a9QCA$H@3tR;k z)SSoR7(s4)f{zM}eWgFN{(ZH5d1O}l)f$ruT!)Q&NImXyZsTzOf9TwctcSfr+M)aJ z5otO+$jvm-P4)ykH)x|cO5xeb>?!`qGw$(>&axqLL6yoB${vsMXgL_-bz@2J_tS92 zdvZG-+vKl@K4Vr(EL{WQt@Z+Ea-hxX0}nTSZxnpi^#Kn8Ox8FgIS|hc}KJQ4tm*HO16ui{(O9} z1YN)GjiQt6fGq`Cj+^`zUf?8hk^(T{{cOQGWFP98am}is28A!5%{R#ENv8fCN!j69 zlMEK(2z?|BY=Je$XD9mB-Kkem*(d-j^9j$2#6r$Dz?s)-TCDCGCs z8>6Pvj{Y+YIeFA@qY22V$)awy@q!9A4rgk5b9TcC;s9Ig^G|6nDP+5=Fzg&?(L=vc zCbGd>fSu~@6!94td+o#d@sid!EIX$rx7*cawe6 
z`dScJ+$HssdOjE)O#Ybs56vm-FQ$7yuJJD^Zqk%hMaIgAJ<2yb_MFQte_i;62ScT$ zpjifYyR_E=rQ+>H)pmlr-Udzg*-!|ssw(D7wJvC+Sf8bb9;;q8#z?0p!!bsd{wy|5 zpBaMHE-Ve>i#LLjHRaMLtp%9&(HCng7Sw96jVv!#0k%?F^K7&=T)mnYn)D9(i;4x5 z^NJTJwq~pv;kH@#ejTd*48~(J(r6j34|m`h9fEDj0im)~+%I5XphWymhT;_Zty|Q& zzjPg#-ufAHZ1M*Gccw?Kf|8Pnhtb0`!{N`Bqsa37J+>wC$!e z00k+2Egzz;rbcWoUB%Jvp8W1}$XD%e3>4y;;OZ1ccT-O#uW6Ys@C}Pa`nZrNKzR(2 z4e%3)@QI4SE&E!lW`5y14QhbepBG%_XBV-O(%5tj)@9#|;sC-MNev!zGDHk}JdpGC`iJF#8=8-P$Xoku_=Dw%Cv3{U7L>gf zRQ?<$t`cZ*MP5GQmbmx#!+*!zu>0MewRO9GFGS{b^m_fJ-N0?j@EqoFf>$khj+E|@ z7r3We&^tR^YZrxKe*d22agXqCO0l44&kqCv{u)T|(lv`~PK@DvE z{QI_TlCH5z*gR!>LO)k67{^R+vWx24U2^2ODXpwT;6y+6+$5m)_*w4WY&#do9dCeE z)>p+Ykdhq($DhmMiaYXey!@N%L26uz($aJ!QT{B^Wu}U$^9e#5)=c+XF9@Ill?ZmM zlNgHiz*9!vDc&uxOo;ZVxb`Q!Sk0*gnfxWzmbZh4(=%CD%qP?0=);n$&zaW_$UKV9 z8axdcN#AyZ{P)wj?V{P}vM)YY!>6@}^>U+iv$`9>nMTCPjN>z%yF&3yf%>+T@0vh4 zlC8Xa6zeo?%=o3}M8{aebLHcO{^1Ar8qiM=Gquf?Jo)q5`-+?sUpg?QXyEUpWSm+n z$K-UyqkIwHLquru~o(OF)hhz$Y*|X>ZIbswnxRvr~ z2=rdOGVuD|xRlpAZE<0!X1F(%Anpl^@V^D3vbM}qxe|NI;TTiZy7(IM;R69RkA>a& z6gwYE2sREzQ_LHmWqB+ogMk(fMaSFeoDq-!HkFB_nXt5+2ncFuk9BQL1I&oB1zZi) zYW{6_&-Ip1l*OVRA##1ILQS;5R{-K^0wGTiJbVSi@LA^$D$;@J>^G{6@&+%4{b3(s zC~LEHiTv(0b#zxt?YJ0r_~pUZM~mQ(??(n#>&tD%+@nq=Abj5*8R!~Ul1`G~=qFJ4 zfl|m8ZDCYgtr`4LcOpgiJYX9qRY5;DcWti~PmS$VB$E-Zt^f4)vLDOe_3XTq5^ylW zJ9PKm!V-8sAOJXnUfuFNIf0R9tK-pNs2hO04zr620}5B(Ok>yB)Of-3sP59qfQNbm zA4{w!2@cB;GbR(~szVrbO%(w=5S!X`o@o@x++wbN_tMPT0Vc)*I;Fgsbf^*g0 z2Di?HTApwKq3+YwfNsqd3iP%{hyK1iyuVZc@*0tO_3+N0#GFsz>8MjeJ2UJ%L!%hi zGYYAthH`E+ywA*u{(eJ=ia3h*%k?779rk-K<0VZAPkl;TFUbmei|$fqWO8!_zIvqt z$ly$VrlH46nnpX~X5Yk0iBJl;=WuA4>~X4-f&K0yWf42h&0b30t@NYX$7egQ1Fp!a zbui-D6cWCWV&|R1CY@G8(qOmWjWeX3eX7UggZPGimA}soOuQdXe4uZ#2>5zN>qlI0 z9xk}lE=tNpX1m6*nFr2EQ3xs79!^sCldDJYE$m(qYv3q7>}1R7?iZW7>$~*%zKaC| z=$N?ME$>#+%T&MZC`dW1wUl6Z)JgyCn~V%K&i0H|iwE%$>xsZW3tTfZxIUePci@p;cRu|d=ItIwF z1clVHy{hH?@SD|(Zfqi^0DQ1hczHN7xq85h)rzQqLHMX2^IkuK7FB!kI40s$|CY7~ zNX^{_UjN8}L%Med;|+=4RNTMozn8KT;2tb77bUPCmioh+rZBfIiM6f_P34cQ__o1G 
zWqQp3VL~~pE5?qODf%iiQQ3f42YF@09tQ*$4v_EKUx;t1KCPCBtgqg z@+Tn;O)a0uky_%jm+WjNB?=~VyH>V#L!*=l*@OS6SVyt_UEH&NA=?V2stHPyKkVNy z&jg<#cjros){#ji)dK z%)We0L_478=HZ8-@xnwsKrWs8)x`MB;(Y`Cmu2c-&SH(vN-F(*e`l?c%+l$|y_AJJ zhcDGnwLvN+bu;_sX|1AiePhx@u&%P$hf*xE+O=~D?_(_KGWQ!158YL-y9$*6mmPo;Rp*Dl5lm-mVM2i`h- zM@nxv590_tvMwPD_{l=b$iOm|+|S{D9&P%zeT$GgX6Akl-tfUF>tL@Ld!B&{pN39t zH>3Vhqkr}2Yul+jb7UiouWVGPNsxX7Ueba+9|~dz?d*QM$ng0DZfO0`7fAy?2yMm| zcnRzUhZ&IcwgjH9cuU!w+VStYa{p*)4IgBf|E8)sqMYtB2KH_}SfsFq(c9i(Q6S3U oBo%DI*Kv;w;*%(i9W@f3_WCF#rGn diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_cd0a0a_256x240.png b/storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/images/ui-icons_cd0a0a_256x240.png deleted file mode 100644 index 2ab019b73ec11a485fa09378f3a0e155194f6a5d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4369 zcmd^?`8O2)_s3@pGmLE*`#M>&Z`mr_kcwz5Nh&gy7G+@45H9p05OJ)J0CH2owMSaGIN$+5!N; z<11j56?ANg=9hMl-IBGX-T8hf$N$b*H?$f4Xt&I`oABt1nR=k%#z{{*a!Axm|t}hCz zJg0Ln7;M4Zjx{$mwhMW+kWN;|j>qTx_-zNX!GzqEZRa}QF8_0yk6+=w}$QD^&hM4%OkT=uh$q9;5u~NL-I+NQyaVc|3l+iWI5~|(hA-G z08i8AMr@{uY_cWTxo^y|Qyb33mlZLvc7H2Zm~>mB7&=-1X^@|D z&0*~i?GBE&NM(Pv&Vt^zWu_bD3e|R?wTL{cSFwD^Ij9v%g=aLY@1U2Bxn#Te*{>%D zOOW-O-bfnJ7T8jd<*>8`Z2DsFQi~S$%^npJwXam5>>p zMd}QEjM)@~##n$LXpz1Hkl|2UGXi-JFFePXBWL+-5f%!S>L#KL3>Vl0w#d^21Jn<~_7q zWx^Xg1(>PsPGO&cu{S;(pRQ;=Vw2J<9NdQVWx<+g-`ia=Q@puS)75M+?u>DTa95e9 zt#1T?#a)uWC>Mia!K6>g|InPW{&Kp9$tC_3*;R_Xsz6^Eu|xW1$6j#0?XLs7^l+%O zlxddE)h^|=K(2UqS*0ECuDe0ic|H_^t*VOoTCKx0Qmn_^LyJ|b8l$Jvl3{2=3x8&7 z$1ik&YG>w#@x@y~$r`fhlUDo;yXecc6$`30m`3K8s{k8G&3RVp8n#|l6h(Xw`Axw9 z%6Y^J6k0P@4YAuSd%q7=eg)&u8EMoEmq$CWj1GY|rGQWw3ida!FHk&wCqrQh_0Bcw z!ZBS3CbxgZ+}~wzgGIQ#QId%T_TE~_qdUqxjqS#8#jPxdwO@(@-5_nSP&uT?aGYYD z6km36K9=gjUjImwO=5Hl#u85VF?r0HbW)#h^SR|s_L47Tl$&Z&Rz*ksl!t*(2O2;D z+8`6$qpLn}LchhCmv*X}moGMX5?F@juGeHQAddAn}0~r zS_0|d3*0v%Y)8+8K{ 
zGyoYPb|W9Grm9M4E?vb^@16ePbI4omZv+(NoZ##fLUmKlB(G_jEbtDCM*27t$v`JovAZa+%*Q5dDXF*Ftt*n!O>#ohCM4lZ)h5rdKV-3A za}2AO6@!`W>ROk5FN*>2Zza^Z%}8KT%*jBGH|rml2X1LR{wZhWx8V4>|5i}; zMnLIHn3!^)`87GYh}&Y`KMwyLbA#^pch}Z!`@P_qH&N^LS9SxpEy8mc!wFusq&Z@` zeO}<6PC@VNaII|=n(^cNUiLseig*$;NjG7;IwvfYCBN>kzv@v-V2eBQZ@oIs^)NLqMR935k|1}U;5<{s(Ebdj4r`?QtrrAPfQooq zmPs_(YTy|??+nitNIFDoR7~qLPPFFCf^_~8OUt{#!|9o*3Q{!@9ZAI$7O~piD!;WX8#v&RxNH27i59$`1{o zEYU_zE{bKEI%f3BbE0Fc;f2!4LjUlC`wgh4@R{1?O78r5t$hWKiLV{#QWWq{QZiPx zm3?x$;&DDRVt0SByRiFczw$-e)GSvpCRbzk^=E zz=(+LjEc{Ps_2(OYg=G(93!oS=IeJ|WA8STv+LgI*Oj1c-QC06N~mvJ&KKx{arGp5 zswvJ6{%BvBYo>#2$%O$~TITuh?Rr^jCpAUXh)}m74`O|aOU>w2KI`k<#efwa5=-l4Xx!o>Z9Evg`RLN5W7SQp3$@D3_hY4EV!0( ztMm6>zBcgY{RvHZ{9Ey&&)jr2B4s0qDPBUh1ITaAp&>rj3ng*B=VGXz* zs@eR<;J(XkpD6Q1U3}#FR)wlafiFMU(-=&e9(eQ`isrS-9aNwJ)7frS8RiXM4*SbC zL|4*c?h^jfYvSOpn%Z$W?C|TuZ;uy2pFWHXuGW`ZkGV&kPJsKqJJQ!NswAE!!cb2k zumi=AE$YIkm})cVlg>nn&PBjBRI*@mfhhRMsa5U8k#A!ztfiw)d7I_UyAif8$5sJ9a7WUv5!o%fL z(J7-8EQzv1YIc)BNeWkLK~m%y4vqe&q@|_ZR5;eC3-9rkf*T{_19jtuWKhdW4Bn|~ zZ-YyFLN!k)0AKg{dO)|v3K?=oy+dzb4%T1F4}JsByncB1Z(`2p@O0!E!JQelouN^* z%Q^YfQUh66D$Zx-RDZvLctsr9`_+1p#tz&4SMd@i_-8()tyg3OyhU~?Gt#-a{NKFN z0VGf+AH%@o6;-_*?$$T4QX-f_>Ny-5CV8Ccq+@>gNSeovbFr0@b}RiTcJbLx>ws&r zsvY!rR{4al#MpVKut~?&kTmF>_v3UaC!gvuxgg%5-{l{20}~&F6CUarF9N=u)BG71 zoQDlAwT+T=mfo&$Xy%4-kmW;4wuh6{{ABClybHV6L>t&k4?9_Ny8A_^?)ff#dEjhL z2RbC~cFVbz^fJ`$I0%prYc0g-9(7X3eUp}^#Mzv)Z1EsGW;qr3cY$+e2HU5d_O9L% zpbljP*1!A0PqpzNo3W&y(hD87qgweq5YQWYEkxrOuSain2-q@Z*P`x*ht-9)Fr5Ho zSTKduvc9h6`S^#$i)LgjDi3_PQ+RbaGP!!di^Y;4kB0lGo$y{if)rJIaXTbpRgO#B z1El6|18;s}$0FRjgK-7~ZwmI`_1{a`32+Y>&O_iTpm%vz6hNkjGR(#*! 
zpfJ2>OAQbTFba9S3j9BlRHXaG{)Zt(J<3ppA?}j+7F#{bV{M7zU)5e@~R&J_xf$+GKK~ z3{R;Y9fZGe^ifEqKL;!VMXv26=R~^TG(#*2!JKCWoo&c^$utAs#Gfq-?t!c&9TH5- zj&i5L4NWbdNs*djvsY}bC&ddUbh=iyc0;3-@Y#d^s8|Ql{ax(yenFcG#i|K%lRxy| zFys4w!@EPXp2AsbMUGc*eP|7uliAq-O6~(+MR>V(EZTd&9G+MY&gF2lZ=I8j*o`OC z`AxrmOGMeD=H_9Cq47clT|h34>-EI=%;E!my;o&wU(aKV&PymBzrV9q2uA62XS@JrjKYANZAU>;8mag#BU?Nv`+ZVhlAPV`HF_gKY_O zhbV2L`8qvR&f=@M5vH~geD+L&*L2s<)|5)clA0yt9TM{X)iWtx@wJO_!{vR#|AD6t z*OAg2&P_i8jjW5y0DdtOGcqvrCHD*1Uq_q1ZQmngPnf!2fHizH%sSX>#$2Rh!>1ur z+s(*-)abDuePc6~XNG8m@|KMXHVM#G4?~+V z1z!An!D0GD-7WqXE8ddUXLkI%u01$fTEhhy - - - - - -groonga dictionary search - - - - - - -

"); - $.each(items, - function(i, val) { - results.append($("
") - .append($("") - .text(val[0]) - .click(function() { - $(".search").val($(this).text()); - $("#search").submit(); - }))); - results.append($("
") - .append($("").text(val[1])) - .append($("").text(val[2])) - ); - }); - $("#result") - .empty() - .append(results); - }; - - var request_index = 0; - var columns = "_key,gene95_desc,edict_desc"; - var xhr; - function source(request, response) { - function onSuccess(data, status) { - if (this.autocomplete_request != request_index) { - return; - } - var completions = data[1]["complete"]; - var items = []; - if (completions && completions.length > 2) { - completions.shift(); - completions.shift(); - $.each(completions, - function(i, item) { - var key = item[0]; - items.push(key); - if (items.length >= 3) { - return false; - } - return true; - }); - } - if (completions.length > 0) { - displayItems(completions); - } - response(items); - } - - function onError() { - if (this.autocomplete_request != request_index) { - return; - } - response([]); - } - - if (xhr) { - xhr.abort(); - } - xhr = $.ajax(url, - { - data: { - query: request.term, - types: 'complete', - table: 'item_dictionary', - column: 'kana', - limit: 25, - output_columns: columns, - frequency_threshold: 1, - prefix_search: "yes" - }, - dataType: "jsonp", - autocomplete_request: ++request_index, - success: onSuccess, - error: onError - }); - }; - - return source; -} diff --git a/storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-1.7.2.js b/storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-1.7.2.js deleted file mode 100644 index 75ce261777253..0000000000000 --- a/storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-1.7.2.js +++ /dev/null @@ -1,9404 +0,0 @@ -/*! - * jQuery JavaScript Library v1.7.2 - * http://jquery.com/ - * - * Copyright 2011, John Resig - * Dual licensed under the MIT or GPL Version 2 licenses. - * http://jquery.org/license - * - * Includes Sizzle.js - * http://sizzlejs.com/ - * Copyright 2011, The Dojo Foundation - * Released under the MIT, BSD, and GPL Licenses. 
- * - * Date: Wed Mar 21 12:46:34 2012 -0700 - */ -(function( window, undefined ) { - -// Use the correct document accordingly with window argument (sandbox) -var document = window.document, - navigator = window.navigator, - location = window.location; -var jQuery = (function() { - -// Define a local copy of jQuery -var jQuery = function( selector, context ) { - // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context, rootjQuery ); - }, - - // Map over jQuery in case of overwrite - _jQuery = window.jQuery, - - // Map over the $ in case of overwrite - _$ = window.$, - - // A central reference to the root jQuery(document) - rootjQuery, - - // A simple way to check for HTML strings or ID strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - quickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/, - - // Check if a string has a non-whitespace character in it - rnotwhite = /\S/, - - // Used for trimming whitespace - trimLeft = /^\s+/, - trimRight = /\s+$/, - - // Match a standalone tag - rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>)?$/, - - // JSON RegExp - rvalidchars = /^[\],:{}\s]*$/, - rvalidescape = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, - rvalidtokens = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, - rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g, - - // Useragent RegExp - rwebkit = /(webkit)[ \/]([\w.]+)/, - ropera = /(opera)(?:.*version)?[ \/]([\w.]+)/, - rmsie = /(msie) ([\w.]+)/, - rmozilla = /(mozilla)(?:.*? 
rv:([\w.]+))?/, - - // Matches dashed string for camelizing - rdashAlpha = /-([a-z]|[0-9])/ig, - rmsPrefix = /^-ms-/, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return ( letter + "" ).toUpperCase(); - }, - - // Keep a UserAgent string for use with jQuery.browser - userAgent = navigator.userAgent, - - // For matching the engine and version of the browser - browserMatch, - - // The deferred used on DOM ready - readyList, - - // The ready event handler - DOMContentLoaded, - - // Save a reference to some core methods - toString = Object.prototype.toString, - hasOwn = Object.prototype.hasOwnProperty, - push = Array.prototype.push, - slice = Array.prototype.slice, - trim = String.prototype.trim, - indexOf = Array.prototype.indexOf, - - // [[Class]] -> type pairs - class2type = {}; - -jQuery.fn = jQuery.prototype = { - constructor: jQuery, - init: function( selector, context, rootjQuery ) { - var match, elem, ret, doc; - - // Handle $(""), $(null), or $(undefined) - if ( !selector ) { - return this; - } - - // Handle $(DOMElement) - if ( selector.nodeType ) { - this.context = this[0] = selector; - this.length = 1; - return this; - } - - // The body element only exists once, optimize finding it - if ( selector === "body" && !context && document.body ) { - this.context = document; - this[0] = document.body; - this.selector = selector; - this.length = 1; - return this; - } - - // Handle HTML strings - if ( typeof selector === "string" ) { - // Are we dealing with HTML string or an ID? 
- if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = quickExpr.exec( selector ); - } - - // Verify a match, and that no context was specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) { - context = context instanceof jQuery ? context[0] : context; - doc = ( context ? context.ownerDocument || context : document ); - - // If a single string is passed in and it's a single tag - // just do a createElement and skip the rest - ret = rsingleTag.exec( selector ); - - if ( ret ) { - if ( jQuery.isPlainObject( context ) ) { - selector = [ document.createElement( ret[1] ) ]; - jQuery.fn.attr.call( selector, context, true ); - - } else { - selector = [ doc.createElement( ret[1] ) ]; - } - - } else { - ret = jQuery.buildFragment( [ match[1] ], [ doc ] ); - selector = ( ret.cacheable ? 
jQuery.clone(ret.fragment) : ret.fragment ).childNodes; - } - - return jQuery.merge( this, selector ); - - // HANDLE: $("#id") - } else { - elem = document.getElementById( match[2] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id !== match[2] ) { - return rootjQuery.find( selector ); - } - - // Otherwise, we inject the element directly into the jQuery object - this.length = 1; - this[0] = elem; - } - - this.context = document; - this.selector = selector; - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || rootjQuery ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return rootjQuery.ready( selector ); - } - - if ( selector.selector !== undefined ) { - this.selector = selector.selector; - this.context = selector.context; - } - - return jQuery.makeArray( selector, this ); - }, - - // Start with an empty selector - selector: "", - - // The current version of jQuery being used - jquery: "1.7.2", - - // The default length of a jQuery object is 0 - length: 0, - - // The number of elements contained in the matched element set - size: function() { - return this.length; - }, - - toArray: function() { - return slice.call( this, 0 ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num == null ? - - // Return a 'clean' array - this.toArray() : - - // Return just the object - ( num < 0 ? 
this[ this.length + num ] : this[ num ] ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems, name, selector ) { - // Build a new jQuery matched element set - var ret = this.constructor(); - - if ( jQuery.isArray( elems ) ) { - push.apply( ret, elems ); - - } else { - jQuery.merge( ret, elems ); - } - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - ret.context = this.context; - - if ( name === "find" ) { - ret.selector = this.selector + ( this.selector ? " " : "" ) + selector; - } else if ( name ) { - ret.selector = this.selector + "." + name + "(" + selector + ")"; - } - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - // (You can seed the arguments with an array of args, but this is - // only used internally.) - each: function( callback, args ) { - return jQuery.each( this, callback, args ); - }, - - ready: function( fn ) { - // Attach the listeners - jQuery.bindReady(); - - // Add the callback - readyList.add( fn ); - - return this; - }, - - eq: function( i ) { - i = +i; - return i === -1 ? - this.slice( i ) : - this.slice( i, i + 1 ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ), - "slice", slice.call(arguments).join(",") ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map(this, function( elem, i ) { - return callback.call( elem, i, elem ); - })); - }, - - end: function() { - return this.prevObject || this.constructor(null); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: [].sort, - splice: [].splice -}; - -// Give the init function the jQuery prototype for later instantiation -jQuery.fn.init.prototype = jQuery.fn; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[0] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction(target) ) { - target = {}; - } - - // extend jQuery itself if only one argument is passed - if ( length === i ) { - target = this; - --i; - } - - for ( ; i < length; i++ ) { - // Only deal with non-null/undefined values - if ( (options = arguments[ i ]) != null ) { - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray(src) ? src : []; - - } else { - clone = src && jQuery.isPlainObject(src) ? src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend({ - noConflict: function( deep ) { - if ( window.$ === jQuery ) { - window.$ = _$; - } - - if ( deep && window.jQuery === jQuery ) { - window.jQuery = _jQuery; - } - - return jQuery; - }, - - // Is the DOM ready to be used? Set to true once it occurs. 
- isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - // Either a released hold or an DOMready/load event and not yet ready - if ( (wait === true && !--jQuery.readyWait) || (wait !== true && !jQuery.isReady) ) { - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). - if ( !document.body ) { - return setTimeout( jQuery.ready, 1 ); - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.fireWith( document, [ jQuery ] ); - - // Trigger any bound ready events - if ( jQuery.fn.trigger ) { - jQuery( document ).trigger( "ready" ).off( "ready" ); - } - } - }, - - bindReady: function() { - if ( readyList ) { - return; - } - - readyList = jQuery.Callbacks( "once memory" ); - - // Catch cases where $(document).ready() is called after the - // browser event has already occurred. 
- if ( document.readyState === "complete" ) { - // Handle it asynchronously to allow scripts the opportunity to delay ready - return setTimeout( jQuery.ready, 1 ); - } - - // Mozilla, Opera and webkit nightlies currently support this event - if ( document.addEventListener ) { - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", jQuery.ready, false ); - - // If IE event model is used - } else if ( document.attachEvent ) { - // ensure firing before onload, - // maybe late but safe also for iframes - document.attachEvent( "onreadystatechange", DOMContentLoaded ); - - // A fallback to window.onload, that will always work - window.attachEvent( "onload", jQuery.ready ); - - // If IE and not a frame - // continually check to see if the document is ready - var toplevel = false; - - try { - toplevel = window.frameElement == null; - } catch(e) {} - - if ( document.documentElement.doScroll && toplevel ) { - doScrollCheck(); - } - } - }, - - // See test/unit/core.js for details concerning isFunction. - // Since version 1.3, DOM methods and functions like alert - // aren't supported. They return false on IE (#2968). - isFunction: function( obj ) { - return jQuery.type(obj) === "function"; - }, - - isArray: Array.isArray || function( obj ) { - return jQuery.type(obj) === "array"; - }, - - isWindow: function( obj ) { - return obj != null && obj == obj.window; - }, - - isNumeric: function( obj ) { - return !isNaN( parseFloat(obj) ) && isFinite( obj ); - }, - - type: function( obj ) { - return obj == null ? - String( obj ) : - class2type[ toString.call(obj) ] || "object"; - }, - - isPlainObject: function( obj ) { - // Must be an Object. - // Because of IE, we also have to check the presence of the constructor property. 
- // Make sure that DOM nodes and window objects don't pass through, as well - if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { - return false; - } - - try { - // Not own constructor property must be Object - if ( obj.constructor && - !hasOwn.call(obj, "constructor") && - !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { - return false; - } - } catch ( e ) { - // IE8,9 Will throw exceptions on certain host objects #9897 - return false; - } - - // Own properties are enumerated firstly, so to speed up, - // if last one is own, then all properties are own. - - var key; - for ( key in obj ) {} - - return key === undefined || hasOwn.call( obj, key ); - }, - - isEmptyObject: function( obj ) { - for ( var name in obj ) { - return false; - } - return true; - }, - - error: function( msg ) { - throw new Error( msg ); - }, - - parseJSON: function( data ) { - if ( typeof data !== "string" || !data ) { - return null; - } - - // Make sure leading/trailing whitespace is removed (IE can't handle it) - data = jQuery.trim( data ); - - // Attempt to parse using the native JSON parser first - if ( window.JSON && window.JSON.parse ) { - return window.JSON.parse( data ); - } - - // Make sure the incoming data is actual JSON - // Logic borrowed from http://json.org/json2.js - if ( rvalidchars.test( data.replace( rvalidescape, "@" ) - .replace( rvalidtokens, "]" ) - .replace( rvalidbraces, "")) ) { - - return ( new Function( "return " + data ) )(); - - } - jQuery.error( "Invalid JSON: " + data ); - }, - - // Cross-browser xml parsing - parseXML: function( data ) { - if ( typeof data !== "string" || !data ) { - return null; - } - var xml, tmp; - try { - if ( window.DOMParser ) { // Standard - tmp = new DOMParser(); - xml = tmp.parseFromString( data , "text/xml" ); - } else { // IE - xml = new ActiveXObject( "Microsoft.XMLDOM" ); - xml.async = "false"; - xml.loadXML( data ); - } - } catch( e ) { - xml = undefined; - } - if ( !xml || 
!xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; - }, - - noop: function() {}, - - // Evaluates a script in a global context - // Workarounds based on findings by Jim Driscoll - // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context - globalEval: function( data ) { - if ( data && rnotwhite.test( data ) ) { - // We use execScript on Internet Explorer - // We use an anonymous function so that context is window - // rather than jQuery in Firefox - ( window.execScript || function( data ) { - window[ "eval" ].call( window, data ); - } )( data ); - } - }, - - // Convert dashed to camelCase; used by the css and data modules - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toUpperCase() === name.toUpperCase(); - }, - - // args is for internal usage only - each: function( object, callback, args ) { - var name, i = 0, - length = object.length, - isObj = length === undefined || jQuery.isFunction( object ); - - if ( args ) { - if ( isObj ) { - for ( name in object ) { - if ( callback.apply( object[ name ], args ) === false ) { - break; - } - } - } else { - for ( ; i < length; ) { - if ( callback.apply( object[ i++ ], args ) === false ) { - break; - } - } - } - - // A special, fast, case for the most common use of each - } else { - if ( isObj ) { - for ( name in object ) { - if ( callback.call( object[ name ], name, object[ name ] ) === false ) { - break; - } - } - } else { - for ( ; i < length; ) { - if ( callback.call( object[ i ], i, object[ i++ ] ) === false ) { - break; - } - } - } - } - - return object; - }, - - // Use native String.trim function wherever possible - trim: trim ? - function( text ) { - return text == null ? 
- "" : - trim.call( text ); - } : - - // Otherwise use our own trimming functionality - function( text ) { - return text == null ? - "" : - text.toString().replace( trimLeft, "" ).replace( trimRight, "" ); - }, - - // results is for internal usage only - makeArray: function( array, results ) { - var ret = results || []; - - if ( array != null ) { - // The window, strings (and functions) also have 'length' - // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930 - var type = jQuery.type( array ); - - if ( array.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( array ) ) { - push.call( ret, array ); - } else { - jQuery.merge( ret, array ); - } - } - - return ret; - }, - - inArray: function( elem, array, i ) { - var len; - - if ( array ) { - if ( indexOf ) { - return indexOf.call( array, elem, i ); - } - - len = array.length; - i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; - - for ( ; i < len; i++ ) { - // Skip accessing in sparse arrays - if ( i in array && array[ i ] === elem ) { - return i; - } - } - } - - return -1; - }, - - merge: function( first, second ) { - var i = first.length, - j = 0; - - if ( typeof second.length === "number" ) { - for ( var l = second.length; j < l; j++ ) { - first[ i++ ] = second[ j ]; - } - - } else { - while ( second[j] !== undefined ) { - first[ i++ ] = second[ j++ ]; - } - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, inv ) { - var ret = [], retVal; - inv = !!inv; - - // Go through the array, only saving the items - // that pass the validator function - for ( var i = 0, length = elems.length; i < length; i++ ) { - retVal = !!callback( elems[ i ], i ); - if ( inv !== retVal ) { - ret.push( elems[ i ] ); - } - } - - return ret; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var value, key, ret = [], - i = 0, - length = elems.length, - // jquery objects are treated as arrays - isArray = elems 
instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ; - - // Go through the array, translating each of the items to their - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - - // Go through every key on the object, - } else { - for ( key in elems ) { - value = callback( elems[ key ], key, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - } - - // Flatten any nested arrays - return ret.concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - if ( typeof context === "string" ) { - var tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - var args = slice.call( arguments, 2 ), - proxy = function() { - return fn.apply( context, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || proxy.guid || jQuery.guid++; - - return proxy; - }, - - // Mutifunctional method to get and set values to a collection - // The value/s can optionally be executed if it's a function - access: function( elems, fn, key, value, chainable, emptyGet, pass ) { - var exec, - bulk = key == null, - i = 0, - length = elems.length; - - // Sets many values - if ( key && typeof key === "object" ) { - for ( i in key ) { - jQuery.access( elems, fn, i, key[i], 1, emptyGet, value ); - } - chainable = 1; - - // Sets one value - } else if ( value !== undefined ) { - // Optionally, function values get executed if exec is true - exec = pass === undefined && jQuery.isFunction( value ); - - if ( bulk ) { - // Bulk operations only iterate when executing function values - if ( exec ) { - exec = fn; - fn = function( elem, key, value ) { - return exec.call( jQuery( elem ), value ); - }; - - // Otherwise they run against the entire set - } else { - fn.call( elems, value ); - fn = null; - } - } - - if ( fn ) { - for (; i < length; i++ ) { - fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass ); - } - } - - chainable = 1; - } - - return chainable ? - elems : - - // Gets - bulk ? - fn.call( elems ) : - length ? fn( elems[0], key ) : emptyGet; - }, - - now: function() { - return ( new Date() ).getTime(); - }, - - // Use of jQuery.browser is frowned upon. 
- // More details: http://docs.jquery.com/Utilities/jQuery.browser - uaMatch: function( ua ) { - ua = ua.toLowerCase(); - - var match = rwebkit.exec( ua ) || - ropera.exec( ua ) || - rmsie.exec( ua ) || - ua.indexOf("compatible") < 0 && rmozilla.exec( ua ) || - []; - - return { browser: match[1] || "", version: match[2] || "0" }; - }, - - sub: function() { - function jQuerySub( selector, context ) { - return new jQuerySub.fn.init( selector, context ); - } - jQuery.extend( true, jQuerySub, this ); - jQuerySub.superclass = this; - jQuerySub.fn = jQuerySub.prototype = this(); - jQuerySub.fn.constructor = jQuerySub; - jQuerySub.sub = this.sub; - jQuerySub.fn.init = function init( selector, context ) { - if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) { - context = jQuerySub( context ); - } - - return jQuery.fn.init.call( this, selector, context, rootjQuerySub ); - }; - jQuerySub.fn.init.prototype = jQuerySub.fn; - var rootjQuerySub = jQuerySub(document); - return jQuerySub; - }, - - browser: {} -}); - -// Populate the class2type map -jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -}); - -browserMatch = jQuery.uaMatch( userAgent ); -if ( browserMatch.browser ) { - jQuery.browser[ browserMatch.browser ] = true; - jQuery.browser.version = browserMatch.version; -} - -// Deprecated, use jQuery.browser.webkit instead -if ( jQuery.browser.webkit ) { - jQuery.browser.safari = true; -} - -// IE doesn't match non-breaking spaces with \s -if ( rnotwhite.test( "\xA0" ) ) { - trimLeft = /^[\s\xA0]+/; - trimRight = /[\s\xA0]+$/; -} - -// All jQuery objects should point back to these -rootjQuery = jQuery(document); - -// Cleanup functions for the document ready method -if ( document.addEventListener ) { - DOMContentLoaded = function() { - document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false ); - jQuery.ready(); - }; - 
-} else if ( document.attachEvent ) { - DOMContentLoaded = function() { - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). - if ( document.readyState === "complete" ) { - document.detachEvent( "onreadystatechange", DOMContentLoaded ); - jQuery.ready(); - } - }; -} - -// The DOM ready check for Internet Explorer -function doScrollCheck() { - if ( jQuery.isReady ) { - return; - } - - try { - // If IE is used, use the trick by Diego Perini - // http://javascript.nwbox.com/IEContentLoaded/ - document.documentElement.doScroll("left"); - } catch(e) { - setTimeout( doScrollCheck, 1 ); - return; - } - - // and execute any waiting functions - jQuery.ready(); -} - -return jQuery; - -})(); - - -// String to Object flags format cache -var flagsCache = {}; - -// Convert String-formatted flags into Object-formatted ones and store in cache -function createFlags( flags ) { - var object = flagsCache[ flags ] = {}, - i, length; - flags = flags.split( /\s+/ ); - for ( i = 0, length = flags.length; i < length; i++ ) { - object[ flags[i] ] = true; - } - return object; -} - -/* - * Create a callback list using the following parameters: - * - * flags: an optional list of space-separated flags that will change how - * the callback list behaves - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. 
- * - * Possible flags: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( flags ) { - - // Convert flags from String-formatted to Object-formatted - // (we check in cache first) - flags = flags ? ( flagsCache[ flags ] || createFlags( flags ) ) : {}; - - var // Actual callback list - list = [], - // Stack of fire calls for repeatable lists - stack = [], - // Last fire value (for non-forgettable lists) - memory, - // Flag to know if list was already fired - fired, - // Flag to know if list is currently firing - firing, - // First callback to fire (used internally by add and fireWith) - firingStart, - // End of the loop when firing - firingLength, - // Index of currently firing callback (modified by remove if needed) - firingIndex, - // Add one or several callbacks to the list - add = function( args ) { - var i, - length, - elem, - type, - actual; - for ( i = 0, length = args.length; i < length; i++ ) { - elem = args[ i ]; - type = jQuery.type( elem ); - if ( type === "array" ) { - // Inspect recursively - add( elem ); - } else if ( type === "function" ) { - // Add if not in unique mode and callback is not in - if ( !flags.unique || !self.has( elem ) ) { - list.push( elem ); - } - } - } - }, - // Fire callbacks - fire = function( context, args ) { - args = args || []; - memory = !flags.memory || [ context, args ]; - fired = true; - firing = true; - firingIndex = firingStart || 0; - firingStart = 0; - firingLength = list.length; - for ( ; list && firingIndex < firingLength; firingIndex++ ) { - if ( list[ firingIndex ].apply( context, args ) === false && 
flags.stopOnFalse ) { - memory = true; // Mark as halted - break; - } - } - firing = false; - if ( list ) { - if ( !flags.once ) { - if ( stack && stack.length ) { - memory = stack.shift(); - self.fireWith( memory[ 0 ], memory[ 1 ] ); - } - } else if ( memory === true ) { - self.disable(); - } else { - list = []; - } - } - }, - // Actual Callbacks object - self = { - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - var length = list.length; - add( arguments ); - // Do we need to add the callbacks to the - // current firing batch? - if ( firing ) { - firingLength = list.length; - // With memory, if we're not firing then - // we should call right away, unless previous - // firing was halted (stopOnFalse) - } else if ( memory && memory !== true ) { - firingStart = length; - fire( memory[ 0 ], memory[ 1 ] ); - } - } - return this; - }, - // Remove a callback from the list - remove: function() { - if ( list ) { - var args = arguments, - argIndex = 0, - argLength = args.length; - for ( ; argIndex < argLength ; argIndex++ ) { - for ( var i = 0; i < list.length; i++ ) { - if ( args[ argIndex ] === list[ i ] ) { - // Handle firingIndex and firingLength - if ( firing ) { - if ( i <= firingLength ) { - firingLength--; - if ( i <= firingIndex ) { - firingIndex--; - } - } - } - // Remove the element - list.splice( i--, 1 ); - // If we have some unicity property then - // we only need to do this once - if ( flags.unique ) { - break; - } - } - } - } - } - return this; - }, - // Control if a given callback is in the list - has: function( fn ) { - if ( list ) { - var i = 0, - length = list.length; - for ( ; i < length; i++ ) { - if ( fn === list[ i ] ) { - return true; - } - } - } - return false; - }, - // Remove all callbacks from the list - empty: function() { - list = []; - return this; - }, - // Have the list do nothing anymore - disable: function() { - list = stack = memory = undefined; - return this; - }, - // Is it disabled? 
- disabled: function() { - return !list; - }, - // Lock the list in its current state - lock: function() { - stack = undefined; - if ( !memory || memory === true ) { - self.disable(); - } - return this; - }, - // Is it locked? - locked: function() { - return !stack; - }, - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( stack ) { - if ( firing ) { - if ( !flags.once ) { - stack.push( [ context, args ] ); - } - } else if ( !( flags.once && memory ) ) { - fire( context, args ); - } - } - return this; - }, - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - - - -var // Static reference to slice - sliceDeferred = [].slice; - -jQuery.extend({ - - Deferred: function( func ) { - var doneList = jQuery.Callbacks( "once memory" ), - failList = jQuery.Callbacks( "once memory" ), - progressList = jQuery.Callbacks( "memory" ), - state = "pending", - lists = { - resolve: doneList, - reject: failList, - notify: progressList - }, - promise = { - done: doneList.add, - fail: failList.add, - progress: progressList.add, - - state: function() { - return state; - }, - - // Deprecated - isResolved: doneList.fired, - isRejected: failList.fired, - - then: function( doneCallbacks, failCallbacks, progressCallbacks ) { - deferred.done( doneCallbacks ).fail( failCallbacks ).progress( progressCallbacks ); - return this; - }, - always: function() { - deferred.done.apply( deferred, arguments ).fail.apply( deferred, arguments ); - return this; - }, - pipe: function( fnDone, fnFail, fnProgress ) { - return jQuery.Deferred(function( newDefer ) { - jQuery.each( { - done: [ fnDone, "resolve" ], - fail: [ fnFail, "reject" ], - progress: [ fnProgress, "notify" ] - }, function( handler, data ) { - var fn = data[ 0 ], - action = 
data[ 1 ], - returned; - if ( jQuery.isFunction( fn ) ) { - deferred[ handler ](function() { - returned = fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise().then( newDefer.resolve, newDefer.reject, newDefer.notify ); - } else { - newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] ); - } - }); - } else { - deferred[ handler ]( newDefer[ action ] ); - } - }); - }).promise(); - }, - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - if ( obj == null ) { - obj = promise; - } else { - for ( var key in promise ) { - obj[ key ] = promise[ key ]; - } - } - return obj; - } - }, - deferred = promise.promise({}), - key; - - for ( key in lists ) { - deferred[ key ] = lists[ key ].fire; - deferred[ key + "With" ] = lists[ key ].fireWith; - } - - // Handle state - deferred.done( function() { - state = "resolved"; - }, failList.disable, progressList.lock ).fail( function() { - state = "rejected"; - }, doneList.disable, progressList.lock ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( firstParam ) { - var args = sliceDeferred.call( arguments, 0 ), - i = 0, - length = args.length, - pValues = new Array( length ), - count = length, - pCount = length, - deferred = length <= 1 && firstParam && jQuery.isFunction( firstParam.promise ) ? - firstParam : - jQuery.Deferred(), - promise = deferred.promise(); - function resolveFunc( i ) { - return function( value ) { - args[ i ] = arguments.length > 1 ? sliceDeferred.call( arguments, 0 ) : value; - if ( !( --count ) ) { - deferred.resolveWith( deferred, args ); - } - }; - } - function progressFunc( i ) { - return function( value ) { - pValues[ i ] = arguments.length > 1 ? 
sliceDeferred.call( arguments, 0 ) : value; - deferred.notifyWith( promise, pValues ); - }; - } - if ( length > 1 ) { - for ( ; i < length; i++ ) { - if ( args[ i ] && args[ i ].promise && jQuery.isFunction( args[ i ].promise ) ) { - args[ i ].promise().then( resolveFunc(i), deferred.reject, progressFunc(i) ); - } else { - --count; - } - } - if ( !count ) { - deferred.resolveWith( deferred, args ); - } - } else if ( deferred !== firstParam ) { - deferred.resolveWith( deferred, length ? [ firstParam ] : [] ); - } - return promise; - } -}); - - - - -jQuery.support = (function() { - - var support, - all, - a, - select, - opt, - input, - fragment, - tds, - events, - eventName, - i, - isSupported, - div = document.createElement( "div" ), - documentElement = document.documentElement; - - // Preliminary tests - div.setAttribute("className", "t"); - div.innerHTML = "
a"; - - all = div.getElementsByTagName( "*" ); - a = div.getElementsByTagName( "a" )[ 0 ]; - - // Can't get basic test support - if ( !all || !all.length || !a ) { - return {}; - } - - // First batch of supports tests - select = document.createElement( "select" ); - opt = select.appendChild( document.createElement("option") ); - input = div.getElementsByTagName( "input" )[ 0 ]; - - support = { - // IE strips leading whitespace when .innerHTML is used - leadingWhitespace: ( div.firstChild.nodeType === 3 ), - - // Make sure that tbody elements aren't automatically inserted - // IE will insert them into empty tables - tbody: !div.getElementsByTagName("tbody").length, - - // Make sure that link elements get serialized correctly by innerHTML - // This requires a wrapper element in IE - htmlSerialize: !!div.getElementsByTagName("link").length, - - // Get the style information from getAttribute - // (IE uses .cssText instead) - style: /top/.test( a.getAttribute("style") ), - - // Make sure that URLs aren't manipulated - // (IE normalizes it by default) - hrefNormalized: ( a.getAttribute("href") === "/a" ), - - // Make sure that element opacity exists - // (IE uses filter instead) - // Use a regex to work around a WebKit issue. See #5145 - opacity: /^0.55/.test( a.style.opacity ), - - // Verify style float existence - // (IE uses styleFloat instead of cssFloat) - cssFloat: !!a.style.cssFloat, - - // Make sure that if no value is specified for a checkbox - // that it defaults to "on". - // (WebKit defaults to "" instead) - checkOn: ( input.value === "on" ), - - // Make sure that a selected-by-default option has a working selected property. - // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) - optSelected: opt.selected, - - // Test setAttribute on camelCase class. 
If it works, we need attrFixes when doing get/setAttribute (ie6/7) - getSetAttribute: div.className !== "t", - - // Tests for enctype support on a form(#6743) - enctype: !!document.createElement("form").enctype, - - // Makes sure cloning an html5 element does not cause problems - // Where outerHTML is undefined, this still works - html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav>", - - // Will be defined later - submitBubbles: true, - changeBubbles: true, - focusinBubbles: false, - deleteExpando: true, - noCloneEvent: true, - inlineBlockNeedsLayout: false, - shrinkWrapBlocks: false, - reliableMarginRight: true, - pixelMargin: true - }; - - // jQuery.boxModel DEPRECATED in 1.3, use jQuery.support.boxModel instead - jQuery.boxModel = support.boxModel = (document.compatMode === "CSS1Compat"); - - // Make sure checked status is properly cloned - input.checked = true; - support.noCloneChecked = input.cloneNode( true ).checked; - - // Make sure that the options inside disabled selects aren't marked as disabled - // (WebKit marks them as disabled) - select.disabled = true; - support.optDisabled = !opt.disabled; - - // Test to see if it's possible to delete an expando from an element - // Fails in Internet Explorer - try { - delete div.test; - } catch( e ) { - support.deleteExpando = false; - } - - if ( !div.addEventListener && div.attachEvent && div.fireEvent ) { - div.attachEvent( "onclick", function() { - // Cloning a node shouldn't copy over any - // bound event handlers (IE does this) - support.noCloneEvent = false; - }); - div.cloneNode( true ).fireEvent( "onclick" ); - } - - // Check if a radio maintains its value - // after being appended to the DOM - input = document.createElement("input"); - input.value = "t"; - input.setAttribute("type", "radio"); - support.radioValue = input.value === "t"; - - input.setAttribute("checked", "checked"); - - // #11217 - WebKit loses check when the name is after the checked attribute - 
input.setAttribute( "name", "t" ); - - div.appendChild( input ); - fragment = document.createDocumentFragment(); - fragment.appendChild( div.lastChild ); - - // WebKit doesn't clone checked state correctly in fragments - support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Check if a disconnected checkbox will retain its checked - // value of true after appended to the DOM (IE6/7) - support.appendChecked = input.checked; - - fragment.removeChild( input ); - fragment.appendChild( div ); - - // Technique from Juriy Zaytsev - // http://perfectionkills.com/detecting-event-support-without-browser-sniffing/ - // We only care about the case where non-standard event systems - // are used, namely in IE. Short-circuiting here helps us to - // avoid an eval call (in setAttribute) which can cause CSP - // to go haywire. See: https://developer.mozilla.org/en/Security/CSP - if ( div.attachEvent ) { - for ( i in { - submit: 1, - change: 1, - focusin: 1 - }) { - eventName = "on" + i; - isSupported = ( eventName in div ); - if ( !isSupported ) { - div.setAttribute( eventName, "return;" ); - isSupported = ( typeof div[ eventName ] === "function" ); - } - support[ i + "Bubbles" ] = isSupported; - } - } - - fragment.removeChild( div ); - - // Null elements to avoid leaks in IE - fragment = select = opt = div = input = null; - - // Run tests that need a body at doc ready - jQuery(function() { - var container, outer, inner, table, td, offsetSupport, - marginDiv, conMarginTop, style, html, positionTopLeftWidthHeight, - paddingMarginBorderVisibility, paddingMarginBorder, - body = document.getElementsByTagName("body")[0]; - - if ( !body ) { - // Return for frameset docs that don't have a body - return; - } - - conMarginTop = 1; - paddingMarginBorder = "padding:0;margin:0;border:"; - positionTopLeftWidthHeight = "position:absolute;top:0;left:0;width:1px;height:1px;"; - paddingMarginBorderVisibility = paddingMarginBorder + "0;visibility:hidden;"; - 
style = "style='" + positionTopLeftWidthHeight + paddingMarginBorder + "5px solid #000;"; - html = "
" + - "" + - "
"; - - container = document.createElement("div"); - container.style.cssText = paddingMarginBorderVisibility + "width:0;height:0;position:static;top:0;margin-top:" + conMarginTop + "px"; - body.insertBefore( container, body.firstChild ); - - // Construct the test element - div = document.createElement("div"); - container.appendChild( div ); - - // Check if table cells still have offsetWidth/Height when they are set - // to display:none and there are still other visible table cells in a - // table row; if so, offsetWidth/Height are not reliable for use when - // determining if an element has been hidden directly using - // display:none (it is still safe to use offsets if a parent element is - // hidden; don safety goggles and see bug #4512 for more information). - // (only IE 8 fails this test) - div.innerHTML = "
t
"; - tds = div.getElementsByTagName( "td" ); - isSupported = ( tds[ 0 ].offsetHeight === 0 ); - - tds[ 0 ].style.display = ""; - tds[ 1 ].style.display = "none"; - - // Check if empty table cells still have offsetWidth/Height - // (IE <= 8 fail this test) - support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 ); - - // Check if div with explicit width and no margin-right incorrectly - // gets computed margin-right based on width of container. For more - // info see bug #3333 - // Fails in WebKit before Feb 2011 nightlies - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - if ( window.getComputedStyle ) { - div.innerHTML = ""; - marginDiv = document.createElement( "div" ); - marginDiv.style.width = "0"; - marginDiv.style.marginRight = "0"; - div.style.width = "2px"; - div.appendChild( marginDiv ); - support.reliableMarginRight = - ( parseInt( ( window.getComputedStyle( marginDiv, null ) || { marginRight: 0 } ).marginRight, 10 ) || 0 ) === 0; - } - - if ( typeof div.style.zoom !== "undefined" ) { - // Check if natively block-level elements act like inline-block - // elements when setting their display to 'inline' and giving - // them layout - // (IE < 8 does this) - div.innerHTML = ""; - div.style.width = div.style.padding = "1px"; - div.style.border = 0; - div.style.overflow = "hidden"; - div.style.display = "inline"; - div.style.zoom = 1; - support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 ); - - // Check if elements with layout shrink-wrap their children - // (IE 6 does this) - div.style.display = "block"; - div.style.overflow = "visible"; - div.innerHTML = "
"; - support.shrinkWrapBlocks = ( div.offsetWidth !== 3 ); - } - - div.style.cssText = positionTopLeftWidthHeight + paddingMarginBorderVisibility; - div.innerHTML = html; - - outer = div.firstChild; - inner = outer.firstChild; - td = outer.nextSibling.firstChild.firstChild; - - offsetSupport = { - doesNotAddBorder: ( inner.offsetTop !== 5 ), - doesAddBorderForTableAndCells: ( td.offsetTop === 5 ) - }; - - inner.style.position = "fixed"; - inner.style.top = "20px"; - - // safari subtracts parent border width here which is 5px - offsetSupport.fixedPosition = ( inner.offsetTop === 20 || inner.offsetTop === 15 ); - inner.style.position = inner.style.top = ""; - - outer.style.overflow = "hidden"; - outer.style.position = "relative"; - - offsetSupport.subtractsBorderForOverflowNotVisible = ( inner.offsetTop === -5 ); - offsetSupport.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== conMarginTop ); - - if ( window.getComputedStyle ) { - div.style.marginTop = "1%"; - support.pixelMargin = ( window.getComputedStyle( div, null ) || { marginTop: 0 } ).marginTop !== "1%"; - } - - if ( typeof container.style.zoom !== "undefined" ) { - container.style.zoom = 1; - } - - body.removeChild( container ); - marginDiv = div = container = null; - - jQuery.extend( support, offsetSupport ); - }); - - return support; -})(); - - - - -var rbrace = /^(?:\{.*\}|\[.*\])$/, - rmultiDash = /([A-Z])/g; - -jQuery.extend({ - cache: {}, - - // Please use with caution - uuid: 0, - - // Unique for each copy of jQuery on the page - // Non-digits removed to match rinlinejQuery - expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ), - - // The following elements throw uncatchable exceptions if you - // attempt to add expando properties to them. - noData: { - "embed": true, - // Ban all objects except for Flash (which handle expandos) - "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000", - "applet": true - }, - - hasData: function( elem ) { - elem = elem.nodeType ? 
jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; - return !!elem && !isEmptyDataObject( elem ); - }, - - data: function( elem, name, data, pvt /* Internal Use Only */ ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var privateCache, thisCache, ret, - internalKey = jQuery.expando, - getByName = typeof name === "string", - - // We have to handle DOM nodes and JS objects differently because IE6-7 - // can't GC object references properly across the DOM-JS boundary - isNode = elem.nodeType, - - // Only DOM nodes need the global jQuery cache; JS object data is - // attached directly to the object so GC can occur automatically - cache = isNode ? jQuery.cache : elem, - - // Only defining an ID for JS objects if its cache already exists allows - // the code to shortcut on the same path as a DOM node with no cache - id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey, - isEvents = name === "events"; - - // Avoid doing any more work than we need to when trying to get data on an - // object that has no data at all - if ( (!id || !cache[id] || (!isEvents && !pvt && !cache[id].data)) && getByName && data === undefined ) { - return; - } - - if ( !id ) { - // Only DOM nodes need a new unique ID for each element since their data - // ends up in the global cache - if ( isNode ) { - elem[ internalKey ] = id = ++jQuery.uuid; - } else { - id = internalKey; - } - } - - if ( !cache[ id ] ) { - cache[ id ] = {}; - - // Avoids exposing jQuery metadata on plain JS objects when the object - // is serialized using JSON.stringify - if ( !isNode ) { - cache[ id ].toJSON = jQuery.noop; - } - } - - // An object can be passed to jQuery.data instead of a key/value pair; this gets - // shallow copied over onto the existing cache - if ( typeof name === "object" || typeof name === "function" ) { - if ( pvt ) { - cache[ id ] = jQuery.extend( cache[ id ], name ); - } else { - cache[ id ].data = jQuery.extend( cache[ id ].data, name ); - } - } - - privateCache 
= thisCache = cache[ id ]; - - // jQuery data() is stored in a separate object inside the object's internal data - // cache in order to avoid key collisions between internal data and user-defined - // data. - if ( !pvt ) { - if ( !thisCache.data ) { - thisCache.data = {}; - } - - thisCache = thisCache.data; - } - - if ( data !== undefined ) { - thisCache[ jQuery.camelCase( name ) ] = data; - } - - // Users should not attempt to inspect the internal events object using jQuery.data, - // it is undocumented and subject to change. But does anyone listen? No. - if ( isEvents && !thisCache[ name ] ) { - return privateCache.events; - } - - // Check for both converted-to-camel and non-converted data property names - // If a data property was specified - if ( getByName ) { - - // First Try to find as-is property data - ret = thisCache[ name ]; - - // Test for null|undefined property data - if ( ret == null ) { - - // Try to find the camelCased property - ret = thisCache[ jQuery.camelCase( name ) ]; - } - } else { - ret = thisCache; - } - - return ret; - }, - - removeData: function( elem, name, pvt /* Internal Use Only */ ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var thisCache, i, l, - - // Reference to internal data cache key - internalKey = jQuery.expando, - - isNode = elem.nodeType, - - // See jQuery.data for more information - cache = isNode ? jQuery.cache : elem, - - // See jQuery.data for more information - id = isNode ? elem[ internalKey ] : internalKey; - - // If there is already no cache entry for this object, there is no - // purpose in continuing - if ( !cache[ id ] ) { - return; - } - - if ( name ) { - - thisCache = pvt ? 
cache[ id ] : cache[ id ].data; - - if ( thisCache ) { - - // Support array or space separated string names for data keys - if ( !jQuery.isArray( name ) ) { - - // try the string as a key before any manipulation - if ( name in thisCache ) { - name = [ name ]; - } else { - - // split the camel cased version by spaces unless a key with the spaces exists - name = jQuery.camelCase( name ); - if ( name in thisCache ) { - name = [ name ]; - } else { - name = name.split( " " ); - } - } - } - - for ( i = 0, l = name.length; i < l; i++ ) { - delete thisCache[ name[i] ]; - } - - // If there is no data left in the cache, we want to continue - // and let the cache object itself get destroyed - if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) { - return; - } - } - } - - // See jQuery.data for more information - if ( !pvt ) { - delete cache[ id ].data; - - // Don't destroy the parent cache unless the internal data object - // had been the only thing left in it - if ( !isEmptyDataObject(cache[ id ]) ) { - return; - } - } - - // Browsers that fail expando deletion also refuse to delete expandos on - // the window, but it will allow it on all other JS objects; other browsers - // don't care - // Ensure that `cache` is not a window object #10080 - if ( jQuery.support.deleteExpando || !cache.setInterval ) { - delete cache[ id ]; - } else { - cache[ id ] = null; - } - - // We destroyed the cache and need to eliminate the expando on the node to avoid - // false lookups in the cache for entries that no longer exist - if ( isNode ) { - // IE does not allow us to delete expando properties from nodes, - // nor does it have a removeAttribute function on Document nodes; - // we must handle all of these cases - if ( jQuery.support.deleteExpando ) { - delete elem[ internalKey ]; - } else if ( elem.removeAttribute ) { - elem.removeAttribute( internalKey ); - } else { - elem[ internalKey ] = null; - } - } - }, - - // For internal use only. 
- _data: function( elem, name, data ) { - return jQuery.data( elem, name, data, true ); - }, - - // A method for determining if a DOM node can handle the data expando - acceptData: function( elem ) { - if ( elem.nodeName ) { - var match = jQuery.noData[ elem.nodeName.toLowerCase() ]; - - if ( match ) { - return !(match === true || elem.getAttribute("classid") !== match); - } - } - - return true; - } -}); - -jQuery.fn.extend({ - data: function( key, value ) { - var parts, part, attr, name, l, - elem = this[0], - i = 0, - data = null; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = jQuery.data( elem ); - - if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { - attr = elem.attributes; - for ( l = attr.length; i < l; i++ ) { - name = attr[i].name; - - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.substring(5) ); - - dataAttr( elem, name, data[ name ] ); - } - } - jQuery._data( elem, "parsedAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each(function() { - jQuery.data( this, key ); - }); - } - - parts = key.split( ".", 2 ); - parts[1] = parts[1] ? "." + parts[1] : ""; - part = parts[1] + "!"; - - return jQuery.access( this, function( value ) { - - if ( value === undefined ) { - data = this.triggerHandler( "getData" + part, [ parts[0] ] ); - - // Try to fetch any internally stored data first - if ( data === undefined && elem ) { - data = jQuery.data( elem, key ); - data = dataAttr( elem, key, data ); - } - - return data === undefined && parts[1] ? 
- this.data( parts[0] ) : - data; - } - - parts[1] = value; - this.each(function() { - var self = jQuery( this ); - - self.triggerHandler( "setData" + part, parts ); - jQuery.data( this, key, value ); - self.triggerHandler( "changeData" + part, parts ); - }); - }, null, value, arguments.length > 1, null, false ); - }, - - removeData: function( key ) { - return this.each(function() { - jQuery.removeData( this, key ); - }); - } -}); - -function dataAttr( elem, key, data ) { - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - - var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); - - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = data === "true" ? true : - data === "false" ? false : - data === "null" ? null : - jQuery.isNumeric( data ) ? +data : - rbrace.test( data ) ? jQuery.parseJSON( data ) : - data; - } catch( e ) {} - - // Make sure we set the data so it isn't changed later - jQuery.data( elem, key, data ); - - } else { - data = undefined; - } - } - - return data; -} - -// checks a cache object for emptiness -function isEmptyDataObject( obj ) { - for ( var name in obj ) { - - // if the public data object is empty, the private is still empty - if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { - continue; - } - if ( name !== "toJSON" ) { - return false; - } - } - - return true; -} - - - - -function handleQueueMarkDefer( elem, type, src ) { - var deferDataKey = type + "defer", - queueDataKey = type + "queue", - markDataKey = type + "mark", - defer = jQuery._data( elem, deferDataKey ); - if ( defer && - ( src === "queue" || !jQuery._data(elem, queueDataKey) ) && - ( src === "mark" || !jQuery._data(elem, markDataKey) ) ) { - // Give room for hard-coded callbacks to fire first - // and eventually mark/queue something else on the element - setTimeout( function() { - if ( !jQuery._data( elem, 
queueDataKey ) && - !jQuery._data( elem, markDataKey ) ) { - jQuery.removeData( elem, deferDataKey, true ); - defer.fire(); - } - }, 0 ); - } -} - -jQuery.extend({ - - _mark: function( elem, type ) { - if ( elem ) { - type = ( type || "fx" ) + "mark"; - jQuery._data( elem, type, (jQuery._data( elem, type ) || 0) + 1 ); - } - }, - - _unmark: function( force, elem, type ) { - if ( force !== true ) { - type = elem; - elem = force; - force = false; - } - if ( elem ) { - type = type || "fx"; - var key = type + "mark", - count = force ? 0 : ( (jQuery._data( elem, key ) || 1) - 1 ); - if ( count ) { - jQuery._data( elem, key, count ); - } else { - jQuery.removeData( elem, key, true ); - handleQueueMarkDefer( elem, type, "mark" ); - } - } - }, - - queue: function( elem, type, data ) { - var q; - if ( elem ) { - type = ( type || "fx" ) + "queue"; - q = jQuery._data( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !q || jQuery.isArray(data) ) { - q = jQuery._data( elem, type, jQuery.makeArray(data) ); - } else { - q.push( data ); - } - } - return q || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - fn = queue.shift(), - hooks = {}; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - } - - if ( fn ) { - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - jQuery._data( elem, type + ".run", hooks ); - fn.call( elem, function() { - jQuery.dequeue( elem, type ); - }, hooks ); - } - - if ( !queue.length ) { - jQuery.removeData( elem, type + "queue " + type + ".run", true ); - handleQueueMarkDefer( elem, type, "queue" ); - } - } -}); - -jQuery.fn.extend({ - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - 
setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[0], type ); - } - - return data === undefined ? - this : - this.each(function() { - var queue = jQuery.queue( this, type, data ); - - if ( type === "fx" && queue[0] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - }); - }, - dequeue: function( type ) { - return this.each(function() { - jQuery.dequeue( this, type ); - }); - }, - // Based off of the plugin by Clint Helfers, with permission. - // http://blindsignals.com/index.php/2009/07/jquery-delay/ - delay: function( time, type ) { - time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = setTimeout( next, time ); - hooks.stop = function() { - clearTimeout( timeout ); - }; - }); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, object ) { - if ( typeof type !== "string" ) { - object = type; - type = undefined; - } - type = type || "fx"; - var defer = jQuery.Deferred(), - elements = this, - i = elements.length, - count = 1, - deferDataKey = type + "defer", - queueDataKey = type + "queue", - markDataKey = type + "mark", - tmp; - function resolve() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - } - while( i-- ) { - if (( tmp = jQuery.data( elements[ i ], deferDataKey, undefined, true ) || - ( jQuery.data( elements[ i ], queueDataKey, undefined, true ) || - jQuery.data( elements[ i ], markDataKey, undefined, true ) ) && - jQuery.data( elements[ i ], deferDataKey, jQuery.Callbacks( "once memory" ), true ) )) { - count++; - tmp.add( resolve ); - } - } - resolve(); - return defer.promise( object ); - } -}); - - - - -var rclass = /[\n\t\r]/g, - rspace = /\s+/, - rreturn = /\r/g, - rtype = /^(?:button|input)$/i, - rfocusable = 
/^(?:button|input|object|select|textarea)$/i, - rclickable = /^a(?:rea)?$/i, - rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i, - getSetAttribute = jQuery.support.getSetAttribute, - nodeHook, boolHook, fixSpecified; - -jQuery.fn.extend({ - attr: function( name, value ) { - return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each(function() { - jQuery.removeAttr( this, name ); - }); - }, - - prop: function( name, value ) { - return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - name = jQuery.propFix[ name ] || name; - return this.each(function() { - // try/catch handles cases where IE balks (such as removing a property on window) - try { - this[ name ] = undefined; - delete this[ name ]; - } catch( e ) {} - }); - }, - - addClass: function( value ) { - var classNames, i, l, elem, - setClass, c, cl; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).addClass( value.call(this, j, this.className) ); - }); - } - - if ( value && typeof value === "string" ) { - classNames = value.split( rspace ); - - for ( i = 0, l = this.length; i < l; i++ ) { - elem = this[ i ]; - - if ( elem.nodeType === 1 ) { - if ( !elem.className && classNames.length === 1 ) { - elem.className = value; - - } else { - setClass = " " + elem.className + " "; - - for ( c = 0, cl = classNames.length; c < cl; c++ ) { - if ( !~setClass.indexOf( " " + classNames[ c ] + " " ) ) { - setClass += classNames[ c ] + " "; - } - } - elem.className = jQuery.trim( setClass ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classNames, i, l, elem, className, c, cl; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).removeClass( value.call(this, j, this.className) ); - }); - } 
- - if ( (value && typeof value === "string") || value === undefined ) { - classNames = ( value || "" ).split( rspace ); - - for ( i = 0, l = this.length; i < l; i++ ) { - elem = this[ i ]; - - if ( elem.nodeType === 1 && elem.className ) { - if ( value ) { - className = (" " + elem.className + " ").replace( rclass, " " ); - for ( c = 0, cl = classNames.length; c < cl; c++ ) { - className = className.replace(" " + classNames[ c ] + " ", " "); - } - elem.className = jQuery.trim( className ); - - } else { - elem.className = ""; - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isBool = typeof stateVal === "boolean"; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( i ) { - jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); - }); - } - - return this.each(function() { - if ( type === "string" ) { - // toggle individual class names - var className, - i = 0, - self = jQuery( this ), - state = stateVal, - classNames = value.split( rspace ); - - while ( (className = classNames[ i++ ]) ) { - // check each className given, space seperated list - state = isBool ? state : !self.hasClass( className ); - self[ state ? "addClass" : "removeClass" ]( className ); - } - - } else if ( type === "undefined" || type === "boolean" ) { - if ( this.className ) { - // store className if set - jQuery._data( this, "__className__", this.className ); - } - - // toggle whole className - this.className = this.className || value === false ? 
"" : jQuery._data( this, "__className__" ) || ""; - } - }); - }, - - hasClass: function( selector ) { - var className = " " + selector + " ", - i = 0, - l = this.length; - for ( ; i < l; i++ ) { - if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) > -1 ) { - return true; - } - } - - return false; - }, - - val: function( value ) { - var hooks, ret, isFunction, - elem = this[0]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { - return ret; - } - - ret = elem.value; - - return typeof ret === "string" ? - // handle most common string cases - ret.replace(rreturn, "") : - // handle cases where value is null/undef or number - ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each(function( i ) { - var self = jQuery(this), val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, self.val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - } else if ( typeof val === "number" ) { - val += ""; - } else if ( jQuery.isArray( val ) ) { - val = jQuery.map(val, function ( value ) { - return value == null ? "" : value + ""; - }); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - }); - } -}); - -jQuery.extend({ - valHooks: { - option: { - get: function( elem ) { - // attributes.value is undefined in Blackberry 4.7 but - // uses .value. See #6932 - var val = elem.attributes.value; - return !val || val.specified ? 
elem.value : elem.text; - } - }, - select: { - get: function( elem ) { - var value, i, max, option, - index = elem.selectedIndex, - values = [], - options = elem.options, - one = elem.type === "select-one"; - - // Nothing was selected - if ( index < 0 ) { - return null; - } - - // Loop through all the selected options - i = one ? index : 0; - max = one ? index + 1 : options.length; - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Don't return options that are disabled or in a disabled optgroup - if ( option.selected && (jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null) && - (!option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" )) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - // Fixes Bug #2551 -- select.val() broken in IE after form.reset() - if ( one && !values.length && options.length ) { - return jQuery( options[ index ] ).val(); - } - - return values; - }, - - set: function( elem, value ) { - var values = jQuery.makeArray( value ); - - jQuery(elem).find("option").each(function() { - this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; - }); - - if ( !values.length ) { - elem.selectedIndex = -1; - } - return values; - } - } - }, - - attrFn: { - val: true, - css: true, - html: true, - text: true, - data: true, - width: true, - height: true, - offset: true - }, - - attr: function( elem, name, value, pass ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set attributes on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( pass && name in jQuery.attrFn ) { - return jQuery( elem )[ name ]( value ); - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === 
"undefined" ) { - return jQuery.prop( elem, name, value ); - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - // All attributes are lowercase - // Grab necessary hook if one is defined - if ( notxml ) { - name = name.toLowerCase(); - hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook ); - } - - if ( value !== undefined ) { - - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - - } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - elem.setAttribute( name, "" + value ); - return value; - } - - } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - - ret = elem.getAttribute( name ); - - // Non-existent attributes return null, we normalize to undefined - return ret === null ? - undefined : - ret; - } - }, - - removeAttr: function( elem, value ) { - var propName, attrNames, name, l, isBool, - i = 0; - - if ( value && elem.nodeType === 1 ) { - attrNames = value.toLowerCase().split( rspace ); - l = attrNames.length; - - for ( ; i < l; i++ ) { - name = attrNames[ i ]; - - if ( name ) { - propName = jQuery.propFix[ name ] || name; - isBool = rboolean.test( name ); - - // See #9699 for explanation of this approach (setting first, then removal) - // Do not do this for boolean attributes (see #10870) - if ( !isBool ) { - jQuery.attr( elem, name, "" ); - } - elem.removeAttribute( getSetAttribute ? 
name : propName ); - - // Set corresponding property to false for boolean attributes - if ( isBool && propName in elem ) { - elem[ propName ] = false; - } - } - } - } - }, - - attrHooks: { - type: { - set: function( elem, value ) { - // We can't allow the type property to be changed (since it causes problems in IE) - if ( rtype.test( elem.nodeName ) && elem.parentNode ) { - jQuery.error( "type property can't be changed" ); - } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { - // Setting the type on a radio button after the value resets the value in IE6-9 - // Reset value to it's default in case type is set after value - // This is for element creation - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - }, - // Use the value property for back compat - // Use the nodeHook for button elements in IE6/7 (#1954) - value: { - get: function( elem, name ) { - if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { - return nodeHook.get( elem, name ); - } - return name in elem ? 
- elem.value : - null; - }, - set: function( elem, value, name ) { - if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { - return nodeHook.set( elem, value, name ); - } - // Does not return so that setAttribute is also used - elem.value = value; - } - } - }, - - propFix: { - tabindex: "tabIndex", - readonly: "readOnly", - "for": "htmlFor", - "class": "className", - maxlength: "maxLength", - cellspacing: "cellSpacing", - cellpadding: "cellPadding", - rowspan: "rowSpan", - colspan: "colSpan", - usemap: "useMap", - frameborder: "frameBorder", - contenteditable: "contentEditable" - }, - - prop: function( elem, name, value ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set properties on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - if ( notxml ) { - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - return ( elem[ name ] = value ); - } - - } else { - if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - return elem[ name ]; - } - } - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set - // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - var attributeNode = elem.getAttributeNode("tabindex"); - - return attributeNode && attributeNode.specified ? - parseInt( attributeNode.value, 10 ) : - rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? 
- 0 : - undefined; - } - } - } -}); - -// Add the tabIndex propHook to attrHooks for back-compat (different case is intentional) -jQuery.attrHooks.tabindex = jQuery.propHooks.tabIndex; - -// Hook for boolean attributes -boolHook = { - get: function( elem, name ) { - // Align boolean attributes with corresponding properties - // Fall back to attribute presence where some booleans are not supported - var attrNode, - property = jQuery.prop( elem, name ); - return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ? - name.toLowerCase() : - undefined; - }, - set: function( elem, value, name ) { - var propName; - if ( value === false ) { - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - // value is true since we know at this point it's type boolean and not false - // Set boolean attributes to the same name and set the DOM property - propName = jQuery.propFix[ name ] || name; - if ( propName in elem ) { - // Only set the IDL specifically if it already exists on the element - elem[ propName ] = true; - } - - elem.setAttribute( name, name.toLowerCase() ); - } - return name; - } -}; - -// IE6/7 do not support getting/setting some attributes with get/setAttribute -if ( !getSetAttribute ) { - - fixSpecified = { - name: true, - id: true, - coords: true - }; - - // Use this for any attribute in IE6/7 - // This fixes almost every IE6/7 issue - nodeHook = jQuery.valHooks.button = { - get: function( elem, name ) { - var ret; - ret = elem.getAttributeNode( name ); - return ret && ( fixSpecified[ name ] ? ret.nodeValue !== "" : ret.specified ) ? 
- ret.nodeValue : - undefined; - }, - set: function( elem, value, name ) { - // Set the existing or create a new attribute node - var ret = elem.getAttributeNode( name ); - if ( !ret ) { - ret = document.createAttribute( name ); - elem.setAttributeNode( ret ); - } - return ( ret.nodeValue = value + "" ); - } - }; - - // Apply the nodeHook to tabindex - jQuery.attrHooks.tabindex.set = nodeHook.set; - - // Set width and height to auto instead of 0 on empty string( Bug #8150 ) - // This is for removals - jQuery.each([ "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - set: function( elem, value ) { - if ( value === "" ) { - elem.setAttribute( name, "auto" ); - return value; - } - } - }); - }); - - // Set contenteditable to false on removals(#10429) - // Setting to empty string throws an error as an invalid value - jQuery.attrHooks.contenteditable = { - get: nodeHook.get, - set: function( elem, value, name ) { - if ( value === "" ) { - value = "false"; - } - nodeHook.set( elem, value, name ); - } - }; -} - - -// Some attributes require a special call on IE -if ( !jQuery.support.hrefNormalized ) { - jQuery.each([ "href", "src", "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - get: function( elem ) { - var ret = elem.getAttribute( name, 2 ); - return ret === null ? 
undefined : ret; - } - }); - }); -} - -if ( !jQuery.support.style ) { - jQuery.attrHooks.style = { - get: function( elem ) { - // Return undefined in the case of empty string - // Normalize to lowercase since IE uppercases css property names - return elem.style.cssText.toLowerCase() || undefined; - }, - set: function( elem, value ) { - return ( elem.style.cssText = "" + value ); - } - }; -} - -// Safari mis-reports the default selected property of an option -// Accessing the parent's selectedIndex property fixes it -if ( !jQuery.support.optSelected ) { - jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, { - get: function( elem ) { - var parent = elem.parentNode; - - if ( parent ) { - parent.selectedIndex; - - // Make sure that it also works with optgroups, see #5701 - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - return null; - } - }); -} - -// IE6/7 call enctype encoding -if ( !jQuery.support.enctype ) { - jQuery.propFix.enctype = "encoding"; -} - -// Radios and checkboxes getter/setter -if ( !jQuery.support.checkOn ) { - jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - get: function( elem ) { - // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified - return elem.getAttribute("value") === null ? 
"on" : elem.value; - } - }; - }); -} -jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], { - set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); - } - } - }); -}); - - - - -var rformElems = /^(?:textarea|input|select)$/i, - rtypenamespace = /^([^\.]*)?(?:\.(.+))?$/, - rhoverHack = /(?:^|\s)hover(\.\S+)?\b/, - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|contextmenu)|click/, - rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - rquickIs = /^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/, - quickParse = function( selector ) { - var quick = rquickIs.exec( selector ); - if ( quick ) { - // 0 1 2 3 - // [ _, tag, id, class ] - quick[1] = ( quick[1] || "" ).toLowerCase(); - quick[3] = quick[3] && new RegExp( "(?:^|\\s)" + quick[3] + "(?:\\s|$)" ); - } - return quick; - }, - quickIs = function( elem, m ) { - var attrs = elem.attributes || {}; - return ( - (!m[1] || elem.nodeName.toLowerCase() === m[1]) && - (!m[2] || (attrs.id || {}).value === m[2]) && - (!m[3] || m[3].test( (attrs[ "class" ] || {}).value )) - ); - }, - hoverHack = function( events ) { - return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" ); - }; - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - add: function( elem, types, handler, data, selector ) { - - var elemData, eventHandle, events, - t, tns, type, namespaces, handleObj, - handleObjIn, quick, handlers, special; - - // Don't attach events to noData or text/comment nodes (allow plain objects tho) - if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - events = elemData.events; - if ( !events ) { - elemData.events = events = {}; - } - eventHandle = elemData.handle; - if ( !eventHandle ) { - elemData.handle = eventHandle = function( e ) { - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ? - jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : - undefined; - }; - // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events - eventHandle.elem = elem; - } - - // Handle multiple events separated by a space - // jQuery(...).bind("mouseover mouseout", fn); - types = jQuery.trim( hoverHack(types) ).split( " " ); - for ( t = 0; t < types.length; t++ ) { - - tns = rtypenamespace.exec( types[t] ) || []; - type = tns[1]; - namespaces = ( tns[2] || "" ).split( "." ).sort(); - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? 
special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend({ - type: type, - origType: tns[1], - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - quick: selector && quickParse( selector ), - namespace: namespaces.join(".") - }, handleObjIn ); - - // Init the event handler queue if we're the first - handlers = events[ type ]; - if ( !handlers ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener/attachEvent if the special events handler returns false - if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - // Bind the global event handler to the element - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle, false ); - - } else if ( elem.attachEvent ) { - elem.attachEvent( "on" + type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - // Nullify elem to prevent memory leaks in IE - elem = null; - }, - - global: {}, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var elemData = jQuery.hasData( elem ) && jQuery._data( elem ), - t, tns, type, origType, namespaces, origCount, - j, events, special, handle, eventType, handleObj; - - if ( !elemData || !(events = elemData.events) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - 
types = jQuery.trim( hoverHack( types || "" ) ).split(" "); - for ( t = 0; t < types.length; t++ ) { - tns = rtypenamespace.exec( types[t] ) || []; - type = origType = tns[1]; - namespaces = tns[2]; - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector? special.delegateType : special.bindType ) || type; - eventType = events[ type ] || []; - origCount = eventType.length; - namespaces = namespaces ? new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.)?") + "(\\.|$)") : null; - - // Remove matching events - for ( j = 0; j < eventType.length; j++ ) { - handleObj = eventType[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !namespaces || namespaces.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { - eventType.splice( j--, 1 ); - - if ( handleObj.selector ) { - eventType.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( eventType.length === 0 && origCount !== eventType.length ) { - if ( !special.teardown || special.teardown.call( elem, namespaces ) === false ) { - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - handle = elemData.handle; - if ( handle ) { - handle.elem = null; - } - - // removeData also checks for emptiness and clears the expando if empty - // so use it instead of delete - jQuery.removeData( elem, 
[ "events", "handle" ], true ); - } - }, - - // Events that are safe to short-circuit if no handlers are attached. - // Native DOM events should not be added, they may have inline handlers. - customEvent: { - "getData": true, - "setData": true, - "changeData": true - }, - - trigger: function( event, data, elem, onlyHandlers ) { - // Don't do events on text and comment nodes - if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) { - return; - } - - // Event object or event type - var type = event.type || event, - namespaces = [], - cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType; - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "!" ) >= 0 ) { - // Exclusive events trigger only for the exact event (no namespaces) - type = type.slice(0, -1); - exclusive = true; - } - - if ( type.indexOf( "." ) >= 0 ) { - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split("."); - type = namespaces.shift(); - namespaces.sort(); - } - - if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) { - // No jQuery handlers for this event type, and it can't have inline handlers - return; - } - - // Caller can pass in an Event, Object, or just an event type string - event = typeof event === "object" ? - // jQuery.Event object - event[ jQuery.expando ] ? event : - // Object literal - new jQuery.Event( type, event ) : - // Just the event type (string) - new jQuery.Event( type ); - - event.type = type; - event.isTrigger = true; - event.exclusive = exclusive; - event.namespace = namespaces.join( "." ); - event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.)?") + "(\\.|$)") : null; - ontype = type.indexOf( ":" ) < 0 ? 
"on" + type : ""; - - // Handle a global trigger - if ( !elem ) { - - // TODO: Stop taunting the data cache; remove global events and always attach to document - cache = jQuery.cache; - for ( i in cache ) { - if ( cache[ i ].events && cache[ i ].events[ type ] ) { - jQuery.event.trigger( event, data, cache[ i ].handle.elem, true ); - } - } - return; - } - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data != null ? jQuery.makeArray( data ) : []; - data.unshift( event ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - eventPath = [[ elem, special.bindType || type ]]; - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - cur = rfocusMorph.test( bubbleType + type ) ? 
elem : elem.parentNode; - old = null; - for ( ; cur; cur = cur.parentNode ) { - eventPath.push([ cur, bubbleType ]); - old = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( old && old === elem.ownerDocument ) { - eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]); - } - } - - // Fire handlers on the event path - for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) { - - cur = eventPath[i][0]; - event.type = eventPath[i][1]; - - handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - // Note that this is a bare JS function and not a jQuery handler - handle = ontype && cur[ ontype ]; - if ( handle && jQuery.acceptData( cur ) && handle.apply( cur, data ) === false ) { - event.preventDefault(); - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) && - !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name name as the event. - // Can't use an .isFunction() check here because IE6/7 fails that test. 
- // Don't do default actions on window, that's where global variables be (#6170) - // IE<9 dies on focus/blur to hidden element (#1486) - if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - old = elem[ ontype ]; - - if ( old ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( old ) { - elem[ ontype ] = old; - } - } - } - } - - return event.result; - }, - - dispatch: function( event ) { - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( event || window.event ); - - var handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []), - delegateCount = handlers.delegateCount, - args = [].slice.call( arguments, 0 ), - run_all = !event.exclusive && !event.namespace, - special = jQuery.event.special[ event.type ] || {}, - handlerQueue = [], - i, j, cur, jqcur, ret, selMatch, matched, matches, handleObj, sel, related; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[0] = event; - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers that should run if there are delegated events - // Avoid non-left-click bubbling in Firefox (#3861) - if ( delegateCount && !(event.button && event.type === "click") ) { - - // Pregenerate a single jQuery object for reuse with .is() - jqcur = jQuery(this); - jqcur.context = this.ownerDocument || this; - - for ( cur = event.target; cur != this; cur = cur.parentNode || this ) { - - // Don't process events on disabled elements (#6911, #8165) - if ( cur.disabled !== true ) { 
- selMatch = {}; - matches = []; - jqcur[0] = cur; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - sel = handleObj.selector; - - if ( selMatch[ sel ] === undefined ) { - selMatch[ sel ] = ( - handleObj.quick ? quickIs( cur, handleObj.quick ) : jqcur.is( sel ) - ); - } - if ( selMatch[ sel ] ) { - matches.push( handleObj ); - } - } - if ( matches.length ) { - handlerQueue.push({ elem: cur, matches: matches }); - } - } - } - } - - // Add the remaining (directly-bound) handlers - if ( handlers.length > delegateCount ) { - handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) }); - } - - // Run delegates first; they may want to stop propagation beneath us - for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) { - matched = handlerQueue[ i ]; - event.currentTarget = matched.elem; - - for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) { - handleObj = matched.matches[ j ]; - - // Triggered event must either 1) be non-exclusive and have no namespace, or - // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
- if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) { - - event.data = handleObj.data; - event.handleObj = handleObj; - - ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) - .apply( matched.elem, args ); - - if ( ret !== undefined ) { - event.result = ret; - if ( ret === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - // Includes some event props shared by KeyEvent and MouseEvent - // *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 *** - props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), - - fixHooks: {}, - - keyHooks: { - props: "char charCode key keyCode".split(" "), - filter: function( event, original ) { - - // Add which for key events - if ( event.which == null ) { - event.which = original.charCode != null ? 
original.charCode : original.keyCode; - } - - return event; - } - }, - - mouseHooks: { - props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), - filter: function( event, original ) { - var eventDoc, doc, body, - button = original.button, - fromElement = original.fromElement; - - // Calculate pageX/Y if missing and clientX/Y available - if ( event.pageX == null && original.clientX != null ) { - eventDoc = event.target.ownerDocument || document; - doc = eventDoc.documentElement; - body = eventDoc.body; - - event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); - event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); - } - - // Add relatedTarget, if necessary - if ( !event.relatedTarget && fromElement ) { - event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - // Note: button is not normalized, so don't use it - if ( !event.which && button !== undefined ) { - event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) ); - } - - return event; - } - }, - - fix: function( event ) { - if ( event[ jQuery.expando ] ) { - return event; - } - - // Create a writable copy of the event object and normalize some properties - var i, prop, - originalEvent = event, - fixHook = jQuery.event.fixHooks[ event.type ] || {}, - copy = fixHook.props ? 
this.props.concat( fixHook.props ) : this.props; - - event = jQuery.Event( originalEvent ); - - for ( i = copy.length; i; ) { - prop = copy[ --i ]; - event[ prop ] = originalEvent[ prop ]; - } - - // Fix target property, if necessary (#1925, IE 6/7/8 & Safari2) - if ( !event.target ) { - event.target = originalEvent.srcElement || document; - } - - // Target should not be a text node (#504, Safari) - if ( event.target.nodeType === 3 ) { - event.target = event.target.parentNode; - } - - // For mouse/key events; add metaKey if it's not there (#3368, IE6/7/8) - if ( event.metaKey === undefined ) { - event.metaKey = event.ctrlKey; - } - - return fixHook.filter? fixHook.filter( event, originalEvent ) : event; - }, - - special: { - ready: { - // Make sure the ready event is setup - setup: jQuery.bindReady - }, - - load: { - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - - focus: { - delegateType: "focusin" - }, - blur: { - delegateType: "focusout" - }, - - beforeunload: { - setup: function( data, namespaces, eventHandle ) { - // We only want to do this special case on windows - if ( jQuery.isWindow( this ) ) { - this.onbeforeunload = eventHandle; - } - }, - - teardown: function( namespaces, eventHandle ) { - if ( this.onbeforeunload === eventHandle ) { - this.onbeforeunload = null; - } - } - } - }, - - simulate: function( type, elem, event, bubble ) { - // Piggyback on a donor event to simulate a different one. - // Fake originalEvent to avoid donor's stopPropagation, but if the - // simulated event prevents default then we do the same on the donor. 
- var e = jQuery.extend( - new jQuery.Event(), - event, - { type: type, - isSimulated: true, - originalEvent: {} - } - ); - if ( bubble ) { - jQuery.event.trigger( e, null, elem ); - } else { - jQuery.event.dispatch.call( elem, e ); - } - if ( e.isDefaultPrevented() ) { - event.preventDefault(); - } - } -}; - -// Some plugins are using, but it's undocumented/deprecated and will be removed. -// The 1.7 special event interface should provide all the hooks needed now. -jQuery.event.handle = jQuery.event.dispatch; - -jQuery.removeEvent = document.removeEventListener ? - function( elem, type, handle ) { - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle, false ); - } - } : - function( elem, type, handle ) { - if ( elem.detachEvent ) { - elem.detachEvent( "on" + type, handle ); - } - }; - -jQuery.Event = function( src, props ) { - // Allow instantiation without the 'new' keyword - if ( !(this instanceof jQuery.Event) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || - src.getPreventDefault && src.getPreventDefault() ) ? 
returnTrue : returnFalse; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -function returnFalse() { - return false; -} -function returnTrue() { - return true; -} - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - preventDefault: function() { - this.isDefaultPrevented = returnTrue; - - var e = this.originalEvent; - if ( !e ) { - return; - } - - // if preventDefault exists run it on the original event - if ( e.preventDefault ) { - e.preventDefault(); - - // otherwise set the returnValue property of the original event to false (IE) - } else { - e.returnValue = false; - } - }, - stopPropagation: function() { - this.isPropagationStopped = returnTrue; - - var e = this.originalEvent; - if ( !e ) { - return; - } - // if stopPropagation exists run it on the original event - if ( e.stopPropagation ) { - e.stopPropagation(); - } - // otherwise set the cancelBubble property of the original event to true (IE) - e.cancelBubble = true; - }, - stopImmediatePropagation: function() { - this.isImmediatePropagationStopped = returnTrue; - this.stopPropagation(); - }, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse -}; - -// Create mouseenter/leave events using mouseover/out and event-time checks -jQuery.each({ - mouseenter: "mouseover", - mouseleave: "mouseout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var target = this, - related = event.relatedTarget, - handleObj = 
event.handleObj, - selector = handleObj.selector, - ret; - - // For mousenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || (related !== target && !jQuery.contains( target, related )) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -}); - -// IE submit delegation -if ( !jQuery.support.submitBubbles ) { - - jQuery.event.special.submit = { - setup: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Lazy-add a submit handler when a descendant form may potentially be submitted - jQuery.event.add( this, "click._submit keypress._submit", function( e ) { - // Node name check avoids a VML-related crash in IE (#9807) - var elem = e.target, - form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined; - if ( form && !form._submit_attached ) { - jQuery.event.add( form, "submit._submit", function( event ) { - event._submit_bubble = true; - }); - form._submit_attached = true; - } - }); - // return undefined since we don't need an event listener - }, - - postDispatch: function( event ) { - // If form was submitted by the user, bubble the event up the tree - if ( event._submit_bubble ) { - delete event._submit_bubble; - if ( this.parentNode && !event.isTrigger ) { - jQuery.event.simulate( "submit", this.parentNode, event, true ); - } - } - }, - - teardown: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Remove delegated handlers; cleanData eventually reaps submit handlers attached above - jQuery.event.remove( this, "._submit" ); - } - }; -} - -// IE change delegation and checkbox/radio fix -if ( !jQuery.support.changeBubbles ) { - - jQuery.event.special.change = { - - setup: 
function() { - - if ( rformElems.test( this.nodeName ) ) { - // IE doesn't fire change on a check/radio until blur; trigger it on click - // after a propertychange. Eat the blur-change in special.change.handle. - // This still fires onchange a second time for check/radio after blur. - if ( this.type === "checkbox" || this.type === "radio" ) { - jQuery.event.add( this, "propertychange._change", function( event ) { - if ( event.originalEvent.propertyName === "checked" ) { - this._just_changed = true; - } - }); - jQuery.event.add( this, "click._change", function( event ) { - if ( this._just_changed && !event.isTrigger ) { - this._just_changed = false; - jQuery.event.simulate( "change", this, event, true ); - } - }); - } - return false; - } - // Delegated event; lazy-add a change handler on descendant inputs - jQuery.event.add( this, "beforeactivate._change", function( e ) { - var elem = e.target; - - if ( rformElems.test( elem.nodeName ) && !elem._change_attached ) { - jQuery.event.add( elem, "change._change", function( event ) { - if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { - jQuery.event.simulate( "change", this.parentNode, event, true ); - } - }); - elem._change_attached = true; - } - }); - }, - - handle: function( event ) { - var elem = event.target; - - // Swallow native change events from checkbox/radio, we already triggered them above - if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { - return event.handleObj.handler.apply( this, arguments ); - } - }, - - teardown: function() { - jQuery.event.remove( this, "._change" ); - - return rformElems.test( this.nodeName ); - } - }; -} - -// Create "bubbling" focus and blur events -if ( !jQuery.support.focusinBubbles ) { - jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler while someone wants focusin/focusout - var attaches = 0, - handler = function( event ) { - 
jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - if ( attaches++ === 0 ) { - document.addEventListener( orig, handler, true ); - } - }, - teardown: function() { - if ( --attaches === 0 ) { - document.removeEventListener( orig, handler, true ); - } - } - }; - }); -} - -jQuery.fn.extend({ - - on: function( types, selector, data, fn, /*INTERNAL*/ one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { // && selector != null - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - this.on( type, selector, data, types[ type ], one ); - } - return this; - } - - if ( data == null && fn == null ) { - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return this; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return this.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - }); - }, - one: function( types, selector, data, fn ) { - return this.on( types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - if ( types && types.preventDefault && types.handleObj ) { - // ( event ) dispatched jQuery.Event - var handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - 
handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - // ( types-object [, selector] ) - for ( var type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each(function() { - jQuery.event.remove( this, types, fn, selector ); - }); - }, - - bind: function( types, data, fn ) { - return this.on( types, null, data, fn ); - }, - unbind: function( types, fn ) { - return this.off( types, null, fn ); - }, - - live: function( types, data, fn ) { - jQuery( this.context ).on( types, this.selector, data, fn ); - return this; - }, - die: function( types, fn ) { - jQuery( this.context ).off( types, this.selector || "**", fn ); - return this; - }, - - delegate: function( selector, types, data, fn ) { - return this.on( types, selector, data, fn ); - }, - undelegate: function( selector, types, fn ) { - // ( namespace ) or ( selector, types [, fn] ) - return arguments.length == 1? 
this.off( selector, "**" ) : this.off( types, selector, fn ); - }, - - trigger: function( type, data ) { - return this.each(function() { - jQuery.event.trigger( type, data, this ); - }); - }, - triggerHandler: function( type, data ) { - if ( this[0] ) { - return jQuery.event.trigger( type, data, this[0], true ); - } - }, - - toggle: function( fn ) { - // Save reference to arguments for access in closure - var args = arguments, - guid = fn.guid || jQuery.guid++, - i = 0, - toggler = function( event ) { - // Figure out which function to execute - var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i; - jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 ); - - // Make sure that clicks stop - event.preventDefault(); - - // and execute the function - return args[ lastToggle ].apply( this, arguments ) || false; - }; - - // link all the functions, so any of them can unbind this click handler - toggler.guid = guid; - while ( i < args.length ) { - args[ i++ ].guid = guid; - } - - return this.click( toggler ); - }, - - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -}); - -jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - if ( fn == null ) { - fn = data; - data = null; - } - - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; - - if ( jQuery.attrFn ) { - jQuery.attrFn[ name ] = true; - } - - if ( rkeyEvent.test( name ) ) { - jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks; - } - - if ( rmouseEvent.test( name ) ) { - jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks; - } -}); - - - -/*! 
- * Sizzle CSS Selector Engine - * Copyright 2011, The Dojo Foundation - * Released under the MIT, BSD, and GPL Licenses. - * More information: http://sizzlejs.com/ - */ -(function(){ - -var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g, - expando = "sizcache" + (Math.random() + '').replace('.', ''), - done = 0, - toString = Object.prototype.toString, - hasDuplicate = false, - baseHasDuplicate = true, - rBackslash = /\\/g, - rReturn = /\r\n/g, - rNonWord = /\W/; - -// Here we check if the JavaScript engine is using some sort of -// optimization where it does not always call our comparision -// function. If that is the case, discard the hasDuplicate value. -// Thus far that includes Google Chrome. -[0, 0].sort(function() { - baseHasDuplicate = false; - return 0; -}); - -var Sizzle = function( selector, context, results, seed ) { - results = results || []; - context = context || document; - - var origContext = context; - - if ( context.nodeType !== 1 && context.nodeType !== 9 ) { - return []; - } - - if ( !selector || typeof selector !== "string" ) { - return results; - } - - var m, set, checkSet, extra, ret, cur, pop, i, - prune = true, - contextXML = Sizzle.isXML( context ), - parts = [], - soFar = selector; - - // Reset the position of the chunker regexp (start from head) - do { - chunker.exec( "" ); - m = chunker.exec( soFar ); - - if ( m ) { - soFar = m[3]; - - parts.push( m[1] ); - - if ( m[2] ) { - extra = m[3]; - break; - } - } - } while ( m ); - - if ( parts.length > 1 && origPOS.exec( selector ) ) { - - if ( parts.length === 2 && Expr.relative[ parts[0] ] ) { - set = posProcess( parts[0] + parts[1], context, seed ); - - } else { - set = Expr.relative[ parts[0] ] ? 
- [ context ] : - Sizzle( parts.shift(), context ); - - while ( parts.length ) { - selector = parts.shift(); - - if ( Expr.relative[ selector ] ) { - selector += parts.shift(); - } - - set = posProcess( selector, set, seed ); - } - } - - } else { - // Take a shortcut and set the context if the root selector is an ID - // (but not if it'll be faster if the inner selector is an ID) - if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML && - Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) { - - ret = Sizzle.find( parts.shift(), context, contextXML ); - context = ret.expr ? - Sizzle.filter( ret.expr, ret.set )[0] : - ret.set[0]; - } - - if ( context ) { - ret = seed ? - { expr: parts.pop(), set: makeArray(seed) } : - Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML ); - - set = ret.expr ? - Sizzle.filter( ret.expr, ret.set ) : - ret.set; - - if ( parts.length > 0 ) { - checkSet = makeArray( set ); - - } else { - prune = false; - } - - while ( parts.length ) { - cur = parts.pop(); - pop = cur; - - if ( !Expr.relative[ cur ] ) { - cur = ""; - } else { - pop = parts.pop(); - } - - if ( pop == null ) { - pop = context; - } - - Expr.relative[ cur ]( checkSet, pop, contextXML ); - } - - } else { - checkSet = parts = []; - } - } - - if ( !checkSet ) { - checkSet = set; - } - - if ( !checkSet ) { - Sizzle.error( cur || selector ); - } - - if ( toString.call(checkSet) === "[object Array]" ) { - if ( !prune ) { - results.push.apply( results, checkSet ); - - } else if ( context && context.nodeType === 1 ) { - for ( i = 0; checkSet[i] != null; i++ ) { - if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && Sizzle.contains(context, checkSet[i])) ) { - results.push( set[i] ); - } - } - - } else { - for ( i = 0; checkSet[i] != null; i++ ) { - if ( checkSet[i] && checkSet[i].nodeType === 1 ) { - results.push( 
set[i] ); - } - } - } - - } else { - makeArray( checkSet, results ); - } - - if ( extra ) { - Sizzle( extra, origContext, results, seed ); - Sizzle.uniqueSort( results ); - } - - return results; -}; - -Sizzle.uniqueSort = function( results ) { - if ( sortOrder ) { - hasDuplicate = baseHasDuplicate; - results.sort( sortOrder ); - - if ( hasDuplicate ) { - for ( var i = 1; i < results.length; i++ ) { - if ( results[i] === results[ i - 1 ] ) { - results.splice( i--, 1 ); - } - } - } - } - - return results; -}; - -Sizzle.matches = function( expr, set ) { - return Sizzle( expr, null, null, set ); -}; - -Sizzle.matchesSelector = function( node, expr ) { - return Sizzle( expr, null, null, [node] ).length > 0; -}; - -Sizzle.find = function( expr, context, isXML ) { - var set, i, len, match, type, left; - - if ( !expr ) { - return []; - } - - for ( i = 0, len = Expr.order.length; i < len; i++ ) { - type = Expr.order[i]; - - if ( (match = Expr.leftMatch[ type ].exec( expr )) ) { - left = match[1]; - match.splice( 1, 1 ); - - if ( left.substr( left.length - 1 ) !== "\\" ) { - match[1] = (match[1] || "").replace( rBackslash, "" ); - set = Expr.find[ type ]( match, context, isXML ); - - if ( set != null ) { - expr = expr.replace( Expr.match[ type ], "" ); - break; - } - } - } - } - - if ( !set ) { - set = typeof context.getElementsByTagName !== "undefined" ? 
- context.getElementsByTagName( "*" ) : - []; - } - - return { set: set, expr: expr }; -}; - -Sizzle.filter = function( expr, set, inplace, not ) { - var match, anyFound, - type, found, item, filter, left, - i, pass, - old = expr, - result = [], - curLoop = set, - isXMLFilter = set && set[0] && Sizzle.isXML( set[0] ); - - while ( expr && set.length ) { - for ( type in Expr.filter ) { - if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) { - filter = Expr.filter[ type ]; - left = match[1]; - - anyFound = false; - - match.splice(1,1); - - if ( left.substr( left.length - 1 ) === "\\" ) { - continue; - } - - if ( curLoop === result ) { - result = []; - } - - if ( Expr.preFilter[ type ] ) { - match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter ); - - if ( !match ) { - anyFound = found = true; - - } else if ( match === true ) { - continue; - } - } - - if ( match ) { - for ( i = 0; (item = curLoop[i]) != null; i++ ) { - if ( item ) { - found = filter( item, match, i, curLoop ); - pass = not ^ found; - - if ( inplace && found != null ) { - if ( pass ) { - anyFound = true; - - } else { - curLoop[i] = false; - } - - } else if ( pass ) { - result.push( item ); - anyFound = true; - } - } - } - } - - if ( found !== undefined ) { - if ( !inplace ) { - curLoop = result; - } - - expr = expr.replace( Expr.match[ type ], "" ); - - if ( !anyFound ) { - return []; - } - - break; - } - } - } - - // Improper expression - if ( expr === old ) { - if ( anyFound == null ) { - Sizzle.error( expr ); - - } else { - break; - } - } - - old = expr; - } - - return curLoop; -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Utility function for retreiving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -var getText = Sizzle.getText = function( elem ) { - var i, node, - nodeType = elem.nodeType, - ret = ""; - - if ( nodeType ) { - if ( nodeType === 
1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent || innerText for elements - if ( typeof elem.textContent === 'string' ) { - return elem.textContent; - } else if ( typeof elem.innerText === 'string' ) { - // Replace IE's carriage returns - return elem.innerText.replace( rReturn, '' ); - } else { - // Traverse it's children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - } else { - - // If no nodeType, this is expected to be an array - for ( i = 0; (node = elem[i]); i++ ) { - // Do not traverse comment nodes - if ( node.nodeType !== 8 ) { - ret += getText( node ); - } - } - } - return ret; -}; - -var Expr = Sizzle.selectors = { - order: [ "ID", "NAME", "TAG" ], - - match: { - ID: /#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/, - CLASS: /\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/, - NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/, - ATTR: /\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/, - TAG: /^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/, - CHILD: /:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/, - POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/, - PSEUDO: /:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/ - }, - - leftMatch: {}, - - attrMap: { - "class": "className", - "for": "htmlFor" - }, - - attrHandle: { - href: function( elem ) { - return elem.getAttribute( "href" ); - }, - type: function( elem ) { - return elem.getAttribute( "type" ); - } - }, - - relative: { - "+": function(checkSet, part){ - var isPartStr = typeof part === "string", - isTag = isPartStr && !rNonWord.test( part ), - isPartStrNotTag = isPartStr && !isTag; - - if ( isTag ) { - part = part.toLowerCase(); - } - - for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) { - if ( (elem = checkSet[i]) ) { - while 
( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {} - - checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ? - elem || false : - elem === part; - } - } - - if ( isPartStrNotTag ) { - Sizzle.filter( part, checkSet, true ); - } - }, - - ">": function( checkSet, part ) { - var elem, - isPartStr = typeof part === "string", - i = 0, - l = checkSet.length; - - if ( isPartStr && !rNonWord.test( part ) ) { - part = part.toLowerCase(); - - for ( ; i < l; i++ ) { - elem = checkSet[i]; - - if ( elem ) { - var parent = elem.parentNode; - checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false; - } - } - - } else { - for ( ; i < l; i++ ) { - elem = checkSet[i]; - - if ( elem ) { - checkSet[i] = isPartStr ? - elem.parentNode : - elem.parentNode === part; - } - } - - if ( isPartStr ) { - Sizzle.filter( part, checkSet, true ); - } - } - }, - - "": function(checkSet, part, isXML){ - var nodeCheck, - doneName = done++, - checkFn = dirCheck; - - if ( typeof part === "string" && !rNonWord.test( part ) ) { - part = part.toLowerCase(); - nodeCheck = part; - checkFn = dirNodeCheck; - } - - checkFn( "parentNode", part, doneName, checkSet, nodeCheck, isXML ); - }, - - "~": function( checkSet, part, isXML ) { - var nodeCheck, - doneName = done++, - checkFn = dirCheck; - - if ( typeof part === "string" && !rNonWord.test( part ) ) { - part = part.toLowerCase(); - nodeCheck = part; - checkFn = dirNodeCheck; - } - - checkFn( "previousSibling", part, doneName, checkSet, nodeCheck, isXML ); - } - }, - - find: { - ID: function( match, context, isXML ) { - if ( typeof context.getElementById !== "undefined" && !isXML ) { - var m = context.getElementById(match[1]); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - return m && m.parentNode ? 
[m] : []; - } - }, - - NAME: function( match, context ) { - if ( typeof context.getElementsByName !== "undefined" ) { - var ret = [], - results = context.getElementsByName( match[1] ); - - for ( var i = 0, l = results.length; i < l; i++ ) { - if ( results[i].getAttribute("name") === match[1] ) { - ret.push( results[i] ); - } - } - - return ret.length === 0 ? null : ret; - } - }, - - TAG: function( match, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( match[1] ); - } - } - }, - preFilter: { - CLASS: function( match, curLoop, inplace, result, not, isXML ) { - match = " " + match[1].replace( rBackslash, "" ) + " "; - - if ( isXML ) { - return match; - } - - for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) { - if ( elem ) { - if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n\r]/g, " ").indexOf(match) >= 0) ) { - if ( !inplace ) { - result.push( elem ); - } - - } else if ( inplace ) { - curLoop[i] = false; - } - } - } - - return false; - }, - - ID: function( match ) { - return match[1].replace( rBackslash, "" ); - }, - - TAG: function( match, curLoop ) { - return match[1].replace( rBackslash, "" ).toLowerCase(); - }, - - CHILD: function( match ) { - if ( match[1] === "nth" ) { - if ( !match[2] ) { - Sizzle.error( match[0] ); - } - - match[2] = match[2].replace(/^\+|\s*/g, ''); - - // parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6' - var test = /(-?)(\d*)(?:n([+\-]?\d*))?/.exec( - match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" || - !/\D/.test( match[2] ) && "0n+" + match[2] || match[2]); - - // calculate the numbers (first)n+(last) including if they are negative - match[2] = (test[1] + (test[2] || 1)) - 0; - match[3] = test[3] - 0; - } - else if ( match[2] ) { - Sizzle.error( match[0] ); - } - - // TODO: Move to normal caching system - match[0] = done++; - - return match; - }, - - ATTR: function( match, curLoop, inplace, result, not, 
isXML ) { - var name = match[1] = match[1].replace( rBackslash, "" ); - - if ( !isXML && Expr.attrMap[name] ) { - match[1] = Expr.attrMap[name]; - } - - // Handle if an un-quoted value was used - match[4] = ( match[4] || match[5] || "" ).replace( rBackslash, "" ); - - if ( match[2] === "~=" ) { - match[4] = " " + match[4] + " "; - } - - return match; - }, - - PSEUDO: function( match, curLoop, inplace, result, not ) { - if ( match[1] === "not" ) { - // If we're dealing with a complex expression, or a simple one - if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) { - match[3] = Sizzle(match[3], null, null, curLoop); - - } else { - var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not); - - if ( !inplace ) { - result.push.apply( result, ret ); - } - - return false; - } - - } else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) { - return true; - } - - return match; - }, - - POS: function( match ) { - match.unshift( true ); - - return match; - } - }, - - filters: { - enabled: function( elem ) { - return elem.disabled === false && elem.type !== "hidden"; - }, - - disabled: function( elem ) { - return elem.disabled === true; - }, - - checked: function( elem ) { - return elem.checked === true; - }, - - selected: function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - parent: function( elem ) { - return !!elem.firstChild; - }, - - empty: function( elem ) { - return !elem.firstChild; - }, - - has: function( elem, i, match ) { - return !!Sizzle( match[3], elem ).length; - }, - - header: function( elem ) { - return (/h\d/i).test( elem.nodeName ); - }, - - text: function( elem ) { - var attr = elem.getAttribute( "type" ), type = elem.type; - // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) - // use getAttribute instead to test 
this case - return elem.nodeName.toLowerCase() === "input" && "text" === type && ( attr === type || attr === null ); - }, - - radio: function( elem ) { - return elem.nodeName.toLowerCase() === "input" && "radio" === elem.type; - }, - - checkbox: function( elem ) { - return elem.nodeName.toLowerCase() === "input" && "checkbox" === elem.type; - }, - - file: function( elem ) { - return elem.nodeName.toLowerCase() === "input" && "file" === elem.type; - }, - - password: function( elem ) { - return elem.nodeName.toLowerCase() === "input" && "password" === elem.type; - }, - - submit: function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && "submit" === elem.type; - }, - - image: function( elem ) { - return elem.nodeName.toLowerCase() === "input" && "image" === elem.type; - }, - - reset: function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && "reset" === elem.type; - }, - - button: function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && "button" === elem.type || name === "button"; - }, - - input: function( elem ) { - return (/input|select|textarea|button/i).test( elem.nodeName ); - }, - - focus: function( elem ) { - return elem === elem.ownerDocument.activeElement; - } - }, - setFilters: { - first: function( elem, i ) { - return i === 0; - }, - - last: function( elem, i, match, array ) { - return i === array.length - 1; - }, - - even: function( elem, i ) { - return i % 2 === 0; - }, - - odd: function( elem, i ) { - return i % 2 === 1; - }, - - lt: function( elem, i, match ) { - return i < match[3] - 0; - }, - - gt: function( elem, i, match ) { - return i > match[3] - 0; - }, - - nth: function( elem, i, match ) { - return match[3] - 0 === i; - }, - - eq: function( elem, i, match ) { - return match[3] - 0 === i; - } - }, - filter: { - PSEUDO: function( elem, match, i, array ) { - var name = match[1], - filter = Expr.filters[ name 
]; - - if ( filter ) { - return filter( elem, i, match, array ); - - } else if ( name === "contains" ) { - return (elem.textContent || elem.innerText || getText([ elem ]) || "").indexOf(match[3]) >= 0; - - } else if ( name === "not" ) { - var not = match[3]; - - for ( var j = 0, l = not.length; j < l; j++ ) { - if ( not[j] === elem ) { - return false; - } - } - - return true; - - } else { - Sizzle.error( name ); - } - }, - - CHILD: function( elem, match ) { - var first, last, - doneName, parent, cache, - count, diff, - type = match[1], - node = elem; - - switch ( type ) { - case "only": - case "first": - while ( (node = node.previousSibling) ) { - if ( node.nodeType === 1 ) { - return false; - } - } - - if ( type === "first" ) { - return true; - } - - node = elem; - - /* falls through */ - case "last": - while ( (node = node.nextSibling) ) { - if ( node.nodeType === 1 ) { - return false; - } - } - - return true; - - case "nth": - first = match[2]; - last = match[3]; - - if ( first === 1 && last === 0 ) { - return true; - } - - doneName = match[0]; - parent = elem.parentNode; - - if ( parent && (parent[ expando ] !== doneName || !elem.nodeIndex) ) { - count = 0; - - for ( node = parent.firstChild; node; node = node.nextSibling ) { - if ( node.nodeType === 1 ) { - node.nodeIndex = ++count; - } - } - - parent[ expando ] = doneName; - } - - diff = elem.nodeIndex - last; - - if ( first === 0 ) { - return diff === 0; - - } else { - return ( diff % first === 0 && diff / first >= 0 ); - } - } - }, - - ID: function( elem, match ) { - return elem.nodeType === 1 && elem.getAttribute("id") === match; - }, - - TAG: function( elem, match ) { - return (match === "*" && elem.nodeType === 1) || !!elem.nodeName && elem.nodeName.toLowerCase() === match; - }, - - CLASS: function( elem, match ) { - return (" " + (elem.className || elem.getAttribute("class")) + " ") - .indexOf( match ) > -1; - }, - - ATTR: function( elem, match ) { - var name = match[1], - result = Sizzle.attr ? 
- Sizzle.attr( elem, name ) : - Expr.attrHandle[ name ] ? - Expr.attrHandle[ name ]( elem ) : - elem[ name ] != null ? - elem[ name ] : - elem.getAttribute( name ), - value = result + "", - type = match[2], - check = match[4]; - - return result == null ? - type === "!=" : - !type && Sizzle.attr ? - result != null : - type === "=" ? - value === check : - type === "*=" ? - value.indexOf(check) >= 0 : - type === "~=" ? - (" " + value + " ").indexOf(check) >= 0 : - !check ? - value && result !== false : - type === "!=" ? - value !== check : - type === "^=" ? - value.indexOf(check) === 0 : - type === "$=" ? - value.substr(value.length - check.length) === check : - type === "|=" ? - value === check || value.substr(0, check.length + 1) === check + "-" : - false; - }, - - POS: function( elem, match, i, array ) { - var name = match[2], - filter = Expr.setFilters[ name ]; - - if ( filter ) { - return filter( elem, i, match, array ); - } - } - } -}; - -var origPOS = Expr.match.POS, - fescape = function(all, num){ - return "\\" + (num - 0 + 1); - }; - -for ( var type in Expr.match ) { - Expr.match[ type ] = new RegExp( Expr.match[ type ].source + (/(?![^\[]*\])(?![^\(]*\))/.source) ); - Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, fescape) ); -} -// Expose origPOS -// "global" as in regardless of relation to brackets/parens -Expr.match.globalPOS = origPOS; - -var makeArray = function( array, results ) { - array = Array.prototype.slice.call( array, 0 ); - - if ( results ) { - results.push.apply( results, array ); - return results; - } - - return array; -}; - -// Perform a simple check to determine if the browser is capable of -// converting a NodeList to an array using builtin methods. 
-// Also verifies that the returned array holds DOM nodes -// (which is not the case in the Blackberry browser) -try { - Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType; - -// Provide a fallback method if it does not work -} catch( e ) { - makeArray = function( array, results ) { - var i = 0, - ret = results || []; - - if ( toString.call(array) === "[object Array]" ) { - Array.prototype.push.apply( ret, array ); - - } else { - if ( typeof array.length === "number" ) { - for ( var l = array.length; i < l; i++ ) { - ret.push( array[i] ); - } - - } else { - for ( ; array[i]; i++ ) { - ret.push( array[i] ); - } - } - } - - return ret; - }; -} - -var sortOrder, siblingCheck; - -if ( document.documentElement.compareDocumentPosition ) { - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) { - return a.compareDocumentPosition ? -1 : 1; - } - - return a.compareDocumentPosition(b) & 4 ? 
-1 : 1; - }; - -} else { - sortOrder = function( a, b ) { - // The nodes are identical, we can exit early - if ( a === b ) { - hasDuplicate = true; - return 0; - - // Fallback to using sourceIndex (in IE) if it's available on both nodes - } else if ( a.sourceIndex && b.sourceIndex ) { - return a.sourceIndex - b.sourceIndex; - } - - var al, bl, - ap = [], - bp = [], - aup = a.parentNode, - bup = b.parentNode, - cur = aup; - - // If the nodes are siblings (or identical) we can do a quick check - if ( aup === bup ) { - return siblingCheck( a, b ); - - // If no parents were found then the nodes are disconnected - } else if ( !aup ) { - return -1; - - } else if ( !bup ) { - return 1; - } - - // Otherwise they're somewhere else in the tree so we need - // to build up a full list of the parentNodes for comparison - while ( cur ) { - ap.unshift( cur ); - cur = cur.parentNode; - } - - cur = bup; - - while ( cur ) { - bp.unshift( cur ); - cur = cur.parentNode; - } - - al = ap.length; - bl = bp.length; - - // Start walking down the tree looking for a discrepancy - for ( var i = 0; i < al && i < bl; i++ ) { - if ( ap[i] !== bp[i] ) { - return siblingCheck( ap[i], bp[i] ); - } - } - - // We ended someplace up the tree so do a sibling check - return i === al ? 
- siblingCheck( a, bp[i], -1 ) : - siblingCheck( ap[i], b, 1 ); - }; - - siblingCheck = function( a, b, ret ) { - if ( a === b ) { - return ret; - } - - var cur = a.nextSibling; - - while ( cur ) { - if ( cur === b ) { - return -1; - } - - cur = cur.nextSibling; - } - - return 1; - }; -} - -// Check to see if the browser returns elements by name when -// querying by getElementById (and provide a workaround) -(function(){ - // We're going to inject a fake input element with a specified name - var form = document.createElement("div"), - id = "script" + (new Date()).getTime(), - root = document.documentElement; - - form.innerHTML = ""; - - // Inject it into the root element, check its status, and remove it quickly - root.insertBefore( form, root.firstChild ); - - // The workaround has to do additional checks after a getElementById - // Which slows things down for other browsers (hence the branching) - if ( document.getElementById( id ) ) { - Expr.find.ID = function( match, context, isXML ) { - if ( typeof context.getElementById !== "undefined" && !isXML ) { - var m = context.getElementById(match[1]); - - return m ? - m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ? 
- [m] : - undefined : - []; - } - }; - - Expr.filter.ID = function( elem, match ) { - var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id"); - - return elem.nodeType === 1 && node && node.nodeValue === match; - }; - } - - root.removeChild( form ); - - // release memory in IE - root = form = null; -})(); - -(function(){ - // Check to see if the browser returns only elements - // when doing getElementsByTagName("*") - - // Create a fake element - var div = document.createElement("div"); - div.appendChild( document.createComment("") ); - - // Make sure no comments are found - if ( div.getElementsByTagName("*").length > 0 ) { - Expr.find.TAG = function( match, context ) { - var results = context.getElementsByTagName( match[1] ); - - // Filter out possible comments - if ( match[1] === "*" ) { - var tmp = []; - - for ( var i = 0; results[i]; i++ ) { - if ( results[i].nodeType === 1 ) { - tmp.push( results[i] ); - } - } - - results = tmp; - } - - return results; - }; - } - - // Check to see if an attribute returns normalized href attributes - div.innerHTML = ""; - - if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" && - div.firstChild.getAttribute("href") !== "#" ) { - - Expr.attrHandle.href = function( elem ) { - return elem.getAttribute( "href", 2 ); - }; - } - - // release memory in IE - div = null; -})(); - -if ( document.querySelectorAll ) { - (function(){ - var oldSizzle = Sizzle, - div = document.createElement("div"), - id = "__sizzle__"; - - div.innerHTML = "

"; - - // Safari can't handle uppercase or unicode characters when - // in quirks mode. - if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) { - return; - } - - Sizzle = function( query, context, extra, seed ) { - context = context || document; - - // Only use querySelectorAll on non-XML documents - // (ID selectors don't work in non-HTML documents) - if ( !seed && !Sizzle.isXML(context) ) { - // See if we find a selector to speed up - var match = /^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec( query ); - - if ( match && (context.nodeType === 1 || context.nodeType === 9) ) { - // Speed-up: Sizzle("TAG") - if ( match[1] ) { - return makeArray( context.getElementsByTagName( query ), extra ); - - // Speed-up: Sizzle(".CLASS") - } else if ( match[2] && Expr.find.CLASS && context.getElementsByClassName ) { - return makeArray( context.getElementsByClassName( match[2] ), extra ); - } - } - - if ( context.nodeType === 9 ) { - // Speed-up: Sizzle("body") - // The body element only exists once, optimize finding it - if ( query === "body" && context.body ) { - return makeArray( [ context.body ], extra ); - - // Speed-up: Sizzle("#ID") - } else if ( match && match[3] ) { - var elem = context.getElementById( match[3] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id === match[3] ) { - return makeArray( [ elem ], extra ); - } - - } else { - return makeArray( [], extra ); - } - } - - try { - return makeArray( context.querySelectorAll(query), extra ); - } catch(qsaError) {} - - // qSA works strangely on Element-rooted queries - // We can work around this by specifying an extra ID on the root - // and working up from there (Thanks to Andrew Dupont for the technique) - // IE 8 doesn't work on object elements - } else if ( context.nodeType === 1 && 
context.nodeName.toLowerCase() !== "object" ) { - var oldContext = context, - old = context.getAttribute( "id" ), - nid = old || id, - hasParent = context.parentNode, - relativeHierarchySelector = /^\s*[+~]/.test( query ); - - if ( !old ) { - context.setAttribute( "id", nid ); - } else { - nid = nid.replace( /'/g, "\\$&" ); - } - if ( relativeHierarchySelector && hasParent ) { - context = context.parentNode; - } - - try { - if ( !relativeHierarchySelector || hasParent ) { - return makeArray( context.querySelectorAll( "[id='" + nid + "'] " + query ), extra ); - } - - } catch(pseudoError) { - } finally { - if ( !old ) { - oldContext.removeAttribute( "id" ); - } - } - } - } - - return oldSizzle(query, context, extra, seed); - }; - - for ( var prop in oldSizzle ) { - Sizzle[ prop ] = oldSizzle[ prop ]; - } - - // release memory in IE - div = null; - })(); -} - -(function(){ - var html = document.documentElement, - matches = html.matchesSelector || html.mozMatchesSelector || html.webkitMatchesSelector || html.msMatchesSelector; - - if ( matches ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9 fails this) - var disconnectedMatch = !matches.call( document.createElement( "div" ), "div" ), - pseudoWorks = false; - - try { - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( document.documentElement, "[test!='']:sizzle" ); - - } catch( pseudoError ) { - pseudoWorks = true; - } - - Sizzle.matchesSelector = function( node, expr ) { - // Make sure that attribute selectors are quoted - expr = expr.replace(/\=\s*([^'"\]]*)\s*\]/g, "='$1']"); - - if ( !Sizzle.isXML( node ) ) { - try { - if ( pseudoWorks || !Expr.match.PSEUDO.test( expr ) && !/!=/.test( expr ) ) { - var ret = matches.call( node, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || !disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 
9, so check for that - node.document && node.document.nodeType !== 11 ) { - return ret; - } - } - } catch(e) {} - } - - return Sizzle(expr, null, null, [node]).length > 0; - }; - } -})(); - -(function(){ - var div = document.createElement("div"); - - div.innerHTML = "
"; - - // Opera can't find a second classname (in 9.6) - // Also, make sure that getElementsByClassName actually exists - if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) { - return; - } - - // Safari caches class attributes, doesn't catch changes (in 3.2) - div.lastChild.className = "e"; - - if ( div.getElementsByClassName("e").length === 1 ) { - return; - } - - Expr.order.splice(1, 0, "CLASS"); - Expr.find.CLASS = function( match, context, isXML ) { - if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) { - return context.getElementsByClassName(match[1]); - } - }; - - // release memory in IE - div = null; -})(); - -function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) { - for ( var i = 0, l = checkSet.length; i < l; i++ ) { - var elem = checkSet[i]; - - if ( elem ) { - var match = false; - - elem = elem[dir]; - - while ( elem ) { - if ( elem[ expando ] === doneName ) { - match = checkSet[elem.sizset]; - break; - } - - if ( elem.nodeType === 1 && !isXML ){ - elem[ expando ] = doneName; - elem.sizset = i; - } - - if ( elem.nodeName.toLowerCase() === cur ) { - match = elem; - break; - } - - elem = elem[dir]; - } - - checkSet[i] = match; - } - } -} - -function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) { - for ( var i = 0, l = checkSet.length; i < l; i++ ) { - var elem = checkSet[i]; - - if ( elem ) { - var match = false; - - elem = elem[dir]; - - while ( elem ) { - if ( elem[ expando ] === doneName ) { - match = checkSet[elem.sizset]; - break; - } - - if ( elem.nodeType === 1 ) { - if ( !isXML ) { - elem[ expando ] = doneName; - elem.sizset = i; - } - - if ( typeof cur !== "string" ) { - if ( elem === cur ) { - match = true; - break; - } - - } else if ( Sizzle.filter( cur, [elem] ).length > 0 ) { - match = elem; - break; - } - } - - elem = elem[dir]; - } - - checkSet[i] = match; - } - } -} - -if ( document.documentElement.contains ) { - Sizzle.contains = function( a, b ) { - return 
a !== b && (a.contains ? a.contains(b) : true); - }; - -} else if ( document.documentElement.compareDocumentPosition ) { - Sizzle.contains = function( a, b ) { - return !!(a.compareDocumentPosition(b) & 16); - }; - -} else { - Sizzle.contains = function() { - return false; - }; -} - -Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement; - - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -var posProcess = function( selector, context, seed ) { - var match, - tmpSet = [], - later = "", - root = context.nodeType ? [context] : context; - - // Position selectors must be done after the filter - // And so must :not(positional) so we move all PSEUDOs to the end - while ( (match = Expr.match.PSEUDO.exec( selector )) ) { - later += match[0]; - selector = selector.replace( Expr.match.PSEUDO, "" ); - } - - selector = Expr.relative[selector] ? 
selector + "*" : selector; - - for ( var i = 0, l = root.length; i < l; i++ ) { - Sizzle( selector, root[i], tmpSet, seed ); - } - - return Sizzle.filter( later, tmpSet ); -}; - -// EXPOSE -// Override sizzle attribute retrieval -Sizzle.attr = jQuery.attr; -Sizzle.selectors.attrMap = {}; -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; -jQuery.expr[":"] = jQuery.expr.filters; -jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; - - -})(); - - -var runtil = /Until$/, - rparentsprev = /^(?:parents|prevUntil|prevAll)/, - // Note: This RegExp should be improved, or likely pulled from Sizzle - rmultiselector = /,/, - isSimple = /^.[^:#\[\.,]*$/, - slice = Array.prototype.slice, - POS = jQuery.expr.match.globalPOS, - // methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend({ - find: function( selector ) { - var self = this, - i, l; - - if ( typeof selector !== "string" ) { - return jQuery( selector ).filter(function() { - for ( i = 0, l = self.length; i < l; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - }); - } - - var ret = this.pushStack( "", "find", selector ), - length, n, r; - - for ( i = 0, l = this.length; i < l; i++ ) { - length = ret.length; - jQuery.find( selector, this[i], ret ); - - if ( i > 0 ) { - // Make sure that the results are unique - for ( n = length; n < ret.length; n++ ) { - for ( r = 0; r < length; r++ ) { - if ( ret[r] === ret[n] ) { - ret.splice(n--, 1); - break; - } - } - } - } - } - - return ret; - }, - - has: function( target ) { - var targets = jQuery( target ); - return this.filter(function() { - for ( var i = 0, l = targets.length; i < l; i++ ) { - if ( jQuery.contains( this, targets[i] ) ) { - return true; - } - } - }); - }, - - not: function( selector ) { - return this.pushStack( 
winnow(this, selector, false), "not", selector); - }, - - filter: function( selector ) { - return this.pushStack( winnow(this, selector, true), "filter", selector ); - }, - - is: function( selector ) { - return !!selector && ( - typeof selector === "string" ? - // If this is a positional selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - POS.test( selector ) ? - jQuery( selector, this.context ).index( this[0] ) >= 0 : - jQuery.filter( selector, this ).length > 0 : - this.filter( selector ).length > 0 ); - }, - - closest: function( selectors, context ) { - var ret = [], i, l, cur = this[0]; - - // Array (deprecated as of jQuery 1.7) - if ( jQuery.isArray( selectors ) ) { - var level = 1; - - while ( cur && cur.ownerDocument && cur !== context ) { - for ( i = 0; i < selectors.length; i++ ) { - - if ( jQuery( cur ).is( selectors[ i ] ) ) { - ret.push({ selector: selectors[ i ], elem: cur, level: level }); - } - } - - cur = cur.parentNode; - level++; - } - - return ret; - } - - // String - var pos = POS.test( selectors ) || typeof selectors !== "string" ? - jQuery( selectors, context || this.context ) : - 0; - - for ( i = 0, l = this.length; i < l; i++ ) { - cur = this[i]; - - while ( cur ) { - if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) { - ret.push( cur ); - break; - - } else { - cur = cur.parentNode; - if ( !cur || !cur.ownerDocument || cur === context || cur.nodeType === 11 ) { - break; - } - } - } - } - - ret = ret.length > 1 ? jQuery.unique( ret ) : ret; - - return this.pushStack( ret, "closest", selectors ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[0] && this[0].parentNode ) ? 
this.prevAll().length : -1; - } - - // index in selector - if ( typeof elem === "string" ) { - return jQuery.inArray( this[0], jQuery( elem ) ); - } - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[0] : elem, this ); - }, - - add: function( selector, context ) { - var set = typeof selector === "string" ? - jQuery( selector, context ) : - jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ), - all = jQuery.merge( this.get(), set ); - - return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ? - all : - jQuery.unique( all ) ); - }, - - andSelf: function() { - return this.add( this.prevObject ); - } -}); - -// A painfully simple check to see if an element is disconnected -// from a document (should be improved, where feasible). -function isDisconnected( node ) { - return !node || !node.parentNode || node.parentNode.nodeType === 11; -} - -jQuery.each({ - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return jQuery.dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return jQuery.dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return jQuery.nth( elem, 2, "nextSibling" ); - }, - prev: function( elem ) { - return jQuery.nth( elem, 2, "previousSibling" ); - }, - nextAll: function( elem ) { - return jQuery.dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return jQuery.dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return jQuery.dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return jQuery.dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return jQuery.sibling( elem.firstChild ); - }, - contents: function( elem ) { - return jQuery.nodeName( elem, "iframe" ) ? - elem.contentDocument || elem.contentWindow.document : - jQuery.makeArray( elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var ret = jQuery.map( this, fn, until ); - - if ( !runtil.test( name ) ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - ret = jQuery.filter( selector, ret ); - } - - ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret; - - if ( (this.length > 1 || rmultiselector.test( selector )) && rparentsprev.test( name ) ) { - ret = ret.reverse(); - } - - return this.pushStack( ret, name, slice.call( arguments ).join(",") ); - }; -}); - -jQuery.extend({ - filter: function( expr, elems, not ) { - if ( not ) { - expr = ":not(" + expr + ")"; - } - - return elems.length === 1 ? - jQuery.find.matchesSelector(elems[0], expr) ? 
[ elems[0] ] : [] : - jQuery.find.matches(expr, elems); - }, - - dir: function( elem, dir, until ) { - var matched = [], - cur = elem[ dir ]; - - while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { - if ( cur.nodeType === 1 ) { - matched.push( cur ); - } - cur = cur[dir]; - } - return matched; - }, - - nth: function( cur, result, dir, elem ) { - result = result || 1; - var num = 0; - - for ( ; cur; cur = cur[dir] ) { - if ( cur.nodeType === 1 && ++num === result ) { - break; - } - } - - return cur; - }, - - sibling: function( n, elem ) { - var r = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - r.push( n ); - } - } - - return r; - } -}); - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, keep ) { - - // Can't pass null or undefined to indexOf in Firefox 4 - // Set to 0 to skip string check - qualifier = qualifier || 0; - - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep(elements, function( elem, i ) { - var retVal = !!qualifier.call( elem, i, elem ); - return retVal === keep; - }); - - } else if ( qualifier.nodeType ) { - return jQuery.grep(elements, function( elem, i ) { - return ( elem === qualifier ) === keep; - }); - - } else if ( typeof qualifier === "string" ) { - var filtered = jQuery.grep(elements, function( elem ) { - return elem.nodeType === 1; - }); - - if ( isSimple.test( qualifier ) ) { - return jQuery.filter(qualifier, filtered, !keep); - } else { - qualifier = jQuery.filter( qualifier, filtered ); - } - } - - return jQuery.grep(elements, function( elem, i ) { - return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep; - }); -} - - - - -function createSafeFragment( document ) { - var list = nodeNames.split( "|" ), - safeFrag = document.createDocumentFragment(); - - if ( safeFrag.createElement ) { - while ( list.length ) { - safeFrag.createElement( - list.pop() - ); - } - } - return 
safeFrag; -} - -var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + - "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", - rinlinejQuery = / jQuery\d+="(?:\d+|null)"/g, - rleadingWhitespace = /^\s+/, - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig, - rtagName = /<([\w:]+)/, - rtbody = /]", "i"), - // checked="checked" or checked - rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, - rscriptType = /\/(java|ecma)script/i, - rcleanScript = /^\s*", "" ], - legend: [ 1, "
", "
" ], - thead: [ 1, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - col: [ 2, "", "
" ], - area: [ 1, "", "" ], - _default: [ 0, "", "" ] - }, - safeFragment = createSafeFragment( document ); - -wrapMap.optgroup = wrapMap.option; -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// IE can't serialize and