diff --git a/cmake/os/Darwin.cmake b/cmake/os/Darwin.cmake
deleted file mode 100644
index 21e18360dfe98..0000000000000
--- a/cmake/os/Darwin.cmake
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
-
-# This file includes OSX specific options and quirks, related to system checks
diff --git a/extra/mariabackup/ds_compress.cc b/extra/mariabackup/ds_compress.cc
index 64f342df46252..f7a9b7a1fbd65 100644
--- a/extra/mariabackup/ds_compress.cc
+++ b/extra/mariabackup/ds_compress.cc
@@ -34,9 +34,10 @@ typedef struct {
     pthread_t id;
    uint num;
     pthread_mutex_t data_mutex;
+    pthread_cond_t avail_cond;
     pthread_cond_t data_cond;
     pthread_cond_t done_cond;
-    my_bool data_avail;
+    pthread_t data_avail;
     my_bool cancelled;
     const char *from;
     size_t from_len;
@@ -195,9 +196,13 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
     threads = comp_ctxt->threads;
     nthreads = comp_ctxt->nthreads;
 
+    const pthread_t self = pthread_self();
+
     ptr = (const char *) buf;
     while (len > 0) {
-        uint max_thread;
+        bool wait = nthreads == 1;
+retry:
+        bool submitted = false;
 
         /* Send data to worker threads for compression */
         for (i = 0; i < nthreads; i++) {
@@ -206,16 +211,33 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
             thd = threads + i;
 
             pthread_mutex_lock(&thd->data_mutex);
 
+            if (thd->data_avail == pthread_t(~0UL)) {
+            } else if (!wait) {
+skip:
+                pthread_mutex_unlock(&thd->data_mutex);
+                continue;
+            } else {
+                for (;;) {
+                    pthread_cond_wait(&thd->avail_cond,
+                                      &thd->data_mutex);
+                    if (thd->data_avail
+                        == pthread_t(~0UL)) {
+                        break;
+                    }
+                    goto skip;
+                }
+            }
             chunk_len = (len > COMPRESS_CHUNK_SIZE) ?
                 COMPRESS_CHUNK_SIZE : len;
             thd->from = ptr;
             thd->from_len = chunk_len;
-            thd->data_avail = TRUE;
+            thd->data_avail = self;
             pthread_cond_signal(&thd->data_cond);
             pthread_mutex_unlock(&thd->data_mutex);
 
+            submitted = true;
             len -= chunk_len;
             if (len == 0) {
                 break;
@@ -223,13 +245,20 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
             ptr += chunk_len;
         }
 
-        max_thread = (i < nthreads) ? i : nthreads - 1;
+        if (!submitted) {
+            wait = true;
+            goto retry;
+        }
 
-        /* Reap and stream the compressed data */
-        for (i = 0; i <= max_thread; i++) {
+        for (i = 0; i < nthreads; i++) {
             thd = threads + i;
 
             pthread_mutex_lock(&thd->data_mutex);
+            if (thd->data_avail != self) {
+                pthread_mutex_unlock(&thd->data_mutex);
+                continue;
+            }
+
             while (!thd->to_len) {
                 pthread_cond_wait(&thd->done_cond,
                                   &thd->data_mutex);
@@ -247,6 +276,8 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
             }
 
             thd->to_len = 0;
+            thd->data_avail = pthread_t(~0UL);
+            pthread_cond_signal(&thd->avail_cond);
             pthread_mutex_unlock(&thd->data_mutex);
 
             if (fail) {
@@ -334,6 +365,7 @@ destroy_worker_thread(comp_thread_ctxt_t *thd)
 
     pthread_join(thd->id, NULL);
 
+    pthread_cond_destroy(&thd->avail_cond);
     pthread_cond_destroy(&thd->data_cond);
     pthread_cond_destroy(&thd->done_cond);
     pthread_mutex_destroy(&thd->data_mutex);
@@ -364,11 +396,14 @@ create_worker_threads(uint n)
 
         /* Initialize and data mutex and condition var */
         if (pthread_mutex_init(&thd->data_mutex, NULL) ||
+            pthread_cond_init(&thd->avail_cond, NULL) ||
             pthread_cond_init(&thd->data_cond, NULL) ||
             pthread_cond_init(&thd->done_cond, NULL)) {
             goto err;
         }
 
+        thd->data_avail = pthread_t(~0UL);
+
         if (pthread_create(&thd->id, NULL, compress_worker_thread_func,
                            thd)) {
             msg("compress: pthread_create() failed: "
@@ -410,13 +445,13 @@ compress_worker_thread_func(void *arg)
     pthread_mutex_lock(&thd->data_mutex);
 
     while (1) {
-        while (!thd->data_avail && !thd->cancelled) {
+        while (!thd->cancelled
+               && (thd->to_len || thd->data_avail == pthread_t(~0UL))) {
             pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
         }
 
         if (thd->cancelled)
             break;
-        thd->data_avail = FALSE;
 
         thd->to_len = qlz_compress(thd->from, thd->to, thd->from_len,
                                    &thd->state);
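
A note on the new handshake in ds_compress.cc above: data_avail no longer holds a boolean but the pthread_t of the thread that claimed the worker slot, with pthread_t(~0UL) acting as the "free" sentinel, and the new avail_cond lets a writer sleep until a slot is released again. The following stand-alone sketch only illustrates that claim/release pattern; it is not the mariabackup code, and slot_t, claim_slot() and release_slot() are invented names for the example.

#include <pthread.h>

// Illustration of the ownership protocol used above (not the mariabackup
// implementation): a slot is free when its owner field holds the sentinel
// pthread_t(~0UL); a producer claims it by storing pthread_self(), and the
// slot is handed back and avail_cond signalled once the compressed result
// has been consumed, so blocked producers can retry.
struct slot_t {
  pthread_mutex_t mutex;
  pthread_cond_t  avail_cond;   // signalled when the slot becomes free
  pthread_t       owner;        // pthread_t(~0UL) means "free"
};

static const pthread_t SLOT_FREE = pthread_t(~0UL);

// Claim the slot for the calling thread; if block is false, give up when busy.
static bool claim_slot(slot_t *s, bool block)
{
  pthread_mutex_lock(&s->mutex);
  while (s->owner != SLOT_FREE) {
    if (!block) {
      pthread_mutex_unlock(&s->mutex);
      return false;             // busy; the caller tries the next slot
    }
    pthread_cond_wait(&s->avail_cond, &s->mutex);
  }
  s->owner = pthread_self();    // slot now belongs to this producer
  pthread_mutex_unlock(&s->mutex);
  return true;
}

// Hand the slot back once its result has been consumed.
static void release_slot(slot_t *s)
{
  pthread_mutex_lock(&s->mutex);
  s->owner = SLOT_FREE;
  pthread_cond_signal(&s->avail_cond);
  pthread_mutex_unlock(&s->mutex);
}
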
diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result
index 50ef7892e394a..7b92544ee13cc 100644
--- a/mysql-test/main/func_json.result
+++ b/mysql-test/main/func_json.result
@@ -1016,6 +1016,33 @@ j
 {"ID": "4", "Name": "Betty", "Age": 19}
 drop table t1;
 #
+# MDEV-27151: JSON_VALUE() does not parse NULL properties properly
+#
+#
+# It is correct for JSON_EXTRACT() to give null instead of "NULL" because
+# it returns the json literal that is put inside json.
+# Hence it should return null as in 'null' string and not SQL NULL.
+# JSON_VALUE() returns the "VALUE" so it is correct for it to return SQL NULL
+#
+SELECT NULL;
+NULL
+NULL
+SELECT JSON_VALUE('{"nulltest": null}', '$.nulltest');
+JSON_VALUE('{"nulltest": null}', '$.nulltest')
+NULL
+SELECT 1 + NULL;
+1 + NULL
+NULL
+SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest');
+1 + JSON_VALUE('{"nulltest": null}', '$.nulltest')
+NULL
+SELECT NULL;
+NULL
+NULL
+SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a');
+JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a')
+null
+#
 # End of 10.3 tests
 #
 #
diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test
index 1021ce071397a..83f4bf9d1adbe 100644
--- a/mysql-test/main/func_json.test
+++ b/mysql-test/main/func_json.test
@@ -627,6 +627,25 @@ SELECT * FROM t1 WHERE JSON_EXTRACT(j, '$.Age')=19;
 
 drop table t1;
 
+--echo #
+--echo # MDEV-27151: JSON_VALUE() does not parse NULL properties properly
+--echo #
+--echo #
+--echo # It is correct for JSON_EXTRACT() to give null instead of "NULL" because
+--echo # it returns the json literal that is put inside json.
+--echo # Hence it should return null as in 'null' string and not SQL NULL.
+--echo # JSON_VALUE() returns the "VALUE" so it is correct for it to return SQL NULL
+--echo #
+
+SELECT NULL;
+SELECT JSON_VALUE('{"nulltest": null}', '$.nulltest');
+SELECT 1 + NULL;
+SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest');
+
+
+SELECT NULL;
+SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a');
+
 --echo #
 --echo # End of 10.3 tests
 --echo #
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff
index 6cf0fdf415945..44446602b9f05 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff
@@ -1,18 +1,18 @@
---- check_ibd_filesize.result
-+++ check_ibd_filesize.result,32k
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:29:25.129637040 +0530
 @@ -3,18 +3,12 @@
  # SPACE IN 5.7 THAN IN 5.6
  #
  CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 196608
+-# bytes: 65536
++# bytes: 131072
  INSERT INTO t1 SELECT * FROM seq_1_to_25000;
 -# bytes: 9437184
 +# bytes: 786432
  DROP TABLE t1;
  CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 196608
+-# bytes: 65536
++# bytes: 131072
  INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
 -# bytes: 4194304
 -DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff
index 52cd683275543..ef55ad971fe79 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff
@@ -1,17 +1,17 @@
---- check_ibd_filesize.result
-+++ check_ibd_filesize.result,4k
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:39.288769153 +0530
 @@ -3,18 +3,18 @@
  # SPACE IN 5.7 THAN IN 5.6
  #
  CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 24576
+-# bytes: 65536
++# bytes: 16384
  INSERT INTO t1 SELECT * FROM seq_1_to_25000;
  # bytes: 9437184
  DROP TABLE t1;
  CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 24576
+-# bytes: 65536
++# bytes: 16384
  INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
  # bytes: 4194304
  DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff
index 23d9fbe608f03..bcdcea3116060 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff
@@ -1,18 +1,18 @@
---- check_ibd_filesize.result
-+++ check_ibd_filesize.result,64k
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:30:28.957174270 +0530
 @@ -3,18 +3,12 @@
  # SPACE IN 5.7 THAN IN 5.6
  #
  CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 393216
+-# bytes: 65536
++# bytes: 262144
  INSERT INTO t1 SELECT * FROM seq_1_to_25000;
 -# bytes: 9437184
 +# bytes: 983040
  DROP TABLE t1;
  CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 393216
+-# bytes: 65536
++# bytes: 262144
  INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
 -# bytes: 4194304
 -DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff
index 17936a3def94f..7b699ef4cea45 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff
@@ -1,17 +1,17 @@
---- check_ibd_filesize.result
-+++ check_ibd_filesize.result,8k
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:03.516962339 +0530
 @@ -3,18 +3,18 @@
  # SPACE IN 5.7 THAN IN 5.6
  #
  CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 49152
+-# bytes: 65536
++# bytes: 32768
  INSERT INTO t1 SELECT * FROM seq_1_to_25000;
  # bytes: 9437184
  DROP TABLE t1;
  CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
--# bytes: 98304
-+# bytes: 49152
+-# bytes: 65536
++# bytes: 32768
  INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
  # bytes: 4194304
  DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize.result b/mysql-test/suite/innodb/r/check_ibd_filesize.result
index a6f5fbd938797..0d224d6ac5fd1 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize.result
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize.result
@@ -3,12 +3,12 @@
 # SPACE IN 5.7 THAN IN 5.6
 #
 CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 65536
 INSERT INTO t1 SELECT * FROM seq_1_to_25000;
 # bytes: 9437184
 DROP TABLE t1;
 CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 65536
 INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
 # bytes: 4194304
 DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test
index 594ab95ef582c..e59f51623b736 100644
--- a/mysql-test/suite/innodb/t/temporary_table.test
+++ b/mysql-test/suite/innodb/t/temporary_table.test
@@ -5,6 +5,7 @@
 #
 
 --source include/have_innodb.inc
+--source include/innodb_page_size.inc
 # Embedded server does not restart of server
 --source include/not_embedded.inc
 --source include/no_valgrind_without_big.inc
diff --git a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
index ad1c1141a203c..05b4793eb4d87 100644
--- a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
+++ b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
@@ -1024,7 +1024,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 122880
+The size of the tab5.ibd file: 106496
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -1331,7 +1331,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 245760
+The size of the tab5.ibd file: 212992
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -2637,7 +2637,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 122880
+The size of the tab5.ibd file: 106496
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -2946,7 +2946,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 245760
+The size of the tab5.ibd file: 212992
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -4151,7 +4151,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 122880
+The size of the tab5.ibd file: 106496
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -4439,7 +4439,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 245760
+The size of the tab5.ibd file: 212992
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -5697,7 +5697,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 122880
+The size of the tab5.ibd file: 106496
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -6004,7 +6004,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 245760
+The size of the tab5.ibd file: 212992
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -7288,7 +7288,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 122880
+The size of the tab5.ibd file: 106496
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
@@ -7595,7 +7595,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
 AND table_name='tab5' AND database_name='test' AND index_name like 'idx%' ;
 compress_stat
 1
-The size of the tab5.ibd file: 245760
+The size of the tab5.ibd file: 212992
 # for determintic resons simple data should be inserted.
 # insert some 100 records
 # Load the data
diff --git a/mysql-test/suite/mariabackup/log_page_corruption.result b/mysql-test/suite/mariabackup/log_page_corruption.result
index 4dcd21f1e2f70..f2a88ea4bdfbb 100644
--- a/mysql-test/suite/mariabackup/log_page_corruption.result
+++ b/mysql-test/suite/mariabackup/log_page_corruption.result
@@ -31,13 +31,13 @@ FOUND 1 /Database page corruption detected.*/ in backup.log
 FOUND 1 /completed OK!/ in backup.log
 --- "innodb_corrupted_pages" file content: ---
 test/t1_corrupted
-6 8 9
+4 6 7
 test/t2_corrupted
-7 8 10
+5 6 8
 test/t4_corrupted_new
 1
 test/t5_corrupted_to_rename_renamed
-6
+4
 test/t7_corrupted_to_alter
 3
 ------
@@ -48,19 +48,19 @@ INSERT INTO t3_inc VALUES (3), (4), (5), (6), (7), (8), (9);
 # Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
 --- "innodb_corrupted_pages" file content: ---
 test/t1_corrupted
-6 8 9
+4 6 7
 test/t1_inc_corrupted
-6 8 9
+4 6 7
 test/t2_corrupted
-7 8 10
+5 6 8
 test/t2_inc_corrupted
-7 8 10
+5 6 8
 test/t4_inc_corrupted_new
 1
 test/t5_corrupted_to_rename_renamed
-6
+4
 test/t5_inc_corrupted_to_rename_renamed
-6
+4
 test/t7_inc_corrupted_to_alter
 3
 ------
@@ -85,16 +85,16 @@ DROP TABLE t7_inc_corrupted_to_alter;
 # Full backup with --log-innodb-page-corruption
 --- "innodb_corrupted_pages" file content: ---
 test/t3
-6 8
+4 6
 ------
 # Extend some tablespace and corrupt extended pages for incremental backup
 # restart
 # Incremental backup --log-innodb-page-corruption
 --- "innodb_corrupted_pages" file content: ---
 test/t3
-6 8
+4 6
 test/t3_inc
-6 8
+4 6
 ------
 # Full backup prepare
 # "innodb_corrupted_pages" file must not exist after successful prepare
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 22c55066cdcea..1dc511e8da9e7 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, 2021, MariaDB Corporation.
+/* Copyright (c) 2016, 2022, MariaDB Corporation.
 
  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
@@ -591,10 +591,6 @@ bool Item_func_json_query::fix_length_and_dec()
 }
 
 
-/*
-  Returns NULL, not an error if the found value
-  is not a scalar.
-*/
 bool Json_path_extractor::extract(String *str, Item *item_js, Item *item_jp,
                                   CHARSET_INFO *cs)
 {
@@ -627,6 +623,9 @@ bool Json_path_extractor::extract(String *str, Item *item_js, Item *item_jp,
   if (json_read_value(&je))
     return true;
 
+  if (je.value_type == JSON_VALUE_NULL)
+    return true;
+
   if (unlikely(check_and_get_value(&je, str, &error)))
   {
     if (error)
@@ -1109,7 +1108,6 @@ my_decimal *Item_func_json_extract::val_decimal(my_decimal *to)
     case JSON_VALUE_ARRAY:
     case JSON_VALUE_FALSE:
     case JSON_VALUE_UNINITALIZED:
-    // TODO: fix: NULL should be NULL
     case JSON_VALUE_NULL:
       int2my_decimal(E_DEC_FATAL_ERROR, 0, false/*unsigned_flag*/, to);
       return to;
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 54a43b920bb6c..153a0e88c631e 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -1664,6 +1664,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
     fseg_inode_t* inode;
     ib_id_t seg_id;
     uint32_t n_reserved;
+    bool reserved_extent = false;
 
     DBUG_ENTER("fseg_create");
 
@@ -1677,14 +1678,6 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
 
     ut_ad(!block || block->page.id().space() == space->id);
 
-    if (!has_done_reservation) {
-        *err = fsp_reserve_free_extents(&n_reserved, space, 2,
-                                        FSP_NORMAL, mtr);
-        if (UNIV_UNLIKELY(*err != DB_SUCCESS)) {
-            DBUG_RETURN(nullptr);
-        }
-    }
-
     buf_block_t* header = fsp_get_header(space, mtr, err);
     if (!header) {
         block = nullptr;
@@ -1693,10 +1686,32 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
 
     buf_block_t* iblock;
 
+inode_alloc:
     inode = fsp_alloc_seg_inode(space, header, &iblock, mtr, err);
 
-    if (inode == NULL) {
+    if (!inode) {
         block = nullptr;
+reserve_extent:
+        if (!has_done_reservation && !reserved_extent) {
+            *err = fsp_reserve_free_extents(&n_reserved, space, 2,
+                                            FSP_NORMAL, mtr);
+            if (UNIV_UNLIKELY(*err != DB_SUCCESS)) {
+                DBUG_RETURN(nullptr);
+            }
+
+            /* Extents reserved successfully. So
+            try allocating the page or inode */
+            reserved_extent = true;
+            if (inode) {
+                goto page_alloc;
+            }
+
+            goto inode_alloc;
+        }
+
+        if (inode) {
+            fsp_free_seg_inode(space, inode, iblock, mtr);
+        }
         goto funct_exit;
     }
@@ -1724,6 +1739,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
              FSEG_FRAG_SLOT_SIZE * FSEG_FRAG_ARR_N_SLOTS, 0xff);
 
     if (!block) {
+page_alloc:
         block = fseg_alloc_free_page_low(space,
                                          inode, iblock, 0, FSP_UP,
 #ifdef UNIV_DEBUG
@@ -1731,13 +1747,9 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
 #endif /* UNIV_DEBUG */
                                          mtr, mtr, err);
 
-        /* The allocation cannot fail if we have already reserved a
-        space for the page. */
-        ut_ad(!has_done_reservation || block != NULL);
-
         if (!block) {
-            fsp_free_seg_inode(space, inode, iblock, mtr);
-            goto funct_exit;
+            ut_ad(!has_done_reservation);
+            goto reserve_extent;
         }
 
         ut_d(const auto x = block->page.lock.x_lock_count());
@@ -1759,7 +1771,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
             + block->page.frame, space->id);
 
 funct_exit:
-    if (!has_done_reservation) {
+    if (!has_done_reservation && reserved_extent) {
         space->release_free_extents(n_reserved);
     }
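
For readability of the fseg_create() change above: the unconditional up-front call to fsp_reserve_free_extents() is dropped, the inode/page allocation is attempted first, and two free extents are reserved and the allocation retried only when that optimistic attempt fails; reserved_extent remembers whether this function did the reservation so that funct_exit releases only extents it reserved itself. The sketch below is a stand-alone illustration of that retry pattern, not the InnoDB code; Space, try_alloc(), reserve_extents() and release_extents() are hypothetical stand-ins.

#include <cstdint>
#include <cstdlib>

// Hypothetical stand-ins for fil_space_t and the InnoDB allocation calls.
struct Space { int free_pages = 0; uint32_t reserved = 0; };

static void *try_alloc(Space *s)          // returns nullptr when nothing is free
{
  if (s->free_pages <= 0) return nullptr;
  --s->free_pages;
  return std::malloc(16);
}

static bool reserve_extents(Space *s, uint32_t *n_reserved)
{
  s->free_pages += 64;                    // pretend the reserved extents became usable
  s->reserved += 64;
  *n_reserved = 64;
  return true;
}

static void release_extents(Space *s, uint32_t n_reserved)
{
  s->reserved -= n_reserved;
}

// Optimistic allocation with one lazy reservation retry, mirroring the
// control flow added to fseg_create().
void *alloc_with_lazy_reservation(Space *space, bool has_done_reservation)
{
  uint32_t n_reserved = 0;
  bool reserved_here = false;

  void *p = try_alloc(space);             // first attempt: no reservation
  if (!p && !has_done_reservation) {
    if (!reserve_extents(space, &n_reserved))
      return nullptr;                     // out of space for good
    reserved_here = true;
    p = try_alloc(space);                 // retry once after reserving
  }

  if (reserved_here)                      // release only what we reserved here
    release_extents(space, n_reserved);
  return p;
}
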
diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29008.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29008.result
new file mode 100644
index 0000000000000..66e33f42a7ed2
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29008.result
@@ -0,0 +1,38 @@
+#
+# MDEV-29008 Server crash or assertion `field' failed in spider_db_open_item_ident / group by handler
+#
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+connection child2_1;
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+CREATE TABLE tbl_a (
+a INT,
+b INT
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+INSERT INTO tbl_a VALUES (1,2),(3,4);
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+CREATE TABLE tbl_a (
+a INT,
+b INT
+) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"';
+SELECT MIN(t2.a) AS f1, t1.b AS f2 FROM tbl_a AS t1 JOIN tbl_a AS t2 GROUP BY f2 ORDER BY f1, f2;
+f1	f2
+1	2
+1	4
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29008.cnf b/storage/spider/mysql-test/spider/bugfix/t/mdev_29008.cnf
new file mode 100644
index 0000000000000..05dfd8a0bcea9
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29008.cnf
@@ -0,0 +1,3 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29008.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29008.test
new file mode 100644
index 0000000000000..28d9a9244e396
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29008.test
@@ -0,0 +1,39 @@
+--echo #
+--echo # MDEV-29008 Server crash or assertion `field' failed in spider_db_open_item_ident / group by handler
+--echo #
+
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+--enable_result_log
+--enable_query_log
+
+--connection child2_1
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+eval CREATE TABLE tbl_a (
+a INT,
+b INT
+) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+INSERT INTO tbl_a VALUES (1,2),(3,4);
+
+--connection master_1
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+eval CREATE TABLE tbl_a (
+a INT,
+b INT
+) $MASTER_1_ENGINE $MASTER_1_CHARSET COMMENT='table "tbl_a", srv "s_2_1"';
+
+SELECT MIN(t2.a) AS f1, t1.b AS f2 FROM tbl_a AS t1 JOIN tbl_a AS t2 GROUP BY f2 ORDER BY f1, f2;
+
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+
+--disable_query_log
+--disable_result_log
+--source ../t/test_deinit.inc
+--enable_query_log
+--enable_result_log
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index 3692bb6f6d6c2..6f3adfba1932e 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -1,5 +1,5 @@
 /* Copyright (C) 2008-2019 Kentoku Shiba
-   Copyright (C) 2019, 2020, MariaDB Corporation.
+   Copyright (C) 2019, 2022, MariaDB Corporation.
 
  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
@@ -9875,8 +9875,6 @@ int spider_db_open_item_ident(
   } else {
     if (!use_fields)
     {
-      if (!(field = spider->field_exchange(field)))
-        DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
       if (str)
       {
         if ((error_num = share->dbton_share[dbton_id]->
@@ -9885,6 +9883,8 @@ int spider_db_open_item_ident(
           DBUG_RETURN(error_num);
       }
     } else {
+      if (!(field = spider->field_exchange(field)))
+        DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
       if (str)
       {
         SPIDER_FIELD_CHAIN *field_chain = fields->get_next_field_chain();